Diffstat (limited to 'drivers/gpu/drm/radeon/si.c')
-rw-r--r-- | drivers/gpu/drm/radeon/si.c | 817
1 file changed, 707 insertions(+), 110 deletions(-)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index b0db712060fb..9128120da044 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -38,6 +38,7 @@
 #define SI_CE_UCODE_SIZE 2144
 #define SI_RLC_UCODE_SIZE 2048
 #define SI_MC_UCODE_SIZE 7769
+#define OLAND_MC_UCODE_SIZE 7863
 
 MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
 MODULE_FIRMWARE("radeon/TAHITI_me.bin");
@@ -54,6 +55,11 @@ MODULE_FIRMWARE("radeon/VERDE_me.bin");
 MODULE_FIRMWARE("radeon/VERDE_ce.bin");
 MODULE_FIRMWARE("radeon/VERDE_mc.bin");
 MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
+MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
+MODULE_FIRMWARE("radeon/OLAND_me.bin");
+MODULE_FIRMWARE("radeon/OLAND_ce.bin");
+MODULE_FIRMWARE("radeon/OLAND_mc.bin");
+MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
 
 extern int r600_ih_ring_alloc(struct radeon_device *rdev);
 extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -61,6 +67,35 @@ extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
 extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
 extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
 extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
+extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
+extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+
+#define PCIE_BUS_CLK 10000
+#define TCLK (PCIE_BUS_CLK / 10)
+
+/**
+ * si_get_xclk - get the xclk
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Returns the reference clock used by the gfx engine
+ * (SI).
+ */
+u32 si_get_xclk(struct radeon_device *rdev)
+{
+    u32 reference_clock = rdev->clock.spll.reference_freq;
+    u32 tmp;
+
+    tmp = RREG32(CG_CLKPIN_CNTL_2);
+    if (tmp & MUX_TCLK_TO_XCLK)
+        return TCLK;
+
+    tmp = RREG32(CG_CLKPIN_CNTL);
+    if (tmp & XTALIN_DIVIDE)
+        return reference_clock / 4;
+
+    return reference_clock;
+}
 
 /* get temperature in millidegrees */
 int si_get_temp(struct radeon_device *rdev)
@@ -200,6 +235,45 @@ static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
     {0x0000009f, 0x00a37400}
 };
 
+static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
+    {0x0000006f, 0x03044000},
+    {0x00000070, 0x0480c018},
+    {0x00000071, 0x00000040},
+    {0x00000072, 0x01000000},
+    {0x00000074, 0x000000ff},
+    {0x00000075, 0x00143400},
+    {0x00000076, 0x08ec0800},
+    {0x00000077, 0x040000cc},
+    {0x00000079, 0x00000000},
+    {0x0000007a, 0x21000409},
+    {0x0000007c, 0x00000000},
+    {0x0000007d, 0xe8000000},
+    {0x0000007e, 0x044408a8},
+    {0x0000007f, 0x00000003},
+    {0x00000080, 0x00000000},
+    {0x00000081, 0x01000000},
+    {0x00000082, 0x02000000},
+    {0x00000083, 0x00000000},
+    {0x00000084, 0xe3f3e4f4},
+    {0x00000085, 0x00052024},
+    {0x00000087, 0x00000000},
+    {0x00000088, 0x66036603},
+    {0x00000089, 0x01000000},
+    {0x0000008b, 0x1c0a0000},
+    {0x0000008c, 0xff010000},
+    {0x0000008e, 0xffffefff},
+    {0x0000008f, 0xfff3efff},
+    {0x00000090, 0xfff3efbf},
+    {0x00000094, 0x00101101},
+    {0x00000095, 0x00000fff},
+    {0x00000096, 0x00116fff},
+    {0x00000097, 0x60010000},
+    {0x00000098, 0x10010000},
+    {0x00000099, 0x00006000},
+    {0x0000009a, 0x00001000},
+    {0x0000009f, 0x00a17730}
+};
+
 /* ucode loading */
 static int si_mc_load_microcode(struct radeon_device *rdev)
 {
@@ -228,6 +302,11 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
         ucode_size = SI_MC_UCODE_SIZE;
         regs_size = TAHITI_IO_MC_REGS_SIZE;
         break;
+    case CHIP_OLAND:
+        io_mc_regs = (u32 *)&oland_io_mc_regs;
+        ucode_size = OLAND_MC_UCODE_SIZE;
+        regs_size = TAHITI_IO_MC_REGS_SIZE;
+        break;
     }
 
     running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -322,6 +401,15 @@ static int si_init_microcode(struct radeon_device *rdev)
         rlc_req_size = SI_RLC_UCODE_SIZE * 4;
         mc_req_size = SI_MC_UCODE_SIZE * 4;
         break;
+    case CHIP_OLAND:
+        chip_name = "OLAND";
+        rlc_chip_name = "OLAND";
+        pfp_req_size = SI_PFP_UCODE_SIZE * 4;
+        me_req_size = SI_PM4_UCODE_SIZE * 4;
+        ce_req_size = SI_CE_UCODE_SIZE * 4;
+        rlc_req_size = SI_RLC_UCODE_SIZE * 4;
+        mc_req_size = OLAND_MC_UCODE_SIZE * 4;
+        break;
     default: BUG();
     }
 
@@ -1125,7 +1213,8 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
             }
             WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
         }
-    } else if (rdev->family == CHIP_VERDE) {
+    } else if ((rdev->family == CHIP_VERDE) ||
+           (rdev->family == CHIP_OLAND)) {
         for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
             switch (reg_offset) {
             case 0:  /* non-AA compressed depth or any compressed stencil */
@@ -1570,6 +1659,23 @@ static void si_gpu_init(struct radeon_device *rdev)
         rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
         gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
         break;
+    case CHIP_OLAND:
+        rdev->config.si.max_shader_engines = 1;
+        rdev->config.si.max_tile_pipes = 4;
+        rdev->config.si.max_cu_per_sh = 6;
+        rdev->config.si.max_sh_per_se = 1;
+        rdev->config.si.max_backends_per_se = 2;
+        rdev->config.si.max_texture_channel_caches = 4;
+        rdev->config.si.max_gprs = 256;
+        rdev->config.si.max_gs_threads = 16;
+        rdev->config.si.max_hw_contexts = 8;
+
+        rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
+        rdev->config.si.sc_prim_fifo_size_backend = 0x40;
+        rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
+        rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
+        gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
+        break;
     }
 
     /* Initialize HDP */
@@ -1660,6 +1766,8 @@ static void si_gpu_init(struct radeon_device *rdev)
     WREG32(GB_ADDR_CONFIG, gb_addr_config);
     WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
     WREG32(HDP_ADDR_CONFIG, gb_addr_config);
+    WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
+    WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
 
     si_tiling_mode_table_init(rdev);
 
@@ -1836,6 +1944,9 @@ static void si_cp_enable(struct radeon_device *rdev, bool enable)
         radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
         WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
         WREG32(SCRATCH_UMSK, 0);
+        rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+        rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+        rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
     }
     udelay(50);
 }
@@ -2007,7 +2118,7 @@ static int si_cp_resume(struct radeon_device *rdev)
     ring->wptr = 0;
     WREG32(CP_RB0_WPTR, ring->wptr);
 
-    /* set the wb address wether it's enabled or not */
+    /* set the wb address whether it's enabled or not */
     WREG32(CP_RB0_RPTR_ADDR,
            (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
     WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
@@ -2040,7 +2151,7 @@ static int si_cp_resume(struct radeon_device *rdev)
     ring->wptr = 0;
     WREG32(CP_RB1_WPTR, ring->wptr);
 
-    /* set the wb address wether it's enabled or not */
+    /* set the wb address whether it's enabled or not */
     WREG32(CP_RB1_RPTR_ADDR,
            (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
     WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
@@ -2066,7 +2177,7 @@ static int si_cp_resume(struct radeon_device *rdev)
     ring->wptr = 0;
     WREG32(CP_RB2_WPTR, ring->wptr);
 
-    /* set the wb address wether it's enabled or not */
+    /* set the wb address whether it's enabled or not */
     WREG32(CP_RB2_RPTR_ADDR,
            (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
     WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
@@ -2101,92 +2212,281 @@ static int si_cp_resume(struct radeon_device *rdev)
     return 0;
 }
 
-bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
 {
-    u32 srbm_status;
-    u32 grbm_status, grbm_status2;
-    u32 grbm_status_se0, grbm_status_se1;
+    u32 reset_mask = 0;
+    u32 tmp;
 
-    srbm_status = RREG32(SRBM_STATUS);
-    grbm_status = RREG32(GRBM_STATUS);
-    grbm_status2 = RREG32(GRBM_STATUS2);
-    grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
-    grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
-    if (!(grbm_status & GUI_ACTIVE)) {
-        radeon_ring_lockup_update(ring);
-        return false;
+    /* GRBM_STATUS */
+    tmp = RREG32(GRBM_STATUS);
+    if (tmp & (PA_BUSY | SC_BUSY |
+           BCI_BUSY | SX_BUSY |
+           TA_BUSY | VGT_BUSY |
+           DB_BUSY | CB_BUSY |
+           GDS_BUSY | SPI_BUSY |
+           IA_BUSY | IA_BUSY_NO_DMA))
+        reset_mask |= RADEON_RESET_GFX;
+
+    if (tmp & (CF_RQ_PENDING | PF_RQ_PENDING |
+           CP_BUSY | CP_COHERENCY_BUSY))
+        reset_mask |= RADEON_RESET_CP;
+
+    if (tmp & GRBM_EE_BUSY)
+        reset_mask |= RADEON_RESET_GRBM | RADEON_RESET_GFX | RADEON_RESET_CP;
+
+    /* GRBM_STATUS2 */
+    tmp = RREG32(GRBM_STATUS2);
+    if (tmp & (RLC_RQ_PENDING | RLC_BUSY))
+        reset_mask |= RADEON_RESET_RLC;
+
+    /* DMA_STATUS_REG 0 */
+    tmp = RREG32(DMA_STATUS_REG + DMA0_REGISTER_OFFSET);
+    if (!(tmp & DMA_IDLE))
+        reset_mask |= RADEON_RESET_DMA;
+
+    /* DMA_STATUS_REG 1 */
+    tmp = RREG32(DMA_STATUS_REG + DMA1_REGISTER_OFFSET);
+    if (!(tmp & DMA_IDLE))
+        reset_mask |= RADEON_RESET_DMA1;
+
+    /* SRBM_STATUS2 */
+    tmp = RREG32(SRBM_STATUS2);
+    if (tmp & DMA_BUSY)
+        reset_mask |= RADEON_RESET_DMA;
+
+    if (tmp & DMA1_BUSY)
+        reset_mask |= RADEON_RESET_DMA1;
+
+    /* SRBM_STATUS */
+    tmp = RREG32(SRBM_STATUS);
+
+    if (tmp & IH_BUSY)
+        reset_mask |= RADEON_RESET_IH;
+
+    if (tmp & SEM_BUSY)
+        reset_mask |= RADEON_RESET_SEM;
+
+    if (tmp & GRBM_RQ_PENDING)
+        reset_mask |= RADEON_RESET_GRBM;
+
+    if (tmp & VMC_BUSY)
+        reset_mask |= RADEON_RESET_VMC;
+
+    if (tmp & (MCB_BUSY | MCB_NON_DISPLAY_BUSY |
+           MCC_BUSY | MCD_BUSY))
+        reset_mask |= RADEON_RESET_MC;
+
+    if (evergreen_is_display_hung(rdev))
+        reset_mask |= RADEON_RESET_DISPLAY;
+
+    /* VM_L2_STATUS */
+    tmp = RREG32(VM_L2_STATUS);
+    if (tmp & L2_BUSY)
+        reset_mask |= RADEON_RESET_VMC;
+
+    /* Skip MC reset as it's mostly likely not hung, just busy */
+    if (reset_mask & RADEON_RESET_MC) {
+        DRM_DEBUG("MC busy: 0x%08X, clearing.\n", reset_mask);
+        reset_mask &= ~RADEON_RESET_MC;
     }
-    /* force CP activities */
-    radeon_ring_force_activity(rdev, ring);
-    return radeon_ring_test_lockup(rdev, ring);
+
+    return reset_mask;
 }
 
-static int si_gpu_soft_reset(struct radeon_device *rdev)
+static void si_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
 {
     struct evergreen_mc_save save;
-    u32 grbm_reset = 0;
+    u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
+    u32 tmp;
 
-    if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
-        return 0;
+    if (reset_mask == 0)
+        return;
+
+    dev_info(rdev->dev, "GPU softreset: 0x%08X\n", reset_mask);
+
+    evergreen_print_gpu_status_regs(rdev);
+    dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+         RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+    dev_info(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+         RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
 
-    dev_info(rdev->dev, "GPU softreset \n");
-    dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
-        RREG32(GRBM_STATUS));
-    dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
-        RREG32(GRBM_STATUS2));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
-        RREG32(GRBM_STATUS_SE0));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
-        RREG32(GRBM_STATUS_SE1));
-    dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
-        RREG32(SRBM_STATUS));
-    evergreen_mc_stop(rdev, &save);
-    if (radeon_mc_wait_for_idle(rdev)) {
-        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
-    }
     /* Disable CP parsing/prefetching */
     WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
 
-    /* reset all the gfx blocks */
-    grbm_reset = (SOFT_RESET_CP |
-              SOFT_RESET_CB |
-              SOFT_RESET_DB |
-              SOFT_RESET_GDS |
-              SOFT_RESET_PA |
-              SOFT_RESET_SC |
-              SOFT_RESET_BCI |
-              SOFT_RESET_SPI |
-              SOFT_RESET_SX |
-              SOFT_RESET_TC |
-              SOFT_RESET_TA |
-              SOFT_RESET_VGT |
-              SOFT_RESET_IA);
-
-    dev_info(rdev->dev, "  GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
-    WREG32(GRBM_SOFT_RESET, grbm_reset);
-    (void)RREG32(GRBM_SOFT_RESET);
+    if (reset_mask & RADEON_RESET_DMA) {
+        /* dma0 */
+        tmp = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+        tmp &= ~DMA_RB_ENABLE;
+        WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, tmp);
+    }
+    if (reset_mask & RADEON_RESET_DMA1) {
+        /* dma1 */
+        tmp = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+        tmp &= ~DMA_RB_ENABLE;
+        WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, tmp);
+    }
+
     udelay(50);
-    WREG32(GRBM_SOFT_RESET, 0);
-    (void)RREG32(GRBM_SOFT_RESET);
+
+    evergreen_mc_stop(rdev, &save);
+    if (evergreen_mc_wait_for_idle(rdev)) {
+        dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
+    }
+
+    if (reset_mask & (RADEON_RESET_GFX | RADEON_RESET_COMPUTE | RADEON_RESET_CP)) {
+        grbm_soft_reset = SOFT_RESET_CB |
+                  SOFT_RESET_DB |
+                  SOFT_RESET_GDS |
+                  SOFT_RESET_PA |
+                  SOFT_RESET_SC |
+                  SOFT_RESET_BCI |
+                  SOFT_RESET_SPI |
+                  SOFT_RESET_SX |
+                  SOFT_RESET_TC |
+                  SOFT_RESET_TA |
+                  SOFT_RESET_VGT |
+                  SOFT_RESET_IA;
+    }
+
+    if (reset_mask & RADEON_RESET_CP) {
+        grbm_soft_reset |= SOFT_RESET_CP | SOFT_RESET_VGT;
+
+        srbm_soft_reset |= SOFT_RESET_GRBM;
+    }
+
+    if (reset_mask & RADEON_RESET_DMA)
+        srbm_soft_reset |= SOFT_RESET_DMA;
+
+    if (reset_mask & RADEON_RESET_DMA1)
+        srbm_soft_reset |= SOFT_RESET_DMA1;
+
+    if (reset_mask & RADEON_RESET_DISPLAY)
+        srbm_soft_reset |= SOFT_RESET_DC;
+
+    if (reset_mask & RADEON_RESET_RLC)
+        grbm_soft_reset |= SOFT_RESET_RLC;
+
+    if (reset_mask & RADEON_RESET_SEM)
+        srbm_soft_reset |= SOFT_RESET_SEM;
+
+    if (reset_mask & RADEON_RESET_IH)
+        srbm_soft_reset |= SOFT_RESET_IH;
+
+    if (reset_mask & RADEON_RESET_GRBM)
+        srbm_soft_reset |= SOFT_RESET_GRBM;
+
+    if (reset_mask & RADEON_RESET_VMC)
+        srbm_soft_reset |= SOFT_RESET_VMC;
+
+    if (reset_mask & RADEON_RESET_MC)
+        srbm_soft_reset |= SOFT_RESET_MC;
+
+    if (grbm_soft_reset) {
+        tmp = RREG32(GRBM_SOFT_RESET);
+        tmp |= grbm_soft_reset;
+        dev_info(rdev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
+        WREG32(GRBM_SOFT_RESET, tmp);
+        tmp = RREG32(GRBM_SOFT_RESET);
+
+        udelay(50);
+
+        tmp &= ~grbm_soft_reset;
+        WREG32(GRBM_SOFT_RESET, tmp);
+        tmp = RREG32(GRBM_SOFT_RESET);
+    }
+
+    if (srbm_soft_reset) {
+        tmp = RREG32(SRBM_SOFT_RESET);
+        tmp |= srbm_soft_reset;
+        dev_info(rdev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
+        WREG32(SRBM_SOFT_RESET, tmp);
+        tmp = RREG32(SRBM_SOFT_RESET);
+
+        udelay(50);
+
+        tmp &= ~srbm_soft_reset;
+        WREG32(SRBM_SOFT_RESET, tmp);
+        tmp = RREG32(SRBM_SOFT_RESET);
+    }
+
     /* Wait a little for things to settle down */
     udelay(50);
-    dev_info(rdev->dev, "  GRBM_STATUS=0x%08X\n",
-        RREG32(GRBM_STATUS));
-    dev_info(rdev->dev, "  GRBM_STATUS2=0x%08X\n",
-        RREG32(GRBM_STATUS2));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE0=0x%08X\n",
-        RREG32(GRBM_STATUS_SE0));
-    dev_info(rdev->dev, "  GRBM_STATUS_SE1=0x%08X\n",
-        RREG32(GRBM_STATUS_SE1));
-    dev_info(rdev->dev, "  SRBM_STATUS=0x%08X\n",
-        RREG32(SRBM_STATUS));
+
     evergreen_mc_resume(rdev, &save);
-    return 0;
+    udelay(50);
+
+    evergreen_print_gpu_status_regs(rdev);
 }
 
 int si_asic_reset(struct radeon_device *rdev)
 {
-    return si_gpu_soft_reset(rdev);
+    u32 reset_mask;
+
+    reset_mask = si_gpu_check_soft_reset(rdev);
+
+    if (reset_mask)
+        r600_set_bios_scratch_engine_hung(rdev, true);
+
+    si_gpu_soft_reset(rdev, reset_mask);
+
+    reset_mask = si_gpu_check_soft_reset(rdev);
+
+    if (!reset_mask)
+        r600_set_bios_scratch_engine_hung(rdev, false);
+
+    return 0;
+}
+
+/**
+ * si_gfx_is_lockup - Check if the GFX engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the GFX engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+    u32 reset_mask = si_gpu_check_soft_reset(rdev);
+
+    if (!(reset_mask & (RADEON_RESET_GFX |
+                RADEON_RESET_COMPUTE |
+                RADEON_RESET_CP))) {
+        radeon_ring_lockup_update(ring);
+        return false;
+    }
+    /* force CP activities */
+    radeon_ring_force_activity(rdev, ring);
+    return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * si_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+    u32 reset_mask = si_gpu_check_soft_reset(rdev);
+    u32 mask;
+
+    if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+        mask = RADEON_RESET_DMA;
+    else
+        mask = RADEON_RESET_DMA1;
+
+    if (!(reset_mask & mask)) {
+        radeon_ring_lockup_update(ring);
+        return false;
+    }
+    /* force ring activities */
+    radeon_ring_force_activity(rdev, ring);
+    return radeon_ring_test_lockup(rdev, ring);
 }
 
 /* MC */
@@ -2426,9 +2726,20 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
     /* enable context1-15 */
     WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
            (u32)(rdev->dummy_page.addr >> 12));
-    WREG32(VM_CONTEXT1_CNTL2, 0);
+    WREG32(VM_CONTEXT1_CNTL2, 4);
     WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
-                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+                RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                RANGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT |
+                PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                PDE0_PROTECTION_FAULT_ENABLE_DEFAULT |
+                VALID_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                VALID_PROTECTION_FAULT_ENABLE_DEFAULT |
+                READ_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                READ_PROTECTION_FAULT_ENABLE_DEFAULT |
+                WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT |
+                WRITE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
     si_pcie_gart_tlb_flush(rdev);
     DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -2474,6 +2785,7 @@ static bool si_vm_reg_valid(u32 reg)
     /* check config regs */
     switch (reg) {
     case GRBM_GFX_INDEX:
+    case CP_STRMOUT_CNTL:
     case VGT_VTX_VECT_EJECT_REG:
     case VGT_CACHE_INVALIDATION:
     case VGT_ESGS_RING_SIZE:
@@ -2533,6 +2845,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
     u32 idx = pkt->idx + 1;
     u32 idx_value = ib[idx];
     u32 start_reg, end_reg, reg, i;
+    u32 command, info;
 
     switch (pkt->opcode) {
     case PACKET3_NOP:
@@ -2632,6 +2945,52 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
             return -EINVAL;
         }
         break;
+    case PACKET3_CP_DMA:
+        command = ib[idx + 4];
+        info = ib[idx + 1];
+        if (command & PACKET3_CP_DMA_CMD_SAS) {
+            /* src address space is register */
+            if (((info & 0x60000000) >> 29) == 0) {
+                start_reg = idx_value << 2;
+                if (command & PACKET3_CP_DMA_CMD_SAIC) {
+                    reg = start_reg;
+                    if (!si_vm_reg_valid(reg)) {
+                        DRM_ERROR("CP DMA Bad SRC register\n");
+                        return -EINVAL;
+                    }
+                } else {
+                    for (i = 0; i < (command & 0x1fffff); i++) {
+                        reg = start_reg + (4 * i);
+                        if (!si_vm_reg_valid(reg)) {
+                            DRM_ERROR("CP DMA Bad SRC register\n");
+                            return -EINVAL;
+                        }
+                    }
+                }
+            }
+        }
+        if (command & PACKET3_CP_DMA_CMD_DAS) {
+            /* dst address space is register */
+            if (((info & 0x00300000) >> 20) == 0) {
+                start_reg = ib[idx + 2];
+                if (command & PACKET3_CP_DMA_CMD_DAIC) {
+                    reg = start_reg;
+                    if (!si_vm_reg_valid(reg)) {
+                        DRM_ERROR("CP DMA Bad DST register\n");
+                        return -EINVAL;
+                    }
+                } else {
+                    for (i = 0; i < (command & 0x1fffff); i++) {
+                        reg = start_reg + (4 * i);
+                        if (!si_vm_reg_valid(reg)) {
+                            DRM_ERROR("CP DMA Bad DST register\n");
+                            return -EINVAL;
+                        }
+                    }
+                }
+            }
+        }
+        break;
     default:
         DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
         return -EINVAL;
@@ -2729,19 +3088,19 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
     do {
         pkt.idx = idx;
-        pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
-        pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+        pkt.type = RADEON_CP_PACKET_GET_TYPE(ib->ptr[idx]);
+        pkt.count = RADEON_CP_PACKET_GET_COUNT(ib->ptr[idx]);
         pkt.one_reg_wr = 0;
         switch (pkt.type) {
-        case PACKET_TYPE0:
+        case RADEON_PACKET_TYPE0:
            dev_err(rdev->dev, "Packet0 not allowed!\n");
            ret = -EINVAL;
            break;
-        case PACKET_TYPE2:
+        case RADEON_PACKET_TYPE2:
            idx += 1;
            break;
-        case PACKET_TYPE3:
-            pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+        case RADEON_PACKET_TYPE3:
+            pkt.opcode = RADEON_CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
            if (ib->is_const_ib)
                ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
            else {
@@ -2794,45 +3153,105 @@ void si_vm_fini(struct radeon_device *rdev)
  * si_vm_set_page - update the page tables using the CP
  *
  * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
  * @incr: increase next addr by incr bytes
  * @flags: access flags
  *
- * Update the page tables using the CP (cayman-si).
+ * Update the page tables using the CP (SI).
  */
-void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
+void si_vm_set_page(struct radeon_device *rdev,
+            struct radeon_ib *ib,
+            uint64_t pe,
             uint64_t addr, unsigned count,
             uint32_t incr, uint32_t flags)
 {
-    struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
     uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-
-    while (count) {
-        unsigned ndw = 2 + count * 2;
-        if (ndw > 0x3FFE)
-            ndw = 0x3FFE;
-
-        radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
-        radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                     WRITE_DATA_DST_SEL(1)));
-        radeon_ring_write(ring, pe);
-        radeon_ring_write(ring, upper_32_bits(pe));
-        for (; ndw > 2; ndw -= 2, --count, pe += 8) {
-            uint64_t value;
-            if (flags & RADEON_VM_PAGE_SYSTEM) {
-                value = radeon_vm_map_gart(rdev, addr);
-                value &= 0xFFFFFFFFFFFFF000ULL;
-            } else if (flags & RADEON_VM_PAGE_VALID)
-                value = addr;
-            else
-                value = 0;
-            addr += incr;
-            value |= r600_flags;
-            radeon_ring_write(ring, value);
-            radeon_ring_write(ring, upper_32_bits(value));
+    uint64_t value;
+    unsigned ndw;
+
+    if (rdev->asic->vm.pt_ring_index == RADEON_RING_TYPE_GFX_INDEX) {
+        while (count) {
+            ndw = 2 + count * 2;
+            if (ndw > 0x3FFE)
+                ndw = 0x3FFE;
+
+            ib->ptr[ib->length_dw++] = PACKET3(PACKET3_WRITE_DATA, ndw);
+            ib->ptr[ib->length_dw++] = (WRITE_DATA_ENGINE_SEL(0) |
+                    WRITE_DATA_DST_SEL(1));
+            ib->ptr[ib->length_dw++] = pe;
+            ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+            for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+                if (flags & RADEON_VM_PAGE_SYSTEM) {
+                    value = radeon_vm_map_gart(rdev, addr);
+                    value &= 0xFFFFFFFFFFFFF000ULL;
+                } else if (flags & RADEON_VM_PAGE_VALID) {
+                    value = addr;
+                } else {
+                    value = 0;
+                }
+                addr += incr;
+                value |= r600_flags;
+                ib->ptr[ib->length_dw++] = value;
+                ib->ptr[ib->length_dw++] = upper_32_bits(value);
+            }
+        }
+    } else {
+        /* DMA */
+        if (flags & RADEON_VM_PAGE_SYSTEM) {
+            while (count) {
+                ndw = count * 2;
+                if (ndw > 0xFFFFE)
+                    ndw = 0xFFFFE;
+
+                /* for non-physically contiguous pages (system) */
+                ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+                ib->ptr[ib->length_dw++] = pe;
+                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+                for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+                    if (flags & RADEON_VM_PAGE_SYSTEM) {
+                        value = radeon_vm_map_gart(rdev, addr);
+                        value &= 0xFFFFFFFFFFFFF000ULL;
+                    } else if (flags & RADEON_VM_PAGE_VALID) {
+                        value = addr;
+                    } else {
+                        value = 0;
+                    }
+                    addr += incr;
+                    value |= r600_flags;
+                    ib->ptr[ib->length_dw++] = value;
+                    ib->ptr[ib->length_dw++] = upper_32_bits(value);
+                }
+            }
+        } else {
+            while (count) {
+                ndw = count * 2;
+                if (ndw > 0xFFFFE)
+                    ndw = 0xFFFFE;
+
+                if (flags & RADEON_VM_PAGE_VALID)
+                    value = addr;
+                else
+                    value = 0;
+                /* for physically contiguous pages (vram) */
+                ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+                ib->ptr[ib->length_dw++] = pe; /* dst addr */
+                ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+                ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+                ib->ptr[ib->length_dw++] = 0;
+                ib->ptr[ib->length_dw++] = value; /* value */
+                ib->ptr[ib->length_dw++] = upper_32_bits(value);
+                ib->ptr[ib->length_dw++] = incr; /* increment size */
+                ib->ptr[ib->length_dw++] = 0;
+                pe += ndw * 4;
+                addr += (ndw / 2) * incr;
+                count -= ndw / 2;
+            }
         }
+        while (ib->length_dw & 0x7)
+            ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
     }
 }
@@ -2879,6 +3298,32 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
     radeon_ring_write(ring, 0x0);
 }
 
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+    struct radeon_ring *ring = &rdev->ring[ridx];
+
+    if (vm == NULL)
+        return;
+
+    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+    if (vm->id < 8) {
+        radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+    } else {
+        radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+    }
+    radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+    /* flush hdp cache */
+    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+    radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+    radeon_ring_write(ring, 1);
+
+    /* bits 0-7 are the VM contexts0-7 */
+    radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+    radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+    radeon_ring_write(ring, 1 << vm->id);
+}
+
 /*
  * RLC
  */
@@ -3047,6 +3492,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
     WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
     WREG32(CP_INT_CNTL_RING1, 0);
     WREG32(CP_INT_CNTL_RING2, 0);
+    tmp = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+    WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, tmp);
+    tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+    WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
     WREG32(GRBM_INT_CNTL, 0);
     WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
     WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -3166,6 +3615,7 @@ int si_irq_set(struct radeon_device *rdev)
     u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
     u32 grbm_int_cntl = 0;
     u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
+    u32 dma_cntl, dma_cntl1;
 
     if (!rdev->irq.installed) {
         WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -3186,6 +3636,9 @@ int si_irq_set(struct radeon_device *rdev)
     hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
     hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
+    dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
+    dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
+
     /* enable CP interrupts on all rings */
     if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
         DRM_DEBUG("si_irq_set: sw int gfx\n");
@@ -3199,6 +3652,15 @@ int si_irq_set(struct radeon_device *rdev)
         DRM_DEBUG("si_irq_set: sw int cp2\n");
         cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
     }
+    if (atomic_read(&rdev->irq.ring_int[R600_RING_TYPE_DMA_INDEX])) {
+        DRM_DEBUG("si_irq_set: sw int dma\n");
+        dma_cntl |= TRAP_ENABLE;
+    }
+
+    if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_DMA1_INDEX])) {
+        DRM_DEBUG("si_irq_set: sw int dma1\n");
+        dma_cntl1 |= TRAP_ENABLE;
+    }
     if (rdev->irq.crtc_vblank_int[0] ||
         atomic_read(&rdev->irq.pflip[0])) {
         DRM_DEBUG("si_irq_set: vblank 0\n");
@@ -3258,6 +3720,9 @@ int si_irq_set(struct radeon_device *rdev)
     WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
     WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
 
+    WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, dma_cntl);
+    WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, dma_cntl1);
+
     WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
     WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3683,6 +4148,16 @@ restart_ih:
             break;
         }
         break;
+    case 146:
+    case 147:
+        dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
+        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
+            RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR));
+        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+            RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
+        /* reset addr and status */
+        WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+        break;
     case 176: /* RINGID0 CP_INT */
         radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
         break;
@@ -3706,9 +4181,17 @@ restart_ih:
             break;
         }
         break;
+    case 224: /* DMA trap event */
+        DRM_DEBUG("IH: DMA trap\n");
+        radeon_fence_process(rdev, R600_RING_TYPE_DMA_INDEX);
+        break;
     case 233: /* GUI IDLE */
         DRM_DEBUG("IH: GUI idle\n");
         break;
+    case 244: /* DMA trap event */
+        DRM_DEBUG("IH: DMA1 trap\n");
+        radeon_fence_process(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+        break;
     default:
         DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
         break;
@@ -3732,6 +4215,80 @@ restart_ih:
     return IRQ_HANDLED;
 }
 
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+        uint64_t src_offset, uint64_t dst_offset,
+        unsigned num_gpu_pages,
+        struct radeon_fence **fence)
+{
+    struct radeon_semaphore *sem = NULL;
+    int ring_index = rdev->asic->copy.dma_ring_index;
+    struct radeon_ring *ring = &rdev->ring[ring_index];
+    u32 size_in_bytes, cur_size_in_bytes;
+    int i, num_loops;
+    int r = 0;
+
+    r = radeon_semaphore_create(rdev, &sem);
+    if (r) {
+        DRM_ERROR("radeon: moving bo (%d).\n", r);
+        return r;
+    }
+
+    size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+    num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+    r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+    if (r) {
+        DRM_ERROR("radeon: moving bo (%d).\n", r);
+        radeon_semaphore_free(rdev, &sem, NULL);
+        return r;
+    }
+
+    if (radeon_fence_need_sync(*fence, ring->idx)) {
+        radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+                        ring->idx);
+        radeon_fence_note_sync(*fence, ring->idx);
+    } else {
+        radeon_semaphore_free(rdev, &sem, NULL);
+    }
+
+    for (i = 0; i < num_loops; i++) {
+        cur_size_in_bytes = size_in_bytes;
+        if (cur_size_in_bytes > 0xFFFFF)
+            cur_size_in_bytes = 0xFFFFF;
+        size_in_bytes -= cur_size_in_bytes;
+        radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+        radeon_ring_write(ring, dst_offset & 0xffffffff);
+        radeon_ring_write(ring, src_offset & 0xffffffff);
+        radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+        radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+        src_offset += cur_size_in_bytes;
+        dst_offset += cur_size_in_bytes;
+    }
+
+    r = radeon_fence_emit(rdev, fence, ring->idx);
+    if (r) {
+        radeon_ring_unlock_undo(rdev, ring);
+        return r;
+    }
+
+    radeon_ring_unlock_commit(rdev, ring);
+    radeon_semaphore_free(rdev, &sem, *fence);
+
+    return r;
+}
+
 /*
  * startup/shutdown callbacks
  */
@@ -3803,6 +4360,18 @@ static int si_startup(struct radeon_device *rdev)
         return r;
     }
 
+    r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_DMA_INDEX);
+    if (r) {
+        dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+        return r;
+    }
+
+    r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_DMA1_INDEX);
+    if (r) {
+        dev_err(rdev->dev, "failed initializing DMA fences (%d).\n", r);
+        return r;
+    }
+
     /* Enable IRQ */
     r = si_irq_init(rdev);
     if (r) {
@@ -3833,6 +4402,22 @@ static int si_startup(struct radeon_device *rdev)
     if (r)
         return r;
 
+    ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+    r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
+                 DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
+                 DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
+                 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+    if (r)
+        return r;
+
+    ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+    r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
+                 DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
+                 DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
+                 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+    if (r)
+        return r;
+
     r = si_cp_load_microcode(rdev);
     if (r)
         return r;
@@ -3840,6 +4425,10 @@ static int si_startup(struct radeon_device *rdev)
     if (r)
         return r;
 
+    r = cayman_dma_resume(rdev);
+    if (r)
+        return r;
+
     r = radeon_ib_pool_init(rdev);
     if (r) {
         dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3881,9 +4470,7 @@ int si_resume(struct radeon_device *rdev)
 int si_suspend(struct radeon_device *rdev)
 {
     si_cp_enable(rdev, false);
-    rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
-    rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
-    rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+    cayman_dma_stop(rdev);
     si_irq_suspend(rdev);
     radeon_wb_disable(rdev);
     si_pcie_gart_disable(rdev);
@@ -3961,6 +4548,14 @@ int si_init(struct radeon_device *rdev)
     ring->ring_obj = NULL;
     r600_ring_init(rdev, ring, 1024 * 1024);
 
+    ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+    ring->ring_obj = NULL;
+    r600_ring_init(rdev, ring, 64 * 1024);
+
+    ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+    ring->ring_obj = NULL;
+    r600_ring_init(rdev, ring, 64 * 1024);
+
     rdev->ih.ring_obj = NULL;
     r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -3973,6 +4568,7 @@ int si_init(struct radeon_device *rdev)
     if (r) {
         dev_err(rdev->dev, "disabling GPU acceleration\n");
         si_cp_fini(rdev);
+        cayman_dma_fini(rdev);
         si_irq_fini(rdev);
         si_rlc_fini(rdev);
         radeon_wb_fini(rdev);
@@ -4001,6 +4597,7 @@ void si_fini(struct radeon_device *rdev)
     r600_blit_fini(rdev);
 #endif
     si_cp_fini(rdev);
+    cayman_dma_fini(rdev);
     si_irq_fini(rdev);
     si_rlc_fini(rdev);
     radeon_wb_fini(rdev);
@@ -4018,14 +4615,14 @@ void si_fini(struct radeon_device *rdev)
 }
 
 /**
- * si_get_gpu_clock - return GPU clock counter snapshot
+ * si_get_gpu_clock_counter - return GPU clock counter snapshot
  *
  * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (SI).
 * Returns the 64 bit clock counter snapshot.
 */
-uint64_t si_get_gpu_clock(struct radeon_device *rdev)
+uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev)
 {
     uint64_t clock;
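
Note on the si_copy_dma() path added above: each DMA_PACKET_COPY carries at most 0xFFFFF bytes, so the function splits a transfer into DIV_ROUND_UP(size, 0xfffff) loops and reserves five ring dwords per loop plus eleven dwords of fixed overhead for the semaphore sync and fence packets. The standalone sketch below only reproduces that sizing arithmetic for illustration; the 4 KiB GPU page size is an assumption (RADEON_GPU_PAGE_SHIFT is defined elsewhere in the driver), and the values printed by main() are purely hypothetical.

#include <stdio.h>
#include <stdint.h>

/* Assumed value: stands in for RADEON_GPU_PAGE_SHIFT from radeon.h (4 KiB pages). */
#define GPU_PAGE_SHIFT 12
/* Max bytes per DMA_PACKET_COPY, per the patch. */
#define DMA_MAX_COPY   0xFFFFFu

/* Mirror of the sizing math in si_copy_dma(): how many copy packets and
 * how many ring dwords a transfer of num_gpu_pages pages needs. */
static void si_copy_dma_sizing(unsigned num_gpu_pages,
                               uint32_t *num_loops, uint32_t *ring_dwords)
{
    uint32_t size_in_bytes = num_gpu_pages << GPU_PAGE_SHIFT;

    /* DIV_ROUND_UP(size_in_bytes, 0xfffff) */
    *num_loops = (size_in_bytes + DMA_MAX_COPY - 1) / DMA_MAX_COPY;
    /* 5 dwords per copy packet + 11 dwords for semaphore sync and fence */
    *ring_dwords = *num_loops * 5 + 11;
}

int main(void)
{
    unsigned pages[] = { 1, 256, 100000 };
    for (unsigned i = 0; i < 3; i++) {
        uint32_t loops, dwords;
        si_copy_dma_sizing(pages[i], &loops, &dwords);
        printf("%u pages -> %u copy packets, %u ring dwords\n",
               pages[i], loops, dwords);
    }
    return 0;
}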
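The lockup helpers introduced by this patch all key off the same si_gpu_check_soft_reset() mask: the GFX ring treats the GPU as hung only when the GFX, COMPUTE, or CP bits are set, while each async DMA ring looks only at its own bit. The sketch below restates that per-engine selection as a standalone program; the bit values are illustrative stand-ins (the real RADEON_RESET_* flags live in radeon.h), and main() simply fakes a mask instead of reading hardware registers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the RADEON_RESET_* flags defined in radeon.h. */
#define RESET_GFX     (1u << 0)
#define RESET_COMPUTE (1u << 1)
#define RESET_DMA     (1u << 2)
#define RESET_CP      (1u << 3)
#define RESET_DMA1    (1u << 5)

enum ring_id { RING_GFX, RING_DMA0, RING_DMA1 };

/* Which part of the soft-reset mask makes a given ring count as hung,
 * mirroring si_gfx_is_lockup()/si_dma_is_lockup() in the patch. */
static bool ring_is_hung(enum ring_id ring, uint32_t reset_mask)
{
    uint32_t mask;

    switch (ring) {
    case RING_GFX:
        mask = RESET_GFX | RESET_COMPUTE | RESET_CP;
        break;
    case RING_DMA0:
        mask = RESET_DMA;
        break;
    default:
        mask = RESET_DMA1;
        break;
    }
    return (reset_mask & mask) != 0;
}

int main(void)
{
    uint32_t reset_mask = RESET_DMA1; /* pretend only the second DMA engine is stuck */

    printf("gfx hung: %d, dma0 hung: %d, dma1 hung: %d\n",
           ring_is_hung(RING_GFX, reset_mask),
           ring_is_hung(RING_DMA0, reset_mask),
           ring_is_hung(RING_DMA1, reset_mask));
    return 0;
}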