Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu')
drivers/gpu/drm/amd/amdgpu/Kconfig | 9
drivers/gpu/drm/amd/amdgpu/Makefile | 14
drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c | 54
drivers/gpu/drm/amd/amdgpu/amdgpu.h | 56
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 12
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 47
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 16
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h | 30
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 37
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 43
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 73
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 16
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 14
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 184
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h | 14
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 269
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 72
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 29
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 128
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 4
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 51
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 26
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 3
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 22
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 9
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 3
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 150
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 31
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 23
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c | 54
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h | 6
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 37
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 133
drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 8
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 22
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 3
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 95
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 15
drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c | 7
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 191
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 13
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 43
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 4
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 105
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 5
drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c | 3
drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h | 2
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 458
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 9
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 1
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 4
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 8
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 13
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 4
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 17
drivers/gpu/drm/amd/amdgpu/athub_v2_1.c | 6
drivers/gpu/drm/amd/amdgpu/atom.c | 4
drivers/gpu/drm/amd/amdgpu/atom.h | 2
drivers/gpu/drm/amd/amdgpu/cik.c | 77
drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 2
drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c (mode -rwxr-xr-x -> -rw-r--r--) | 0
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 9
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 161
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 1286
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h | 40
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 75
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c | 54
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 7
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 1
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 1
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 1
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 152
drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c | 3
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 65
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h | 1
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 1329
drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h | 28
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 48
drivers/gpu/drm/amd/amdgpu/nv.c | 221
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 1
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c | 378
drivers/gpu/drm/amd/amdgpu/psp_v13_0.h | 30
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 181
drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c | 232
drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h | 28
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c | 4
drivers/gpu/drm/amd/amdgpu/si.c | 111
drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c | 121
drivers/gpu/drm/amd/amdgpu/smuio_v13_0.h | 30
drivers/gpu/drm/amd/amdgpu/soc15.c | 360
drivers/gpu/drm/amd/amdgpu/soc15.h | 12
drivers/gpu/drm/amd/amdgpu/soc15_common.h | 48
drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h | 1
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 99
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h | 1
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 24
drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 32
drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 49
drivers/gpu/drm/amd/amdgpu/vi.c | 190
102 files changed, 6981 insertions, 1231 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index 9375e7f12420..74a8105fd2c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -34,15 +34,6 @@ config DRM_AMDGPU_USERPTR
This option selects CONFIG_HMM and CONFIG_HMM_MIRROR if it
isn't already selected to enabled full userptr support.
-config DRM_AMDGPU_GART_DEBUGFS
- bool "Allow GART access through debugfs"
- depends on DRM_AMDGPU
- depends on DEBUG_FS
- default n
- help
- Selecting this option creates a debugfs file to inspect the mapped
- pages. Uses more memory for housekeeping, enable only for debugging.
-
source "drivers/gpu/drm/amd/acp/Kconfig"
source "drivers/gpu/drm/amd/display/Kconfig"
source "drivers/gpu/drm/amd/amdkfd/Kconfig"
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 13ebb1f71e49..741b68874e53 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -71,7 +71,7 @@ amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
- nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o
+ nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o
# add DF block
amdgpu-y += \
@@ -83,7 +83,8 @@ amdgpu-y += \
gmc_v7_0.o \
gmc_v8_0.o \
gfxhub_v1_0.o mmhub_v1_0.o gmc_v9_0.o gfxhub_v1_1.o mmhub_v9_4.o \
- gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o
+ gfxhub_v2_0.o mmhub_v2_0.o gmc_v10_0.o gfxhub_v2_1.o mmhub_v2_3.o \
+ mmhub_v1_7.o
# add UMC block
amdgpu-y += \
@@ -106,7 +107,8 @@ amdgpu-y += \
psp_v3_1.o \
psp_v10_0.o \
psp_v11_0.o \
- psp_v12_0.o
+ psp_v12_0.o \
+ psp_v13_0.o
# add DCE block
amdgpu-y += \
@@ -121,6 +123,7 @@ amdgpu-y += \
gfx_v8_0.o \
gfx_v9_0.o \
gfx_v9_4.o \
+ gfx_v9_4_2.o \
gfx_v10_0.o
# add async DMA block
@@ -129,6 +132,7 @@ amdgpu-y += \
sdma_v2_4.o \
sdma_v3_0.o \
sdma_v4_0.o \
+ sdma_v4_4.o \
sdma_v5_0.o \
sdma_v5_2.o
@@ -172,7 +176,8 @@ amdgpu-y += \
amdgpu-y += \
smuio_v9_0.o \
smuio_v11_0.o \
- smuio_v11_0_6.o
+ smuio_v11_0_6.o \
+ smuio_v13_0.o
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o
@@ -187,6 +192,7 @@ amdgpu-y += \
amdgpu_amdkfd_gfx_v8.o \
amdgpu_amdkfd_gfx_v9.o \
amdgpu_amdkfd_arcturus.o \
+ amdgpu_amdkfd_aldebaran.o \
amdgpu_amdkfd_gfx_v10.o \
amdgpu_amdkfd_gfx_v10_3.o
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c b/drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c
new file mode 100644
index 000000000000..28e6c9ab8767
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran_reg_init.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "soc15_common.h"
+#include "aldebaran_ip_offset.h"
+
+int aldebaran_reg_base_init(struct amdgpu_device *adev)
+{
+ /* HW has more IP blocks, only initialized the block needed by our driver */
+ uint32_t i;
+ for (i = 0 ; i < MAX_INSTANCE ; ++i) {
+ adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
+ adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
+ adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
+ adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
+ adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
+ adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
+ adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
+ adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
+ adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
+ adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(SDMA2_BASE.instance[i]));
+ adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(SDMA3_BASE.instance[i]));
+ adev->reg_offset[SDMA4_HWIP][i] = (uint32_t *)(&(SDMA4_BASE.instance[i]));
+ adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i]));
+ adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
+ }
+ return 0;
+}
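
Note: the reg_offset tables populated above are what the SoC15 register helpers index into. A minimal sketch of the consumer side, assuming the usual SOC15_REG_OFFSET() macro from soc15_common.h; the GC register chosen here is only an example and is not part of this patch.

/* Sketch only: shows how the tables filled in aldebaran_reg_base_init()
 * are consumed. SOC15_REG_OFFSET() resolves reg_offset[GC_HWIP][0], which
 * was pointed at GC_BASE.instance[0] above; mmGRBM_STATUS is illustrative.
 */
static u32 example_read_gc_status(struct amdgpu_device *adev)
{
	return RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS));
}
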
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1af2fa1591fd..a037c223c251 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -124,6 +124,16 @@ struct amdgpu_mgpu_info
uint32_t num_gpu;
uint32_t num_dgpu;
uint32_t num_apu;
+
+ /* delayed reset_func for XGMI configuration if necessary */
+ struct delayed_work delayed_reset_work;
+ bool pending_reset;
+};
+
+struct amdgpu_watchdog_timer
+{
+ bool timeout_fatal_disable;
+ uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */
};
#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH 256
@@ -177,7 +187,9 @@ extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
+extern int amdgpu_smu_pptable_id;
extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_freesync_vid_mode;
extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dm_abm_level;
extern int amdgpu_backlight;
@@ -185,6 +197,7 @@ extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
extern int amdgpu_bad_page_threshold;
+extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
extern int amdgpu_discovery;
@@ -584,6 +597,19 @@ enum amd_reset_method {
AMD_RESET_METHOD_PCI,
};
+struct amdgpu_video_codec_info {
+ u32 codec_type;
+ u32 max_width;
+ u32 max_height;
+ u32 max_pixels_per_frame;
+ u32 max_level;
+};
+
+struct amdgpu_video_codecs {
+ const u32 codec_count;
+ const struct amdgpu_video_codec_info *codec_array;
+};
+
/*
* ASIC specific functions.
*/
@@ -628,6 +654,9 @@ struct amdgpu_asic_funcs {
void (*pre_asic_init)(struct amdgpu_device *adev);
/* enter/exit umd stable pstate */
int (*update_umd_stable_pstate)(struct amdgpu_device *adev, bool enter);
+ /* query video codecs */
+ int (*query_video_codecs)(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs);
};
/*
@@ -792,12 +821,7 @@ struct amdgpu_device {
bool accel_working;
struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
- struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
- unsigned debugfs_count;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *debugfs_preempt;
- struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
-#endif
+ struct debugfs_blob_wrapper debugfs_vbios_blob;
struct amdgpu_atif *atif;
struct amdgpu_atcs atcs;
struct mutex srbm_mutex;
@@ -853,8 +877,6 @@ struct amdgpu_device {
spinlock_t audio_endpt_idx_lock;
amdgpu_block_rreg_t audio_endpt_rreg;
amdgpu_block_wreg_t audio_endpt_wreg;
- void __iomem *rio_mem;
- resource_size_t rio_mem_size;
struct amdgpu_doorbell doorbell;
/* clock/pll info */
@@ -897,6 +919,7 @@ struct amdgpu_device {
struct amdgpu_irq_src vupdate_irq;
struct amdgpu_irq_src pageflip_irq;
struct amdgpu_irq_src hpd_irq;
+ struct amdgpu_irq_src dmub_trace_irq;
/* rings */
u64 fence_context;
@@ -1024,6 +1047,7 @@ struct amdgpu_device {
int asic_reset_res;
struct work_struct xgmi_reset_work;
+ struct list_head reset_list;
long gfx_timeout;
long sdma_timeout;
@@ -1088,9 +1112,6 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
-u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
-void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
-
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
u32 pcie_index, u32 pcie_data,
u32 reg_addr);
@@ -1107,6 +1128,15 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
+int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ bool *need_full_reset_arg);
+
+int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
+ struct list_head *device_list_handle,
+ bool *need_full_reset_arg,
+ bool skip_hw_reset);
+
int emu_soc_asic_init(struct amdgpu_device *adev);
/*
@@ -1172,8 +1202,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
-#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
-#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK
@@ -1227,6 +1255,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_asic_pre_asic_init(adev) (adev)->asic_funcs->pre_asic_init((adev))
#define amdgpu_asic_update_umd_stable_pstate(adev, enter) \
((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0)
+#define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c))
#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
@@ -1247,6 +1276,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 array_size);
bool amdgpu_device_supports_atpx(struct drm_device *dev);
+int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_baco(struct drm_device *dev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
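
Note: the new query_video_codecs hook is optional per ASIC, and unlike update_umd_stable_pstate the amdgpu_asic_query_video_codecs() macro added above does not NULL-check the callback. A hedged caller sketch, where the function name and log text are illustrative and not from this patch:

/* Illustrative caller only; checks the optional hook before using the macro. */
static void example_report_decode_caps(struct amdgpu_device *adev)
{
	const struct amdgpu_video_codecs *codecs;

	if (!adev->asic_funcs->query_video_codecs)
		return;
	if (amdgpu_asic_query_video_codecs(adev, false, &codecs))
		return;
	dev_info(adev->dev, "decode codecs supported: %u\n", codecs->codec_count);
}
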
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index c5343a5eecbe..1c6be53313a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -44,7 +44,7 @@ int amdgpu_amdkfd_init(void)
int ret;
si_meminfo(&si);
- amdgpu_amdkfd_total_mem_size = si.totalram - si.totalhigh;
+ amdgpu_amdkfd_total_mem_size = si.freeram - si.freehigh;
amdgpu_amdkfd_total_mem_size *= si.mem_unit;
ret = kgd2kfd_init();
@@ -165,7 +165,8 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
adev->doorbell_index.last_non_cp;
}
- kgd2kfd_device_init(adev->kfd.dev, adev_to_drm(adev), &gpu_resources);
+ adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,
+ adev_to_drm(adev), &gpu_resources);
}
}
@@ -638,13 +639,6 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- /* Temp workaround to fix the soft hang observed in certain compute
- * applications if GFXOFF is enabled.
- */
- if (adev->asic_type == CHIP_SIENNA_CICHLID) {
- pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled");
- amdgpu_gfx_off_ctrl(adev, idle);
- }
amdgpu_dpm_switch_power_profile(adev,
PP_SMC_POWER_PROFILE_COMPUTE,
!idle);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index a81d9cacf9b8..14f68c028126 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -80,6 +80,7 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
uint64_t vram_used;
+ bool init_complete;
};
enum kgd_engine_type {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
new file mode 100644
index 000000000000..a5434b713856
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "amdgpu.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_amdkfd_arcturus.h"
+#include "amdgpu_amdkfd_gfx_v9.h"
+
+const struct kfd2kgd_calls aldebaran_kfd2kgd = {
+ .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
+ .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
+ .init_interrupts = kgd_gfx_v9_init_interrupts,
+ .hqd_load = kgd_gfx_v9_hqd_load,
+ .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
+ .hqd_sdma_load = kgd_arcturus_hqd_sdma_load,
+ .hqd_dump = kgd_gfx_v9_hqd_dump,
+ .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump,
+ .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
+ .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied,
+ .hqd_destroy = kgd_gfx_v9_hqd_destroy,
+ .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy,
+ .address_watch_disable = kgd_gfx_v9_address_watch_disable,
+ .address_watch_execute = kgd_gfx_v9_address_watch_execute,
+ .wave_control_execute = kgd_gfx_v9_wave_control_execute,
+ .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
+ .get_atc_vmid_pasid_mapping_info =
+ kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
+ .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 604757a1e440..9ef9f3ddad48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -122,7 +122,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
return sdma_rlc_reg_offset;
}
-static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
uint32_t __user *wptr, struct mm_struct *mm)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -192,7 +192,7 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
return 0;
}
-static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
uint32_t engine_id, uint32_t queue_id,
uint32_t (**dump)[2], uint32_t *n_regs)
{
@@ -224,7 +224,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
return 0;
}
-static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct v9_sdma_mqd *m;
@@ -243,7 +243,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
return false;
}
-static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
unsigned int utimeout)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
@@ -289,13 +289,13 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.init_interrupts = kgd_gfx_v9_init_interrupts,
.hqd_load = kgd_gfx_v9_hqd_load,
.hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
- .hqd_sdma_load = kgd_hqd_sdma_load,
+ .hqd_sdma_load = kgd_arcturus_hqd_sdma_load,
.hqd_dump = kgd_gfx_v9_hqd_dump,
- .hqd_sdma_dump = kgd_hqd_sdma_dump,
+ .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump,
.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
- .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
+ .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied,
.hqd_destroy = kgd_gfx_v9_hqd_destroy,
- .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
+ .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy,
.address_watch_disable = kgd_gfx_v9_address_watch_disable,
.address_watch_execute = kgd_gfx_v9_address_watch_execute,
.wave_control_execute = kgd_gfx_v9_wave_control_execute,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
new file mode 100644
index 000000000000..ce08131b7b5f
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ uint32_t __user *wptr, struct mm_struct *mm);
+int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd,
+ uint32_t engine_id, uint32_t queue_id,
+ uint32_t (**dump)[2], uint32_t *n_regs);
+bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
+ unsigned int utimeout);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 9f6b299cbf74..e93850f2f3b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -31,6 +31,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
#include <uapi/linux/kfd_ioctl.h>
+#include "amdgpu_xgmi.h"
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -96,7 +97,7 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
uint64_t mem;
si_meminfo(&si);
- mem = si.totalram - si.totalhigh;
+ mem = si.freeram - si.freehigh;
mem *= si.mem_unit;
spin_lock_init(&kfd_mem_limit.mem_limit_lock);
@@ -412,7 +413,10 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
+ bool uncached = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED;
uint32_t mapping_flags;
+ uint64_t pte_flags;
+ bool snoop = false;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
@@ -433,12 +437,41 @@ static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
break;
+ case CHIP_ALDEBARAN:
+ if (coherent && uncached) {
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ !(mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM))
+ snoop = true;
+ mapping_flags |= AMDGPU_VM_MTYPE_UC;
+ } else if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
+ if (bo_adev == adev) {
+ mapping_flags |= AMDGPU_VM_MTYPE_RW;
+ if (adev->gmc.xgmi.connected_to_cpu)
+ snoop = true;
+ } else {
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ if (amdgpu_xgmi_same_hive(adev, bo_adev))
+ snoop = true;
+ }
+ } else {
+ snoop = true;
+ if (adev->gmc.xgmi.connected_to_cpu)
+ /* system memory uses NC on A+A */
+ mapping_flags |= AMDGPU_VM_MTYPE_NC;
+ else
+ mapping_flags |= coherent ?
+ AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
+ }
+ break;
default:
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC;
}
- return amdgpu_gem_va_map_flags(adev, mapping_flags);
+ pte_flags = amdgpu_gem_va_map_flags(adev, mapping_flags);
+ pte_flags |= snoop ? AMDGPU_PTE_SNOOPED : 0;
+
+ return pte_flags;
}
/* add_bo_to_vm - Add a BO to a VM
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 86add0f4ea4d..e05648a8a145 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -1905,40 +1905,6 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
return r;
}
-/**
- * cail_ioreg_write - write IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- * @val: value to write to the pll register
- *
- * Provides a IO register accessor for the atom interpreter (r4xx+).
- */
-static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
-{
- struct amdgpu_device *adev = drm_to_adev(info->dev);
-
- WREG32_IO(reg, val);
-}
-
-/**
- * cail_ioreg_read - read IO register
- *
- * @info: atom card_info pointer
- * @reg: IO register offset
- *
- * Provides an IO register accessor for the atom interpreter (r4xx+).
- * Returns the value of the IO register.
- */
-static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
-{
- struct amdgpu_device *adev = drm_to_adev(info->dev);
- uint32_t r;
-
- r = RREG32_IO(reg);
- return r;
-}
-
static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1998,15 +1964,6 @@ int amdgpu_atombios_init(struct amdgpu_device *adev)
atom_card_info->dev = adev_to_drm(adev);
atom_card_info->reg_read = cail_reg_read;
atom_card_info->reg_write = cail_reg_write;
- /* needed for iio ops */
- if (adev->rio_mem) {
- atom_card_info->ioreg_read = cail_ioreg_read;
- atom_card_info->ioreg_write = cail_ioreg_write;
- } else {
- DRM_DEBUG("PCI I/O BAR is not found. Using MMIO to access ATOM BIOS\n");
- atom_card_info->ioreg_read = cail_reg_read;
- atom_card_info->ioreg_write = cail_reg_write;
- }
atom_card_info->mc_read = cail_mc_read;
atom_card_info->mc_write = cail_mc_write;
atom_card_info->pll_read = cail_pll_read;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 6107ac91db25..60716b35444b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -117,12 +117,15 @@ union igp_info {
union umc_info {
struct atom_umc_info_v3_1 v31;
+ struct atom_umc_info_v3_2 v32;
+ struct atom_umc_info_v3_3 v33;
};
union vram_info {
struct atom_vram_info_header_v2_3 v23;
struct atom_vram_info_header_v2_4 v24;
struct atom_vram_info_header_v2_5 v25;
+ struct atom_vram_info_header_v2_6 v26;
};
union vram_module {
@@ -164,6 +167,7 @@ static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
vram_type = AMDGPU_VRAM_TYPE_GDDR5;
break;
case ATOM_DGPU_VRAM_TYPE_HBM2:
+ case ATOM_DGPU_VRAM_TYPE_HBM2E:
vram_type = AMDGPU_VRAM_TYPE_HBM;
break;
case ATOM_DGPU_VRAM_TYPE_GDDR6:
@@ -315,6 +319,26 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
if (vram_vendor)
*vram_vendor = mem_vendor;
break;
+ case 6:
+ if (module_id > vram_info->v26.vram_module_num)
+ module_id = 0;
+ vram_module = (union vram_module *)vram_info->v26.vram_module;
+ while (i < module_id) {
+ vram_module = (union vram_module *)
+ ((u8 *)vram_module + vram_module->v9.vram_module_size);
+ i++;
+ }
+ mem_type = vram_module->v9.memory_type;
+ if (vram_type)
+ *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+ mem_channel_number = vram_module->v9.channel_num;
+ mem_channel_width = vram_module->v9.channel_width;
+ if (vram_width)
+ *vram_width = mem_channel_number * (1 << mem_channel_width);
+ mem_vendor = (vram_module->v9.vender_rev_id) & 0xF;
+ if (vram_vendor)
+ *vram_vendor = mem_vendor;
+ break;
default:
return -EINVAL;
}
@@ -337,19 +361,39 @@ bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
union umc_info *umc_info;
u8 frev, crev;
bool ecc_default_enabled = false;
+ u8 umc_config;
+ u32 umc_config1;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
umc_info);
if (amdgpu_atom_parse_data_header(mode_info->atom_context,
index, &size, &frev, &crev, &data_offset)) {
- /* support umc_info 3.1+ */
- if ((frev == 3 && crev >= 1) || (frev > 3)) {
+ if (frev == 3) {
umc_info = (union umc_info *)
(mode_info->atom_context->bios + data_offset);
- ecc_default_enabled =
- (le32_to_cpu(umc_info->v31.umc_config) &
- UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ switch (crev) {
+ case 1:
+ umc_config = le32_to_cpu(umc_info->v31.umc_config);
+ ecc_default_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ break;
+ case 2:
+ umc_config = le32_to_cpu(umc_info->v32.umc_config);
+ ecc_default_enabled =
+ (umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ break;
+ case 3:
+ umc_config = le32_to_cpu(umc_info->v33.umc_config);
+ umc_config1 = le32_to_cpu(umc_info->v33.umc_config1);
+ ecc_default_enabled =
+ ((umc_config & UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ||
+ (umc_config1 & UMC_CONFIG1__ENABLE_ECC_CAPABLE)) ? true : false;
+ break;
+ default:
+ /* unsupported crev */
+ return false;
+ }
}
}
@@ -479,7 +523,8 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
}
union gfx_info {
- struct atom_gfx_info_v2_4 v24;
+ struct atom_gfx_info_v2_4 v24;
+ struct atom_gfx_info_v2_7 v27;
};
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
@@ -514,6 +559,22 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);
return 0;
+ case 7:
+ adev->gfx.config.max_shader_engines = gfx_info->v27.max_shader_engines;
+ adev->gfx.config.max_cu_per_sh = gfx_info->v27.max_cu_per_sh;
+ adev->gfx.config.max_sh_per_se = gfx_info->v27.max_sh_per_se;
+ adev->gfx.config.max_backends_per_se = gfx_info->v27.max_backends_per_se;
+ adev->gfx.config.max_texture_channel_caches = gfx_info->v27.max_texture_channel_caches;
+ adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v27.gc_num_gprs);
+ adev->gfx.config.max_gs_threads = gfx_info->v27.gc_num_max_gs_thds;
+ adev->gfx.config.gs_vgt_table_depth = gfx_info->v27.gc_gs_table_depth;
+ adev->gfx.config.gs_prim_buffer_depth = le16_to_cpu(gfx_info->v27.gc_gsprim_buff_depth);
+ adev->gfx.config.double_offchip_lds_buf = gfx_info->v27.gc_double_offchip_lds_buffer;
+ adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v27.gc_wave_size);
+ adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v27.gc_max_waves_per_simd);
+ adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v27.gc_max_scratch_slots_per_cu;
+ adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v27.gc_lds_size);
+ return 0;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index cfb1a9a04477..27b19503773b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -97,6 +97,10 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
if (amdgpu_device_need_post(adev))
return false;
+ /* FB BAR not enabled */
+ if (pci_resource_len(adev->pdev, 0) == 0)
+ return false;
+
adev->bios = NULL;
vram_base = pci_resource_start(adev->pdev, 0);
bios = ioremap_wc(vram_base, size);
@@ -316,7 +320,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
adev->bios = kmalloc(size, GFP_KERNEL);
if (!adev->bios) {
- DRM_ERROR("Unable to allocate bios\n");
+ dev_err(adev->dev, "Unable to allocate bios\n");
return false;
}
@@ -364,7 +368,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
return false;
tbl_size = hdr->length;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
- DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
+ dev_info(adev->dev, "ACPI VFCT table present but broken (too short #1),skipping\n");
return false;
}
@@ -377,13 +381,13 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
offset += sizeof(VFCT_IMAGE_HEADER);
if (offset > tbl_size) {
- DRM_ERROR("ACPI VFCT image header truncated\n");
+ dev_info(adev->dev, "ACPI VFCT image header truncated,skipping\n");
return false;
}
offset += vhdr->ImageLength;
if (offset > tbl_size) {
- DRM_ERROR("ACPI VFCT image truncated\n");
+ dev_info(adev->dev, "ACPI VFCT image truncated,skipping\n");
return false;
}
@@ -406,7 +410,7 @@ static bool amdgpu_acpi_vfct_bios(struct amdgpu_device *adev)
}
}
- DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
+ dev_info(adev->dev, "ACPI VFCT table present but broken (too short #2),skipping\n");
return false;
}
#else
@@ -453,7 +457,7 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
goto success;
}
- DRM_ERROR("Unable to locate a BIOS ROM\n");
+ dev_err(adev->dev, "Unable to locate a BIOS ROM\n");
return false;
success:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 3e240b952e79..b5c766998045 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -117,7 +117,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
if (cs->in.num_chunks == 0)
return 0;
- chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+ chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (!chunk_array)
return -ENOMEM;
@@ -144,7 +144,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
}
p->nchunks = cs->in.num_chunks;
- p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
+ p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
GFP_KERNEL);
if (!p->chunks) {
ret = -ENOMEM;
@@ -238,7 +238,7 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
if (p->uf_entry.tv.bo)
p->job->uf_addr = uf_offset;
- kfree(chunk_array);
+ kvfree(chunk_array);
/* Use this opportunity to fill in task info for the vm */
amdgpu_vm_set_task_info(vm);
@@ -250,11 +250,11 @@ free_all_kdata:
free_partial_kdata:
for (; i >= 0; i--)
kvfree(p->chunks[i].kdata);
- kfree(p->chunks);
+ kvfree(p->chunks);
p->chunks = NULL;
p->nchunks = 0;
free_chunk:
- kfree(chunk_array);
+ kvfree(chunk_array);
return ret;
}
@@ -559,7 +559,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
sizeof(struct page *),
GFP_KERNEL | __GFP_ZERO);
if (!e->user_pages) {
- DRM_ERROR("calloc failure\n");
+ DRM_ERROR("kvmalloc_array failure\n");
return -ENOMEM;
}
@@ -706,7 +706,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
for (i = 0; i < parser->nchunks; i++)
kvfree(parser->chunks[i].kdata);
- kfree(parser->chunks);
+ kvfree(parser->chunks);
if (parser->job)
amdgpu_job_free(parser->job);
if (parser->uf_entry.tv.bo) {
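
Note: the kmalloc_array()/kfree() to kvmalloc_array()/kvfree() switch above lets large, user-controlled chunk counts fall back to vmalloc instead of failing a high-order allocation. A minimal sketch of the pairing, assuming only the generic helpers from linux/mm.h; the count and element type are placeholders:

/* Placeholder example of the kvmalloc_array()/kvfree() pairing used above. */
static int example_alloc(unsigned int count)
{
	u64 *arr = kvmalloc_array(count, sizeof(*arr), GFP_KERNEL);

	if (!arr)
		return -ENOMEM;
	/* ... fill and use arr ... */
	kvfree(arr);	/* frees both kmalloc- and vmalloc-backed memory */
	return 0;
}
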
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 43059ead733b..bcaf271b39bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -28,7 +28,6 @@
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/poll.h>
-#include <drm/drm_debugfs.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
@@ -38,45 +37,6 @@
#include "amdgpu_securedisplay.h"
#include "amdgpu_fw_attestation.h"
-/**
- * amdgpu_debugfs_add_files - Add simple debugfs entries
- *
- * @adev: Device to attach debugfs entries to
- * @files: Array of function callbacks that respond to reads
- * @nfiles: Number of callbacks to register
- *
- */
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles)
-{
- unsigned i;
-
- for (i = 0; i < adev->debugfs_count; i++) {
- if (adev->debugfs[i].files == files) {
- /* Already registered */
- return 0;
- }
- }
-
- i = adev->debugfs_count + 1;
- if (i > AMDGPU_DEBUGFS_MAX_COMPONENTS) {
- DRM_ERROR("Reached maximum number of debugfs components.\n");
- DRM_ERROR("Report so we increase "
- "AMDGPU_DEBUGFS_MAX_COMPONENTS.\n");
- return -EINVAL;
- }
- adev->debugfs[adev->debugfs_count].files = files;
- adev->debugfs[adev->debugfs_count].num_files = nfiles;
- adev->debugfs_count = i;
-#if defined(CONFIG_DEBUG_FS)
- drm_debugfs_create_files(files, nfiles,
- adev_to_drm(adev)->primary->debugfs_root,
- adev_to_drm(adev)->primary);
-#endif
- return 0;
-}
-
int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
@@ -1228,22 +1188,20 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
adev, debugfs_regs[i]);
if (!i && !IS_ERR_OR_NULL(ent))
i_size_write(ent->d_inode, adev->rmmio_size);
- adev->debugfs_regs[i] = ent;
}
return 0;
}
-static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
+static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct drm_device *dev = adev_to_drm(adev);
int r = 0, i;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return r;
}
@@ -1285,30 +1243,19 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
return 0;
}
-static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
+static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
-
- seq_write(m, adev->bios, adev->bios_size);
- return 0;
-}
-
-static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ struct drm_device *dev = adev_to_drm(adev);
int r;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return r;
}
- seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
+ *val = amdgpu_bo_evict_vram(adev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1316,11 +1263,11 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
return 0;
}
-static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
+
+static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ struct drm_device *dev = adev_to_drm(adev);
struct ttm_resource_manager *man;
int r;
@@ -1331,8 +1278,7 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
}
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- r = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
- seq_printf(m, "(%d)\n", r);
+ *val = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1340,10 +1286,11 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
return 0;
}
-static int amdgpu_debugfs_vm_info(struct seq_file *m, void *data)
+
+static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_file *file;
int r;
@@ -1369,13 +1316,12 @@ static int amdgpu_debugfs_vm_info(struct seq_file *m, void *data)
return r;
}
-static const struct drm_info_list amdgpu_debugfs_list[] = {
- {"amdgpu_vbios", amdgpu_debugfs_get_vbios_dump},
- {"amdgpu_test_ib", &amdgpu_debugfs_test_ib},
- {"amdgpu_evict_vram", &amdgpu_debugfs_evict_vram},
- {"amdgpu_evict_gtt", &amdgpu_debugfs_evict_gtt},
- {"amdgpu_vm_info", &amdgpu_debugfs_vm_info},
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_test_ib);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_vm_info);
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_vram_fops, amdgpu_debugfs_evict_vram,
+ NULL, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_evict_gtt_fops, amdgpu_debugfs_evict_gtt,
+ NULL, "%lld\n");
static void amdgpu_ib_preempt_fences_swap(struct amdgpu_ring *ring,
struct dma_fence **fences)
@@ -1586,71 +1532,50 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL,
amdgpu_debugfs_ib_preempt, "%llu\n");
-DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
+DEFINE_DEBUGFS_ATTRIBUTE(fops_sclk_set, NULL,
amdgpu_debugfs_sclk_set, "%llu\n");
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
+ struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
+ struct dentry *ent;
int r, i;
- adev->debugfs_preempt =
- debugfs_create_file("amdgpu_preempt_ib", 0600,
- adev_to_drm(adev)->primary->debugfs_root, adev,
- &fops_ib_preempt);
- if (!(adev->debugfs_preempt)) {
+
+
+ ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
+ &fops_ib_preempt);
+ if (!ent) {
DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n");
return -EIO;
}
- adev->smu.debugfs_sclk =
- debugfs_create_file("amdgpu_force_sclk", 0200,
- adev_to_drm(adev)->primary->debugfs_root, adev,
- &fops_sclk_set);
- if (!(adev->smu.debugfs_sclk)) {
+ ent = debugfs_create_file("amdgpu_force_sclk", 0200, root, adev,
+ &fops_sclk_set);
+ if (!ent) {
DRM_ERROR("unable to create amdgpu_set_sclk debugsfs file\n");
return -EIO;
}
/* Register debugfs entries for amdgpu_ttm */
- r = amdgpu_ttm_debugfs_init(adev);
- if (r) {
- DRM_ERROR("Failed to init debugfs\n");
- return r;
- }
-
- r = amdgpu_debugfs_pm_init(adev);
- if (r) {
- DRM_ERROR("Failed to register debugfs file for dpm!\n");
- return r;
- }
-
- if (amdgpu_debugfs_sa_init(adev)) {
- dev_err(adev->dev, "failed to register debugfs file for SA\n");
- }
-
- if (amdgpu_debugfs_fence_init(adev))
- dev_err(adev->dev, "fence debugfs file creation failed\n");
-
- r = amdgpu_debugfs_gem_init(adev);
- if (r)
- DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+ amdgpu_ttm_debugfs_init(adev);
+ amdgpu_debugfs_pm_init(adev);
+ amdgpu_debugfs_sa_init(adev);
+ amdgpu_debugfs_fence_init(adev);
+ amdgpu_debugfs_gem_init(adev);
r = amdgpu_debugfs_regs_init(adev);
if (r)
DRM_ERROR("registering register debugfs failed (%d).\n", r);
- r = amdgpu_debugfs_firmware_init(adev);
- if (r)
- DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+ amdgpu_debugfs_firmware_init(adev);
#if defined(CONFIG_DRM_AMD_DC)
- if (amdgpu_device_has_dc_support(adev)) {
- if (dtn_debugfs_init(adev))
- DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
- }
+ if (amdgpu_device_has_dc_support(adev))
+ dtn_debugfs_init(adev);
#endif
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1665,17 +1590,26 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
}
amdgpu_ras_debugfs_create_all(adev);
-
amdgpu_debugfs_autodump_init(adev);
-
amdgpu_rap_debugfs_init(adev);
-
amdgpu_securedisplay_debugfs_init(adev);
-
amdgpu_fw_attestation_debugfs_init(adev);
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
- ARRAY_SIZE(amdgpu_debugfs_list));
+ debugfs_create_file("amdgpu_evict_vram", 0444, root, adev,
+ &amdgpu_evict_vram_fops);
+ debugfs_create_file("amdgpu_evict_gtt", 0444, root, adev,
+ &amdgpu_evict_gtt_fops);
+ debugfs_create_file("amdgpu_test_ib", 0444, root, adev,
+ &amdgpu_debugfs_test_ib_fops);
+ debugfs_create_file("amdgpu_vm_info", 0444, root, adev,
+ &amdgpu_debugfs_vm_info_fops);
+
+ adev->debugfs_vbios_blob.data = adev->bios;
+ adev->debugfs_vbios_blob.size = adev->bios_size;
+ debugfs_create_blob("amdgpu_vbios", 0444, root,
+ &adev->debugfs_vbios_blob);
+
+ return 0;
}
#else
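
Note: the debugfs rework above drops the drm_info_list wrappers in favor of the generic DEFINE_SHOW_ATTRIBUTE()/DEFINE_DEBUGFS_ATTRIBUTE() helpers plus debugfs_create_file()/debugfs_create_blob(). A stand-alone sketch of the DEFINE_SHOW_ATTRIBUTE() pattern, with a made-up node name and payload:

/* Sketch only: "example" is a hypothetical node, not one added by this patch. */
static int example_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = m->private;	/* i_private from create_file */

	seq_printf(m, "vram lost counter: %d\n",
		   atomic_read(&adev->vram_lost_counter));
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example);	/* generates example_fops */

/* registration, e.g. from amdgpu_debugfs_init():
 *	debugfs_create_file("amdgpu_example", 0444, root, adev, &example_fops);
 */
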
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 2803884d338d..141a8474e24f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -26,11 +26,6 @@
/*
* Debugfs
*/
-struct amdgpu_debugfs {
- const struct drm_info_list *files;
- unsigned num_files;
-};
-
struct amdgpu_autodump {
struct completion dumping;
struct wait_queue_head gpu_hang;
@@ -39,10 +34,7 @@ struct amdgpu_autodump {
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
void amdgpu_debugfs_fini(struct amdgpu_device *adev);
-int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
- const struct drm_info_list *files,
- unsigned nfiles);
-int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
-int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
-int amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6447cd6ca5a8..0f82c5d21237 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -110,6 +110,7 @@ const char *amdgpu_asic_name[] = {
"RAVEN",
"ARCTURUS",
"RENOIR",
+ "ALDEBARAN",
"NAVI10",
"NAVI14",
"NAVI12",
@@ -465,49 +466,6 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
}
/**
- * amdgpu_io_rreg - read an IO register
- *
- * @adev: amdgpu_device pointer
- * @reg: dword aligned register offset
- *
- * Returns the 32 bit value from the offset specified.
- */
-u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
-{
- if (adev->in_pci_err_recovery)
- return 0;
-
- if ((reg * 4) < adev->rio_mem_size)
- return ioread32(adev->rio_mem + (reg * 4));
- else {
- iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
- return ioread32(adev->rio_mem + (mmMM_DATA * 4));
- }
-}
-
-/**
- * amdgpu_io_wreg - write to an IO register
- *
- * @adev: amdgpu_device pointer
- * @reg: dword aligned register offset
- * @v: 32 bit value to write to the register
- *
- * Writes the value specified to the offset specified.
- */
-void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
-{
- if (adev->in_pci_err_recovery)
- return;
-
- if ((reg * 4) < adev->rio_mem_size)
- iowrite32(v, adev->rio_mem + (reg * 4));
- else {
- iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
- iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
- }
-}
-
-/**
* amdgpu_mm_rdoorbell - read a doorbell dword
*
* @adev: amdgpu_device pointer
@@ -1223,6 +1181,10 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
}
}
+ /* Don't post if we need to reset whole hive on init */
+ if (adev->gmc.xgmi.pending_reset)
+ return false;
+
if (adev->has_hw_reset) {
adev->has_hw_reset = false;
return true;
@@ -1810,6 +1772,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_VEGA20:
+ case CHIP_ALDEBARAN:
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
@@ -2010,6 +1973,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
case CHIP_RAVEN:
case CHIP_ARCTURUS:
case CHIP_RENOIR:
+ case CHIP_ALDEBARAN:
if (adev->flags & AMD_IS_APU)
adev->family = AMDGPU_FAMILY_RV;
else
@@ -2045,6 +2009,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
adev->pm.pp_feature = amdgpu_pp_feature_mask;
if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
+ adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
@@ -2149,6 +2115,9 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
continue;
+ if (!adev->ip_blocks[i].status.sw)
+ continue;
+
/* no need to do the fw loading again if already done*/
if (adev->ip_blocks[i].status.hw == true)
break;
@@ -2289,7 +2258,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_add_device(adev);
- amdgpu_amdkfd_device_init(adev);
+
+ /* Don't init kfd if whole hive need to be reset during init */
+ if (!adev->gmc.xgmi.pending_reset)
+ amdgpu_amdkfd_device_init(adev);
amdgpu_fru_get_product_info(adev);
@@ -2498,6 +2470,11 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+ /* For XGMI + passthrough configuration on arcturus, enable light SBR */
+ if (adev->asic_type == CHIP_ARCTURUS &&
+ amdgpu_passthrough(adev) &&
+ adev->gmc.xgmi.num_physical_nodes > 1)
+ smu_set_light_sbr(&adev->smu, true);
if (adev->gmc.xgmi.num_physical_nodes > 1) {
mutex_lock(&mgpu_info.mutex);
@@ -2734,6 +2711,16 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
continue;
}
+
+ /* skip unnecessary suspend if we do not initialize them yet */
+ if (adev->gmc.xgmi.pending_reset &&
+ !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
+ adev->ip_blocks[i].status.hw = false;
+ continue;
+ }
/* XXX handle errors */
r = adev->ip_blocks[i].version->funcs->suspend(adev);
/* XXX handle errors */
@@ -2753,7 +2740,6 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
}
}
}
- adev->ip_blocks[i].status.hw = false;
}
return 0;
@@ -2774,8 +2760,10 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
int r;
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_fini_data_exchange(adev);
amdgpu_virt_request_full_gpu(adev, false);
+ }
r = amdgpu_device_ip_suspend_phase1(adev);
if (r)
@@ -3257,7 +3245,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* mutex initialization are all done here so we
* can recall function without having locking issues */
- atomic_set(&adev->irq.ih.lock, 0);
mutex_init(&adev->firmware.mutex);
mutex_init(&adev->pm.mutex);
mutex_init(&adev->gfx.gpu_clock_mutex);
@@ -3290,6 +3277,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->shadow_list);
mutex_init(&adev->shadow_list_lock);
+ INIT_LIST_HEAD(&adev->reset_list);
+
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -3328,17 +3317,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- /* io port mapping */
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
- adev->rio_mem_size = pci_resource_len(adev->pdev, i);
- adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
- break;
- }
- }
- if (adev->rio_mem == NULL)
- DRM_INFO("PCI I/O BAR is not found.\n");
-
/* enable PCIE atomic ops */
r = pci_enable_atomic_ops_to_root(adev->pdev,
PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
@@ -3405,10 +3383,28 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* E.g., driver was not cleanly unloaded previously, etc.
*/
if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
- r = amdgpu_asic_reset(adev);
- if (r) {
- dev_err(adev->dev, "asic reset on init failed\n");
- goto failed;
+ if (adev->gmc.xgmi.num_physical_nodes) {
+ dev_info(adev->dev, "Pending hive reset.\n");
+ adev->gmc.xgmi.pending_reset = true;
+ /* Only need to init necessary block for SMU to handle the reset */
+ for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
+ adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
+ DRM_DEBUG("IP %s disabled for hw_init.\n",
+ adev->ip_blocks[i].version->funcs->name);
+ adev->ip_blocks[i].status.hw = true;
+ }
+ }
+ } else {
+ r = amdgpu_asic_reset(adev);
+ if (r) {
+ dev_err(adev->dev, "asic reset on init failed\n");
+ goto failed;
+ }
}
}
@@ -3474,11 +3470,11 @@ fence_driver_init:
adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
adev->virt.ops = NULL;
r = -EAGAIN;
- goto failed;
+ goto release_ras_con;
}
dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
- goto failed;
+ goto release_ras_con;
}
dev_info(adev->dev,
@@ -3539,19 +3535,19 @@ fence_driver_init:
/* enable clockgating, etc. after ib tests, etc. since some blocks require
* explicit gating rather than handling it automatically.
*/
- r = amdgpu_device_ip_late_init(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
- goto failed;
+ if (!adev->gmc.xgmi.pending_reset) {
+ r = amdgpu_device_ip_late_init(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
+ goto release_ras_con;
+ }
+ /* must succeed. */
+ amdgpu_ras_resume(adev);
+ queue_delayed_work(system_wq, &adev->delayed_init_work,
+ msecs_to_jiffies(AMDGPU_RESUME_MS));
}
- /* must succeed. */
- amdgpu_ras_resume(adev);
-
- queue_delayed_work(system_wq, &adev->delayed_init_work,
- msecs_to_jiffies(AMDGPU_RESUME_MS));
-
if (amdgpu_sriov_vf(adev))
flush_delayed_work(&adev->delayed_init_work);
@@ -3568,8 +3564,15 @@ fence_driver_init:
if (amdgpu_device_cache_pci_state(adev->pdev))
pci_restore_state(pdev);
+ if (adev->gmc.xgmi.pending_reset)
+ queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
+ msecs_to_jiffies(AMDGPU_RESUME_MS));
+
return 0;
+release_ras_con:
+ amdgpu_release_ras_context(adev);
+
failed:
amdgpu_vf_error_trans_all(adev);
if (atpx)
@@ -3594,6 +3597,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
{
dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
+ ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
adev->shutdown = true;
kfree(adev->pci_state);
@@ -3640,9 +3644,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, NULL, NULL, NULL);
- if (adev->rio_mem)
- pci_iounmap(adev->pdev, adev->rio_mem);
- adev->rio_mem = NULL;
iounmap(adev->rmmio);
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
@@ -4237,15 +4238,56 @@ disabled:
return false;
}
+int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
+{
+ u32 i;
+ int ret = 0;
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+ dev_info(adev->dev, "GPU mode1 reset\n");
+
+ /* disable BM */
+ pci_clear_master(adev->pdev);
+
+ amdgpu_device_cache_pci_state(adev->pdev);
+
+ if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
+ dev_info(adev->dev, "GPU smu mode1 reset\n");
+ ret = amdgpu_dpm_mode1_reset(adev);
+ } else {
+ dev_info(adev->dev, "GPU psp mode1 reset\n");
+ ret = psp_gpu_reset(adev);
+ }
+
+ if (ret)
+ dev_err(adev->dev, "GPU mode1 reset failed\n");
+
+ amdgpu_device_load_pci_state(adev->pdev);
+
+ /* wait for asic to come out of reset */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ u32 memsize = adev->nbio.funcs->get_memsize(adev);
-static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
- struct amdgpu_job *job,
- bool *need_full_reset_arg)
+ if (memsize != 0xffffffff)
+ break;
+ udelay(1);
+ }
+
+ amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+ return ret;
+}
+
+int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
+ struct amdgpu_job *job,
+ bool *need_full_reset_arg)
{
int i, r = 0;
bool need_full_reset = *need_full_reset_arg;
- amdgpu_debugfs_wait_dump(adev);
+ /* no need to dump if device is not in a good state during the probe period */
+ if (!adev->gmc.xgmi.pending_reset)
+ amdgpu_debugfs_wait_dump(adev);
if (amdgpu_sriov_vf(adev)) {
/* stop the data exchange thread */
@@ -4291,23 +4333,24 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
return r;
}
-static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
- struct list_head *device_list_handle,
- bool *need_full_reset_arg,
- bool skip_hw_reset)
+int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
+ struct list_head *device_list_handle,
+ bool *need_full_reset_arg,
+ bool skip_hw_reset)
{
struct amdgpu_device *tmp_adev = NULL;
bool need_full_reset = *need_full_reset_arg, vram_lost = false;
int r = 0;
/*
- * ASIC reset has to be done on all HGMI hive nodes ASAP
+ * ASIC reset has to be done on all XGMI hive nodes ASAP
* to allow proper links negotiation in FW (within 1 sec)
*/
if (!skip_hw_reset && need_full_reset) {
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
+ tmp_adev->gmc.xgmi.pending_reset = false;
if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
r = -EALREADY;
} else
@@ -4322,8 +4365,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
/* For XGMI wait for all resets to complete before proceed */
if (!r) {
- list_for_each_entry(tmp_adev, device_list_handle,
- gmc.xgmi.head) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
flush_work(&tmp_adev->xgmi_reset_work);
r = tmp_adev->asic_reset_res;
@@ -4335,7 +4377,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
}
if (!r && amdgpu_ras_intr_triggered()) {
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
if (tmp_adev->mmhub.funcs &&
tmp_adev->mmhub.funcs->reset_ras_error_count)
tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
@@ -4344,13 +4386,13 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
amdgpu_ras_intr_cleared();
}
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
if (need_full_reset) {
/* post card */
- if (amdgpu_device_asic_init(tmp_adev))
+ r = amdgpu_device_asic_init(tmp_adev);
+ if (r) {
dev_warn(tmp_adev->dev, "asic atom init failed!");
-
- if (!r) {
+ } else {
dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
r = amdgpu_device_ip_resume_phase1(tmp_adev);
if (r)
@@ -4383,6 +4425,9 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
*/
amdgpu_register_gpu_instance(tmp_adev);
+ if (!hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
+ amdgpu_xgmi_add_device(tmp_adev);
+
r = amdgpu_device_ip_late_init(tmp_adev);
if (r)
goto out;
@@ -4399,7 +4444,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
* bad_page_threshold value to fix this once
* probing driver again.
*/
- if (!amdgpu_ras_check_err_threshold(tmp_adev)) {
+ if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
/* must succeed. */
amdgpu_ras_resume(tmp_adev);
} else {
@@ -4449,7 +4494,6 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
down_write(&adev->reset_sem);
}
- atomic_inc(&adev->gpu_reset_counter);
switch (amdgpu_asic_reset_method(adev)) {
case AMD_RESET_METHOD_MODE1:
adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
@@ -4656,16 +4700,18 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
INIT_LIST_HEAD(&device_list);
if (adev->gmc.xgmi.num_physical_nodes > 1) {
- if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
- list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
- device_list_handle = &hive->device_list;
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ if (!list_is_first(&adev->reset_list, &device_list))
+ list_rotate_to_front(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
} else {
- list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ list_add_tail(&adev->reset_list, &device_list);
device_list_handle = &device_list;
}
/* block all schedulers and reset given job's ring */
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
/*
* Try to put the audio codec into suspend state
* before gpu reset started.
@@ -4710,6 +4756,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
if (need_emergency_restart)
amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
}
+ atomic_inc(&tmp_adev->gpu_reset_counter);
}
if (need_emergency_restart)
@@ -4729,7 +4776,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
}
retry: /* Rest of adevs pre asic reset from XGMI hive. */
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
r = amdgpu_device_pre_asic_reset(tmp_adev,
(tmp_adev == adev) ? job : NULL,
&need_full_reset);
@@ -4756,7 +4803,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
skip_hw_reset:
/* Post ASIC reset for all devs .*/
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = tmp_adev->rings[i];
@@ -4787,10 +4834,17 @@ skip_hw_reset:
}
skip_sched_resume:
- list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- /*unlock kfd: SRIOV would do it separately */
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+ /* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
+
+ /* kfd_post_reset will do nothing if the kfd device is not initialized,
+ * so bring up kfd here if it has not been initialized before
+ */
+ if (!adev->kfd.init_complete)
+ amdgpu_amdkfd_device_init(adev);
+
if (audio_suspended)
amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unlock_adev(tmp_adev);
@@ -5052,6 +5106,7 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
drm_sched_stop(&ring->sched, NULL);
}
+ atomic_inc(&adev->gpu_reset_counter);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
/* Permanent error, prepare for device removal */
@@ -5100,7 +5155,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
DRM_INFO("PCI error: slot reset callback!!\n");
INIT_LIST_HEAD(&device_list);
- list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ list_add_tail(&adev->reset_list, &device_list);
/* wait for asic to come out of reset */
msleep(500);
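
The hunks above move every multi-GPU reset loop from the hive's gmc.xgmi.head list onto a transient, per-reset reset_list. A minimal sketch of that pattern follows; it is illustrative only, with demo_* placeholder names rather than code from this patch.

#include <linux/list.h>

struct demo_adev {
	struct list_head reset_list;	/* per-reset linkage, like adev->reset_list */
	struct list_head xgmi_head;	/* hive membership, like gmc.xgmi.head */
};

static void demo_build_reset_list(struct list_head *hive_devices,
				  struct list_head *device_list,
				  struct demo_adev *primary)
{
	struct demo_adev *tmp;

	INIT_LIST_HEAD(device_list);

	/* copy hive membership into the transient reset list */
	list_for_each_entry(tmp, hive_devices, xgmi_head)
		list_add_tail(&tmp->reset_list, device_list);

	/* handle the device that triggered the reset first */
	if (!list_is_first(&primary->reset_list, device_list))
		list_rotate_to_front(&primary->reset_list, device_list);
}
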
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 48cb33e5b382..b05301e1815c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -870,17 +870,62 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb
return r;
}
+int amdgpu_display_gem_fb_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ rfb->base.obj[0] = obj;
+ drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
+ ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
+ if (ret)
+ goto err;
+
+ ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ drm_err(dev, "Failed to init gem fb: %d\n", ret);
+ rfb->base.obj[0] = NULL;
+ return ret;
+}
+
+int amdgpu_display_gem_fb_verify_and_init(
+ struct drm_device *dev, struct amdgpu_framebuffer *rfb,
+ struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ rfb->base.obj[0] = obj;
+
+ /* Verify that bo size can fit the fb size. */
+ ret = drm_gem_fb_init_with_funcs(dev, &rfb->base, file_priv, mode_cmd,
+ &amdgpu_fb_funcs);
+ if (ret)
+ goto err;
+
+ ret = amdgpu_display_framebuffer_init(dev, rfb, mode_cmd, obj);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ drm_err(dev, "Failed to verify and init gem fb: %d\n", ret);
+ rfb->base.obj[0] = NULL;
+ return ret;
+}
+
int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret, i;
- rfb->base.obj[0] = obj;
- drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd);
- ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs);
- if (ret)
- goto fail;
/*
* This needs to happen before modifier conversion as that might change
@@ -891,13 +936,13 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
drm_dbg_kms(dev, "Plane 0 and %d have different BOs: %u vs. %u\n",
i, mode_cmd->handles[0], mode_cmd->handles[i]);
ret = -EINVAL;
- goto fail;
+ return ret;
}
}
ret = amdgpu_display_get_fb_info(rfb, &rfb->tiling_flags, &rfb->tmz_surface);
if (ret)
- goto fail;
+ return ret;
if (dev->mode_config.allow_fb_modifiers &&
!(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
@@ -905,20 +950,17 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
if (ret) {
drm_dbg_kms(dev, "Failed to convert tiling flags 0x%llX to a modifier",
rfb->tiling_flags);
- goto fail;
+ return ret;
}
}
for (i = 1; i < rfb->base.format->num_planes; ++i) {
+ drm_gem_object_get(rfb->base.obj[0]);
+ drm_gem_object_put(rfb->base.obj[i]);
rfb->base.obj[i] = rfb->base.obj[0];
- drm_gem_object_get(rfb->base.obj[i]);
}
return 0;
-
-fail:
- rfb->base.obj[0] = NULL;
- return ret;
}
struct drm_framebuffer *
@@ -953,13 +995,15 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOMEM);
}
- ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
+ ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv,
+ mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
drm_gem_object_put(obj);
return ERR_PTR(ret);
}
+ drm_gem_object_put(obj);
return &amdgpu_fb->base;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 47e0b48dc26f..e0c4f7c7f1b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -321,17 +321,12 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- struct dma_buf *dma_buf = attach->dmabuf;
- struct drm_gem_object *obj = dma_buf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-
if (sgt->sgl->page_link) {
dma_unmap_sgtable(attach->dev, sgt, dir, 0);
sg_free_table(sgt);
kfree(sgt);
} else {
- amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
+ amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
}
}
@@ -434,22 +429,22 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
struct dma_resv *resv = dma_buf->resv;
struct amdgpu_device *adev = drm_to_adev(dev);
- struct amdgpu_bo *bo;
- struct amdgpu_bo_param bp;
struct drm_gem_object *gobj;
+ struct amdgpu_bo *bo;
+ uint64_t flags = 0;
int ret;
- memset(&bp, 0, sizeof(bp));
- bp.size = dma_buf->size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_CPU;
- bp.flags = 0;
- bp.type = ttm_bo_type_sg;
- bp.resv = resv;
dma_resv_lock(resv, NULL);
+
+ if (dma_buf->ops == &amdgpu_dmabuf_ops) {
+ struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);
+
+ flags |= other->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ }
+
ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_CPU,
- 0, ttm_bo_type_sg, resv, &gobj);
+ AMDGPU_GEM_DOMAIN_CPU, flags,
+ ttm_bo_type_sg, resv, &gobj);
if (ret)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b26e2fd1c538..33991b4a5627 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -45,6 +45,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"
+#include "amdgpu_xgmi.h"
/*
* KMS wrapper.
@@ -90,9 +91,10 @@
* - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
* - 3.39.0 - DMABUF implicit sync does a full pipeline sync
* - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
+ * - 3.41.0 - Add video codec query
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 40
+#define KMS_DRIVER_MINOR 41
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit;
@@ -145,6 +147,7 @@ int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode;
uint amdgpu_smu_memory_pool_size;
+int amdgpu_smu_pptable_id = -1;
/*
* FBC (bit 0) disabled by default
* MULTI_MON_PP_MCLK_SWITCH (bit 1) enabled by default
@@ -162,16 +165,26 @@ int amdgpu_discovery = -1;
int amdgpu_mes;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
-int amdgpu_tmz;
+int amdgpu_tmz = -1; /* auto */
+uint amdgpu_freesync_vid_mode;
int amdgpu_reset_method = -1; /* auto */
int amdgpu_num_kcq = -1;
+static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
+
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
+ .delayed_reset_work = __DELAYED_WORK_INITIALIZER(
+ mgpu_info.delayed_reset_work,
+ amdgpu_drv_delayed_reset_work_handler, 0),
};
int amdgpu_ras_enable = -1;
uint amdgpu_ras_mask = 0xffffffff;
int amdgpu_bad_page_threshold = -1;
+struct amdgpu_watchdog_timer amdgpu_watchdog_timer = {
+ .timeout_fatal_disable = false,
+ .period = 0x23, /* default to max. timeout = 1 << 0x23 cycles */
+};
/**
* DOC: vramlimit (int)
@@ -528,6 +541,20 @@ MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff),
module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);
/**
+ * DOC: timeout_fatal_disable (bool)
+ * Disable Watchdog timeout fatal error event
+ */
+MODULE_PARM_DESC(timeout_fatal_disable, "disable watchdog timeout fatal error (false = default)");
+module_param_named(timeout_fatal_disable, amdgpu_watchdog_timer.timeout_fatal_disable, bool, 0644);
+
+/**
+ * DOC: timeout_period (uint)
+ * Modify the watchdog timeout max_cycles as (1 << period)
+ */
+MODULE_PARM_DESC(timeout_period, "watchdog timeout period (1 to 0x23 (default); timeout maxCycles = (1 << period))");
+module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
+
+/**
* DOC: si_support (int)
* Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled,
* set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
@@ -748,6 +775,13 @@ bool no_system_mem_limit;
module_param(no_system_mem_limit, bool, 0644);
MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = default)");
+/**
+ * DOC: no_queue_eviction_on_vm_fault (int)
+ * If set, process queues will not be evicted on gpuvm fault. This is to keep the wavefront context for debugging (0 = queue eviction, 1 = no queue eviction). The default is 0 (queue eviction).
+ */
+int amdgpu_no_queue_eviction_on_vm_fault = 0;
+MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
+module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
#endif
/**
@@ -792,10 +826,21 @@ module_param_named(backlight, amdgpu_backlight, bint, 0444);
*
* The default value: 0 (off). TODO: change to auto till it is completed.
*/
-MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto, 0 = off (default), 1 = on)");
+MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
module_param_named(tmz, amdgpu_tmz, int, 0444);
/**
+ * DOC: freesync_video (uint)
+ * Enable the optimization that adjusts front porch timing to achieve a seamless mode change
+ * when setting a freesync supported mode for which a full modeset is not needed.
+ * The default value: 0 (off).
+ */
+MODULE_PARM_DESC(
+ freesync_video,
+ "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
+
+/**
* DOC: reset_method (int)
* GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci)
*/
@@ -815,6 +860,15 @@ module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
+/**
+ * DOC: smu_pptable_id (int)
+ * Used to override the pptable id. id = 0 uses the VBIOS pptable;
+ * id > 0 uses the soft pptable with the specified id.
+ */
+MODULE_PARM_DESC(smu_pptable_id,
+ "specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)");
+module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1124,6 +1178,11 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+ /* Aldebaran */
+ {0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
+
{0, 0, 0}
};
@@ -1280,6 +1339,69 @@ amdgpu_pci_shutdown(struct pci_dev *pdev)
adev->mp1_state = PP_MP1_STATE_NONE;
}
+/**
+ * amdgpu_drv_delayed_reset_work_handler - work handler for reset
+ *
+ * @work: work_struct.
+ */
+static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
+{
+ struct list_head device_list;
+ struct amdgpu_device *adev;
+ int i, r;
+ bool need_full_reset = true;
+
+ mutex_lock(&mgpu_info.mutex);
+ if (mgpu_info.pending_reset == true) {
+ mutex_unlock(&mgpu_info.mutex);
+ return;
+ }
+ mgpu_info.pending_reset = true;
+ mutex_unlock(&mgpu_info.mutex);
+
+ for (i = 0; i < mgpu_info.num_dgpu; i++) {
+ adev = mgpu_info.gpu_ins[i].adev;
+ r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
+ if (r) {
+ dev_err(adev->dev, "GPU pre asic reset failed with err %d for drm dev %s ",
+ r, adev_to_drm(adev)->unique);
+ }
+ if (!queue_work(system_unbound_wq, &adev->xgmi_reset_work))
+ r = -EALREADY;
+ }
+ for (i = 0; i < mgpu_info.num_dgpu; i++) {
+ adev = mgpu_info.gpu_ins[i].adev;
+ flush_work(&adev->xgmi_reset_work);
+ adev->gmc.xgmi.pending_reset = false;
+ }
+
+ /* the reset function will rebuild the xgmi hive info, so clear it now */
+ for (i = 0; i < mgpu_info.num_dgpu; i++)
+ amdgpu_xgmi_remove_device(mgpu_info.gpu_ins[i].adev);
+
+ INIT_LIST_HEAD(&device_list);
+
+ for (i = 0; i < mgpu_info.num_dgpu; i++)
+ list_add_tail(&mgpu_info.gpu_ins[i].adev->reset_list, &device_list);
+
+ /* unregister the GPU first, reset function will add them back */
+ list_for_each_entry(adev, &device_list, reset_list)
+ amdgpu_unregister_gpu_instance(adev);
+
+ r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);
+ if (r) {
+ DRM_ERROR("reinit gpus failure");
+ return;
+ }
+ for (i = 0; i < mgpu_info.num_dgpu; i++) {
+ adev = mgpu_info.gpu_ins[i].adev;
+ if (!adev->kfd.init_complete)
+ amdgpu_amdkfd_device_init(adev);
+ amdgpu_ttm_set_buffer_funcs_status(adev, true);
+ }
+ return;
+}
+
static int amdgpu_pmops_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
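
mgpu_info.delayed_reset_work above uses the standard statically initialized delayed-work pattern; device init queues it when a hive reset is pending. The sketch below only illustrates that pattern with demo_* placeholder names, not code from this patch.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_reset_handler(struct work_struct *work);

static struct delayed_work demo_reset_work =
	__DELAYED_WORK_INITIALIZER(demo_reset_work, demo_reset_handler, 0);

static void demo_reset_handler(struct work_struct *work)
{
	/* runs on system_wq once the requested delay has elapsed */
}

/* queued (e.g. from probe) once a pending hive reset has been detected */
static void demo_schedule_reset(unsigned long delay_ms)
{
	queue_delayed_work(system_wq, &demo_reset_work,
			   msecs_to_jiffies(delay_ms));
}
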
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 24010cacf7d0..4c5c19820d37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -232,8 +232,8 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
goto out;
}
- ret = amdgpu_display_framebuffer_init(adev_to_drm(adev), &rfbdev->rfb,
- &mode_cmd, gobj);
+ ret = amdgpu_display_gem_fb_init(adev_to_drm(adev), &rfbdev->rfb,
+ &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initialize framebuffer %d\n", ret);
goto out;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 8e0a5650d383..1a4809d9e850 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -36,8 +36,6 @@
#include <linux/firmware.h>
#include <linux/pm_runtime.h>
-#include <drm/drm_debugfs.h>
-
#include "amdgpu.h"
#include "amdgpu_trace.h"
@@ -533,6 +531,8 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
+ if (!ring->no_scheduler)
+ drm_sched_fini(&ring->sched);
r = amdgpu_fence_wait_empty(ring);
if (r) {
/* no need to trigger GPU reset as we are unloading */
@@ -541,8 +541,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
if (ring->fence_drv.irq_src)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
- if (!ring->no_scheduler)
- drm_sched_fini(&ring->sched);
+
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
dma_fence_put(ring->fence_drv.fences[j]);
@@ -697,11 +696,9 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
* Fence debugfs
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_fence_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
int i;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -746,11 +743,10 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data)
*
* Manually trigger a gpu reset at the next fence wait.
*/
-static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
+static int gpu_recover_get(void *data, u64 *val)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+ struct drm_device *dev = adev_to_drm(adev);
int r;
r = pm_runtime_get_sync(dev->dev);
@@ -759,8 +755,7 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
return 0;
}
- seq_printf(m, "gpu recover\n");
- amdgpu_device_gpu_recover(adev, NULL);
+ *val = amdgpu_device_gpu_recover(adev, NULL);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -768,26 +763,24 @@ static int amdgpu_debugfs_gpu_recover(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list amdgpu_debugfs_fence_list[] = {
- {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
- {"amdgpu_gpu_recover", &amdgpu_debugfs_gpu_recover, 0, NULL}
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_fence_info);
+DEFINE_DEBUGFS_ATTRIBUTE(amdgpu_debugfs_gpu_recover_fops, gpu_recover_get, NULL,
+ "%lld\n");
-static const struct drm_info_list amdgpu_debugfs_fence_list_sriov[] = {
- {"amdgpu_fence_info", &amdgpu_debugfs_fence_info, 0, NULL},
-};
#endif
-int amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_fence_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- if (amdgpu_sriov_vf(adev))
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list_sriov,
- ARRAY_SIZE(amdgpu_debugfs_fence_list_sriov));
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_fence_list,
- ARRAY_SIZE(amdgpu_debugfs_fence_list));
-#else
- return 0;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_fence_info", 0444, root, adev,
+ &amdgpu_debugfs_fence_info_fops);
+
+ if (!amdgpu_sriov_vf(adev))
+ debugfs_create_file("amdgpu_gpu_recover", 0444, root, adev,
+ &amdgpu_debugfs_gpu_recover_fops);
#endif
}
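
This file, and several others in the series, convert drm_info_list-based debugfs entries to DEFINE_SHOW_ATTRIBUTE() plus debugfs_create_file(). A minimal sketch of that pattern, using demo_* placeholder names rather than code from this patch:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int demo_info_show(struct seq_file *m, void *unused)
{
	/* m->private is the data pointer handed to debugfs_create_file() */
	void *priv = m->private;

	seq_printf(m, "private data at %p\n", priv);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo_info);	/* generates demo_info_fops */

static void demo_debugfs_init(struct dentry *root, void *priv)
{
	/* creates a read-only "demo_info" node under root */
	debugfs_create_file("demo_info", 0444, root, priv, &demo_info_fops);
}
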
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index fde2d899b2c4..5807cad833d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -202,6 +202,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
return;
}
amdgpu_bo_unref(&adev->gart.bo);
+ adev->gart.ptr = NULL;
}
/*
@@ -236,9 +237,6 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++) {
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- adev->gart.pages[p] = NULL;
-#endif
page_base = adev->dummy_page_addr;
if (!adev->gart.ptr)
continue;
@@ -312,9 +310,6 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
int pages, struct page **pagelist, dma_addr_t *dma_addr,
uint64_t flags)
{
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- unsigned t,p;
-#endif
int r, i;
if (!adev->gart.ready) {
@@ -322,13 +317,6 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
return -EINVAL;
}
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- t = offset / AMDGPU_GPU_PAGE_SIZE;
- p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
- for (i = 0; i < pages; i++, p++)
- adev->gart.pages[p] = pagelist ? pagelist[i] : NULL;
-#endif
-
if (!adev->gart.ptr)
return 0;
@@ -373,14 +361,6 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
adev->gart.num_cpu_pages, adev->gart.num_gpu_pages);
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- /* Allocate pages table */
- adev->gart.pages = vzalloc(array_size(sizeof(void *),
- adev->gart.num_cpu_pages));
- if (adev->gart.pages == NULL)
- return -ENOMEM;
-#endif
-
return 0;
}
@@ -393,9 +373,5 @@ int amdgpu_gart_init(struct amdgpu_device *adev)
*/
void amdgpu_gart_fini(struct amdgpu_device *adev)
{
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- vfree(adev->gart.pages);
- adev->gart.pages = NULL;
-#endif
amdgpu_gart_dummy_page_fini(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
index afa2e2877d87..a25fe97b0196 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
@@ -46,9 +46,6 @@ struct amdgpu_gart {
unsigned num_gpu_pages;
unsigned num_cpu_pages;
unsigned table_size;
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- struct page **pages;
-#endif
bool ready;
/* Asic default pte flags */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index b443907afcea..fb7171e5507c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -32,7 +32,6 @@
#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
-#include <drm/drm_debugfs.h>
#include <drm/drm_gem_ttm_helper.h>
#include "amdgpu.h"
@@ -855,10 +854,10 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
}
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct drm_device *dev = adev_to_drm(adev);
struct drm_file *file;
int r;
@@ -896,16 +895,17 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
- {"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_gem_info);
+
#endif
-int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list,
- ARRAY_SIZE(amdgpu_debugfs_gem_list));
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_gem_info", 0444, root, adev,
+ &amdgpu_debugfs_gem_info_fops);
#endif
- return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 8e0a6c62322e..689addb1520d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -601,6 +601,7 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
struct ras_ih_if ih_info = {
.cb = amdgpu_gfx_process_ras_data_cb,
};
+ struct ras_query_if info = { 0 };
if (!adev->gfx.ras_if) {
adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
@@ -612,13 +613,19 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
strcpy(adev->gfx.ras_if->name, "gfx");
}
fs_info.head = ih_info.head = *adev->gfx.ras_if;
-
r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
&fs_info, &ih_info);
if (r)
goto free;
if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ info.head = *adev->gfx.ras_if;
+ amdgpu_ras_query_error_status(adev, &info);
+ } else {
+ amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
+ }
+
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (r)
goto late_fini;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 72dbcd2bc6a6..38af93f501e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -30,6 +30,7 @@
#include "clearstate_defs.h"
#include "amdgpu_ring.h"
#include "amdgpu_rlc.h"
+#include "soc15.h"
/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
@@ -224,7 +225,9 @@ struct amdgpu_gfx_funcs {
void (*reset_ras_error_count) (struct amdgpu_device *adev);
void (*init_spm_golden)(struct amdgpu_device *adev);
void (*query_ras_error_status) (struct amdgpu_device *adev);
+ void (*reset_ras_error_status) (struct amdgpu_device *adev);
void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable);
+ void (*enable_watchdog_timer)(struct amdgpu_device *adev);
};
struct sq_work {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index fe1a39ffda72..6f7995293a1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -32,6 +32,57 @@
#include "amdgpu_xgmi.h"
/**
+ * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Allocate video memory for pdb0 and map it for CPU access
+ * Returns 0 for success, error for failure.
+ */
+int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev)
+{
+ int r;
+ struct amdgpu_bo_param bp;
+ u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
+ uint32_t pde0_page_shift = adev->gmc.vmid0_page_table_block_size + 21;
+ uint32_t npdes = (vram_size + (1ULL << pde0_page_shift) -1) >> pde0_page_shift;
+
+ memset(&bp, 0, sizeof(bp));
+ bp.size = PAGE_ALIGN((npdes + 1) * 8);
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+ r = amdgpu_bo_create(adev, &bp, &adev->gmc.pdb0_bo);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(adev->gmc.pdb0_bo, false);
+ if (unlikely(r != 0))
+ goto bo_reserve_failure;
+
+ r = amdgpu_bo_pin(adev->gmc.pdb0_bo, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto bo_pin_failure;
+ r = amdgpu_bo_kmap(adev->gmc.pdb0_bo, &adev->gmc.ptr_pdb0);
+ if (r)
+ goto bo_kmap_failure;
+
+ amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
+ return 0;
+
+bo_kmap_failure:
+ amdgpu_bo_unpin(adev->gmc.pdb0_bo);
+bo_pin_failure:
+ amdgpu_bo_unreserve(adev->gmc.pdb0_bo);
+bo_reserve_failure:
+ amdgpu_bo_unref(&adev->gmc.pdb0_bo);
+ return r;
+}
+
+/**
* amdgpu_gmc_get_pde_for_bo - get the PDE for a BO
*
* @bo: the BO to get the PDE for
@@ -158,6 +209,39 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
mc->vram_end, mc->real_vram_size >> 20);
}
+/** amdgpu_gmc_sysvm_location - place vram and gart in sysvm aperture
+ *
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function is only used when GART is used for FB translation. In such
+ * case, we use sysvm aperture (vmid0 page tables) for both vram
+ * and gart (aka system memory) access.
+ *
+ * GPUVM (and our organization of vmid0 page tables) require sysvm
+ * aperture to be placed at a location aligned with 8 times of native
+ * page size. For example, if vm_context0_cntl.page_table_block_size
+ * is 12, then native page size is 8G (2M*2^12), sysvm should start
+ * with a 64G aligned address. For simplicity, we just put sysvm at
+ * address 0. So vram start at address 0 and gart is right after vram.
+ */
+void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
+{
+ u64 hive_vram_start = 0;
+ u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1;
+ mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id;
+ mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1;
+ mc->gart_start = hive_vram_end + 1;
+ mc->gart_end = mc->gart_start + mc->gart_size - 1;
+ mc->fb_start = hive_vram_start;
+ mc->fb_end = hive_vram_end;
+ dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+ dev_info(adev->dev, "GART: %lluM 0x%016llX - 0x%016llX\n",
+ mc->gart_size >> 20, mc->gart_start, mc->gart_end);
+}
+
/**
* amdgpu_gmc_gart_location - try to find GART location
*
@@ -165,7 +249,6 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
* @mc: memory controller structure holding memory information
*
* Function will place try to place GART before or after VRAM.
- *
* If GART size is bigger than space left then we ajust GART size.
* Thus function will never fails.
*/
@@ -176,8 +259,6 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
/*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/
u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1);
- mc->gart_size += adev->pm.smu_prv_buffer_size;
-
/* VCE doesn't like it when BOs cross a 4GB segment, so align
* the GART base on a 4GB boundary as well.
*/
@@ -384,6 +465,16 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
{
switch (adev->asic_type) {
case CHIP_RAVEN:
+ if (amdgpu_tmz == 0) {
+ adev->gmc.tmz_enabled = false;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature disabled (cmd line)\n");
+ } else {
+ adev->gmc.tmz_enabled = true;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature enabled\n");
+ }
+ break;
case CHIP_RENOIR:
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -423,6 +514,7 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA20:
+ case CHIP_ALDEBARAN:
/*
* noretry = 0 will cause kfd page fault tests fail
* for some ASICs, so set default to 1 for these ASICs.
@@ -518,3 +610,55 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
adev->mman.stolen_extended_size = 0;
}
}
+
+/**
+ * amdgpu_gmc_init_pdb0 - initialize PDB0
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This function is only used when GART page table is used
+ * for FB address translation. In such a case, we construct
+ * a 2-level system VM page table: PDB0->PTB, to cover both
+ * VRAM of the hive and system memory.
+ *
+ * PDB0 is static, initialized once on driver initialization.
+ * The first n entries of PDB0 are used as PTE by setting
+ * P bit to 1, pointing to VRAM. The n+1'th entry points
+ * to a big PTB covering system memory.
+ *
+ */
+void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
+{
+ int i;
+ uint64_t flags = adev->gart.gart_pte_flags; //TODO it is UC. explore NC/RW?
+ /* Each PDE0 (used as PTE) covers (2^vmid0_page_table_block_size)*2M
+ */
+ u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes;
+ u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21;
+ u64 vram_addr = adev->vm_manager.vram_base_offset -
+ adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+ u64 vram_end = vram_addr + vram_size;
+ u64 gart_ptb_gpu_pa = amdgpu_bo_gpu_offset(adev->gart.bo) +
+ adev->vm_manager.vram_base_offset - adev->gmc.vram_start;
+
+ flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
+ flags |= AMDGPU_PTE_WRITEABLE;
+ flags |= AMDGPU_PTE_SNOOPED;
+ flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1));
+ flags |= AMDGPU_PDE_PTE;
+
+ /* The first n PDE0 entries are used as PTE,
+ * pointing to vram
+ */
+ for (i = 0; vram_addr < vram_end; i++, vram_addr += pde0_page_size)
+ amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, vram_addr, flags);
+
+ /* The n+1'th PDE0 entry points to a huge
+ * PTB with more than 512 entries, each
+ * pointing to a 4K system page
+ */
+ flags = AMDGPU_PTE_VALID;
+ flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
+ /* Requires gart_ptb_gpu_pa to be 4K aligned */
+ amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
+}
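
The arithmetic behind pde0_page_size and npdes in amdgpu_gmc_pdb0_alloc() and amdgpu_gmc_init_pdb0() can be made concrete with a small worked example. The numbers below are illustrative only, not taken from any specific ASIC.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t block_size = 9;		/* vmid0_page_table_block_size */
	uint64_t node_vram  = 32ULL << 30;	/* 32 GiB of VRAM per node */
	uint32_t nodes      = 4;		/* XGMI hive size */

	uint32_t pde0_page_shift = block_size + 21;		/* each PDE0 covers 2M * 2^block_size */
	uint64_t pde0_page_size  = 1ULL << pde0_page_shift;	/* 1 GiB here */
	uint64_t vram_size = node_vram * nodes;			/* 128 GiB of hive VRAM */
	uint32_t npdes = (vram_size + pde0_page_size - 1) >> pde0_page_shift;	/* 128 */

	/* one extra entry for the PDE pointing at the system-memory PTB */
	printf("pde0_page_size=%llu npdes=%u pdb0_bytes=%llu\n",
	       (unsigned long long)pde0_page_size, npdes,
	       (unsigned long long)((npdes + 1) * 8));
	return 0;
}
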
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index aa0c83776ce0..7e248a4e2fa3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -149,6 +149,8 @@ struct amdgpu_xgmi {
struct list_head head;
bool supported;
struct ras_common_if *ras_if;
+ bool connected_to_cpu;
+ bool pending_reset;
};
struct amdgpu_gmc {
@@ -189,10 +191,13 @@ struct amdgpu_gmc {
u64 gart_end;
/* Frame buffer aperture of this GPU device. Different from
* fb_start (see below), this only covers the local GPU device.
- * Driver get fb_start from MC_VM_FB_LOCATION_BASE (set by vbios)
- * and calculate vram_start of this local device by adding an
- * offset inside the XGMI hive.
- * Under VMID0, logical address == MC address
+ * If the driver uses the FB aperture to access FB, it gets fb_start from
+ * MC_VM_FB_LOCATION_BASE (set by vbios) and calculates vram_start
+ * of this local device by adding an offset inside the XGMI hive.
+ * If the driver uses the GART table for VMID0 FB access, it finds a hole in
+ * VMID0's virtual address space to place the SYSVM aperture, in
+ * which the first part is vram and the second part is gart (covering
+ * system ram).
*/
u64 vram_start;
u64 vram_end;
@@ -204,6 +209,15 @@ struct amdgpu_gmc {
*/
u64 fb_start;
u64 fb_end;
+ /* When the GART table is used for vmid0 FB access, [fb_start, fb_end]
+ * will be squeezed into the GART aperture. However, a PSP FW issue has
+ * to be worked around for now: the two variables below remember the
+ * original fb_start/end so the FB aperture can be re-enabled as a
+ * temporary workaround. Remove them once a proper PSP FW fix is
+ * available.
+ */
+ u64 fb_start_original;
+ u64 fb_end_original;
unsigned vram_width;
u64 real_vram_size;
int vram_mtrr;
@@ -240,6 +254,12 @@ struct amdgpu_gmc {
struct amdgpu_xgmi xgmi;
struct amdgpu_irq_src ecc_irq;
int noretry;
+
+ uint32_t vmid0_page_table_block_size;
+ uint32_t vmid0_page_table_depth;
+ struct amdgpu_bo *pdb0_bo;
+ /* CPU kmapped address of pdb0 */
+ void *ptr_pdb0;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
@@ -281,6 +301,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
return addr;
}
+int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev);
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
@@ -288,6 +309,7 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
uint64_t flags);
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
+void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc);
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
u64 base);
void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
@@ -309,4 +331,5 @@ amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev);
+void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 7645223ea0ef..148a3b481b12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -30,7 +30,6 @@
#include <linux/slab.h>
#include <drm/amdgpu_drm.h>
-#include <drm/drm_debugfs.h>
#include "amdgpu.h"
#include "atom.h"
@@ -453,11 +452,9 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_sa_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
seq_printf(m, "--------------------- DELAYED --------------------- \n");
amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
@@ -471,18 +468,18 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
- {"amdgpu_sa_info", &amdgpu_debugfs_sa_info, 0, NULL},
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_sa_info);
#endif
-int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list,
- ARRAY_SIZE(amdgpu_debugfs_sa_list));
-#else
- return 0;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_sa_info", 0444, root, adev,
+ &amdgpu_debugfs_sa_info_fops);
+
#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index dc852af4f3b7..faaa6aa2faaf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -99,6 +99,8 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
ih->rptr_addr = adev->wb.gpu_addr + rptr_offs * 4;
ih->rptr_cpu = &adev->wb.wb[rptr_offs];
}
+
+ init_waitqueue_head(&ih->wait_process);
return 0;
}
@@ -160,6 +162,52 @@ void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
}
}
+/* Waiter helper that checks whether the current rptr matches or passes the checkpoint wptr */
+static bool amdgpu_ih_has_checkpoint_processed(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih,
+ uint32_t checkpoint_wptr,
+ uint32_t *prev_rptr)
+{
+ uint32_t cur_rptr = ih->rptr | (*prev_rptr & ~ih->ptr_mask);
+
+ /* rptr has wrapped. */
+ if (cur_rptr < *prev_rptr)
+ cur_rptr += ih->ptr_mask + 1;
+ *prev_rptr = cur_rptr;
+
+ return cur_rptr >= checkpoint_wptr;
+}
+
+/**
+ * amdgpu_ih_wait_on_checkpoint_process - wait to process IVs up to checkpoint
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: ih ring to process
+ *
+ * Used to ensure ring has processed IVs up to the checkpoint write pointer.
+ */
+int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t checkpoint_wptr, rptr;
+
+ if (!ih->enabled || adev->shutdown)
+ return -ENODEV;
+
+ checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
+ /* Order wptr with rptr. */
+ rmb();
+ rptr = READ_ONCE(ih->rptr);
+
+ /* wptr has wrapped. */
+ if (rptr > checkpoint_wptr)
+ checkpoint_wptr += ih->ptr_mask + 1;
+
+ return wait_event_interruptible(ih->wait_process,
+ amdgpu_ih_has_checkpoint_processed(adev, ih,
+ checkpoint_wptr, &rptr));
+}
+
/**
* amdgpu_ih_process - interrupt handler
*
@@ -180,10 +228,6 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
wptr = amdgpu_ih_get_wptr(adev, ih);
restart_ih:
- /* is somebody else already processing irqs? */
- if (atomic_xchg(&ih->lock, 1))
- return IRQ_NONE;
-
DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
/* Order reading of wptr vs. reading of IH ring data */
@@ -195,7 +239,7 @@ restart_ih:
}
amdgpu_ih_set_rptr(adev, ih);
- atomic_set(&ih->lock, 0);
+ wake_up_all(&ih->wait_process);
/* make sure wptr hasn't changed while processing */
wptr = amdgpu_ih_get_wptr(adev, ih);
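
amdgpu_ih_wait_on_checkpoint_process() lets a caller block until every IV that was already in the ring has been handled; amdgpu_ih_process() wakes such waiters via wait_process after updating the rptr. A hypothetical caller, not part of this patch, might drain IH ring 0 like this:

static int demo_drain_ih(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih);
	if (r == -ENODEV)
		return r;	/* ring disabled or device shutting down */
	if (r)
		return r;	/* interrupted by a signal */

	return 0;
}
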
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 6ed4a85fc7c3..0649b59830a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -64,8 +64,10 @@ struct amdgpu_ih_ring {
bool enabled;
unsigned rptr;
- atomic_t lock;
struct amdgpu_ih_regs ih_regs;
+
+ /* For waiting on IH processing at checkpoint. */
+ wait_queue_head_t wait_process;
};
/* provided by the ih block */
@@ -87,6 +89,8 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih,
void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void amdgpu_ih_ring_write(struct amdgpu_ih_ring *ih, const uint32_t *iv,
unsigned int num_dw);
+int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih);
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih);
void amdgpu_ih_decode_iv_helper(struct amdgpu_device *adev,
struct amdgpu_ih_ring *ih,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index afbbec82a289..af026109421a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -65,6 +65,41 @@
#define AMDGPU_WAIT_IDLE_TIMEOUT 200
+const char *soc15_ih_clientid_name[] = {
+ "IH",
+ "SDMA2 or ACP",
+ "ATHUB",
+ "BIF",
+ "SDMA3 or DCE",
+ "SDMA4 or ISP",
+ "VMC1 or PCIE0",
+ "RLC",
+ "SDMA0",
+ "SDMA1",
+ "SE0SH",
+ "SE1SH",
+ "SE2SH",
+ "SE3SH",
+ "VCN1 or UVD1",
+ "THM",
+ "VCN or UVD",
+ "SDMA5 or VCE0",
+ "VMC",
+ "SDMA6 or XDMA",
+ "GRBM_CP",
+ "ATS",
+ "ROM_SMUIO",
+ "DF",
+ "SDMA7 or VCE1",
+ "PWR",
+ "reserved",
+ "UTCL2",
+ "EA",
+ "UTCL2LOG",
+ "MP0",
+ "MP1"
+};
+
/**
* amdgpu_hotplug_work_func - work handler for display hotplug event
*
@@ -535,7 +570,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
- if (!src)
+ if (!src || !src->funcs || !src->funcs->set)
continue;
for (k = 0; k < src->num_types; k++)
amdgpu_irq_update(adev, src, k);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 64beb3399604..ada807de978b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -27,7 +27,6 @@
*/
#include "amdgpu.h"
-#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
@@ -287,22 +286,30 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
break;
case AMDGPU_INFO_FW_TA:
switch (query_fw->index) {
- case 0:
+ case TA_FW_TYPE_PSP_XGMI:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_xgmi_ucode_version;
break;
- case 1:
+ case TA_FW_TYPE_PSP_RAS:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_ras_ucode_version;
break;
- case 2:
+ case TA_FW_TYPE_PSP_HDCP:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_hdcp_ucode_version;
break;
- case 3:
+ case TA_FW_TYPE_PSP_DTM:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_dtm_ucode_version;
break;
+ case TA_FW_TYPE_PSP_RAP:
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_rap_ucode_version;
+ break;
+ case TA_FW_TYPE_PSP_SECUREDISPLAY:
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_securedisplay_ucode_version;
+ break;
default:
return -EINVAL;
}
@@ -981,6 +988,63 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
min_t(u64, size, sizeof(ras_mask))) ?
-EFAULT : 0;
}
+ case AMDGPU_INFO_VIDEO_CAPS: {
+ const struct amdgpu_video_codecs *codecs;
+ struct drm_amdgpu_info_video_caps *caps;
+ int r;
+
+ switch (info->video_cap.type) {
+ case AMDGPU_INFO_VIDEO_CAPS_DECODE:
+ r = amdgpu_asic_query_video_codecs(adev, false, &codecs);
+ if (r)
+ return -EINVAL;
+ break;
+ case AMDGPU_INFO_VIDEO_CAPS_ENCODE:
+ r = amdgpu_asic_query_video_codecs(adev, true, &codecs);
+ if (r)
+ return -EINVAL;
+ break;
+ default:
+ DRM_DEBUG_KMS("Invalid request %d\n",
+ info->video_cap.type);
+ return -EINVAL;
+ }
+
+ caps = kzalloc(sizeof(*caps), GFP_KERNEL);
+ if (!caps)
+ return -ENOMEM;
+
+ for (i = 0; i < codecs->codec_count; i++) {
+ int idx = codecs->codec_array[i].codec_type;
+
+ switch (idx) {
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9:
+ case AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1:
+ caps->codec_info[idx].valid = 1;
+ caps->codec_info[idx].max_width =
+ codecs->codec_array[i].max_width;
+ caps->codec_info[idx].max_height =
+ codecs->codec_array[i].max_height;
+ caps->codec_info[idx].max_pixels_per_frame =
+ codecs->codec_array[i].max_pixels_per_frame;
+ caps->codec_info[idx].max_level =
+ codecs->codec_array[i].max_level;
+ break;
+ default:
+ break;
+ }
+ }
+ r = copy_to_user(out, caps,
+ min((size_t)size, sizeof(*caps))) ? -EFAULT : 0;
+ kfree(caps);
+ return r;
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -1262,16 +1326,25 @@ void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
*/
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
struct drm_amdgpu_info_firmware fw_info;
struct drm_amdgpu_query_fw query_fw;
struct atom_context *ctx = adev->mode_info.atom_context;
int ret, i;
+ static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = {
+#define TA_FW_NAME(type) [TA_FW_TYPE_PSP_##type] = #type
+ TA_FW_NAME(XGMI),
+ TA_FW_NAME(RAS),
+ TA_FW_NAME(HDCP),
+ TA_FW_NAME(DTM),
+ TA_FW_NAME(RAP),
+ TA_FW_NAME(SECUREDISPLAY),
+#undef TA_FW_NAME
+ };
+
/* VCE */
query_fw.fw_type = AMDGPU_INFO_FW_VCE;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
@@ -1389,31 +1462,14 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
fw_info.feature, fw_info.ver);
query_fw.fw_type = AMDGPU_INFO_FW_TA;
- for (i = 0; i < 4; i++) {
+ for (i = TA_FW_TYPE_PSP_XGMI; i < TA_FW_TYPE_MAX_INDEX; i++) {
query_fw.index = i;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
if (ret)
continue;
- switch (query_fw.index) {
- case 0:
- seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
- "RAS", fw_info.feature, fw_info.ver);
- break;
- case 1:
- seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
- "XGMI", fw_info.feature, fw_info.ver);
- break;
- case 2:
- seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
- "HDCP", fw_info.feature, fw_info.ver);
- break;
- case 3:
- seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
- "DTM", fw_info.feature, fw_info.ver);
- break;
- default:
- return -EINVAL;
- }
+
+ seq_printf(m, "TA %s feature version: 0x%08x, firmware version: 0x%08x\n",
+ ta_fw_name[i], fw_info.feature, fw_info.ver);
}
/* SMC */
@@ -1472,17 +1528,18 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
return 0;
}
-static const struct drm_info_list amdgpu_firmware_info_list[] = {
- {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
-};
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_firmware_info);
+
#endif
-int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
- ARRAY_SIZE(amdgpu_firmware_info_list));
-#else
- return 0;
+ struct drm_minor *minor = adev_to_drm(adev)->primary;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file("amdgpu_firmware_info", 0444, root,
+ adev, &amdgpu_debugfs_firmware_info_fops);
+
#endif
}
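
The new AMDGPU_INFO_VIDEO_CAPS query is reached through the existing INFO ioctl. A hedged userspace sketch follows; the struct and macro names mirror the uapi additions this series depends on and should be treated as assumptions, not a stable reference.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int demo_query_decode_caps(int fd, struct drm_amdgpu_info_video_caps *caps)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uint64_t)(uintptr_t)caps;
	request.return_size = sizeof(*caps);
	request.query = AMDGPU_INFO_VIDEO_CAPS;
	request.video_cap.type = AMDGPU_INFO_VIDEO_CAPS_DECODE;

	/* returns 0 on success; caps->codec_info[] is then filled per codec */
	return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
}
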
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 319cb19e1b99..cb0b581bbce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -602,6 +602,14 @@ int amdgpu_display_get_crtc_scanoutpos(struct drm_device *dev,
int *hpos, ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
+int amdgpu_display_gem_fb_init(struct drm_device *dev,
+ struct amdgpu_framebuffer *rfb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+int amdgpu_display_gem_fb_verify_and_init(
+ struct drm_device *dev, struct amdgpu_framebuffer *rfb,
+ struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
int amdgpu_display_framebuffer_init(struct drm_device *dev,
struct amdgpu_framebuffer *rfb,
const struct drm_mode_fb_cmd2 *mode_cmd,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 984dcf5a475e..ac1bb5089260 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1061,13 +1061,17 @@ static const char *amdgpu_vram_names[] = {
*/
int amdgpu_bo_init(struct amdgpu_device *adev)
{
- /* reserve PAT memory space to WC for VRAM */
- arch_io_reserve_memtype_wc(adev->gmc.aper_base,
- adev->gmc.aper_size);
+ /* On A+A platform, VRAM can be mapped as WB */
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ /* reserve PAT memory space to WC for VRAM */
+ arch_io_reserve_memtype_wc(adev->gmc.aper_base,
+ adev->gmc.aper_size);
+
+ /* Add an MTRR for the VRAM */
+ adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
+ adev->gmc.aper_size);
+ }
- /* Add an MTRR for the VRAM */
- adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
- adev->gmc.aper_size);
DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
adev->gmc.mc_vram_size >> 20,
(unsigned long long)adev->gmc.aper_size >> 20);
@@ -1085,8 +1089,10 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
amdgpu_ttm_fini(adev);
- arch_phys_wc_del(adev->gmc.vram_mtrr);
- arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ arch_phys_wc_del(adev->gmc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 9ac37569823f..54ceb065e546 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -100,7 +100,6 @@ struct amdgpu_bo {
struct amdgpu_bo *parent;
struct amdgpu_bo *shadow;
- struct amdgpu_mn *mn;
#ifdef CONFIG_MMU_NOTIFIER
@@ -329,7 +328,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
-int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
bool amdgpu_bo_support_uswc(u64 bo_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 839917eb7bc3..bae304b0d67a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -34,6 +34,7 @@
#include "psp_v10_0.h"
#include "psp_v11_0.h"
#include "psp_v12_0.h"
+#include "psp_v13_0.h"
#include "amdgpu_ras.h"
#include "amdgpu_securedisplay.h"
@@ -56,7 +57,7 @@ static int psp_load_smu_fw(struct psp_context *psp);
* - Load XGMI/RAS/HDCP/DTM TA if any
*
* This new sequence is required for
- * - Arcturus
+ * - Arcturus and onwards
* - Navi12 and onwards
*/
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
@@ -71,7 +72,7 @@ static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp
if (adev->flags & AMD_IS_APU)
return;
- if ((adev->asic_type == CHIP_ARCTURUS) ||
+ if ((adev->asic_type >= CHIP_ARCTURUS) ||
(adev->asic_type >= CHIP_NAVI12))
psp->pmfw_centralized_cstate_management = true;
}
@@ -109,6 +110,9 @@ static int psp_early_init(void *handle)
case CHIP_RENOIR:
psp_v12_0_set_psp_funcs(psp);
break;
+ case CHIP_ALDEBARAN:
+ psp_v13_0_set_psp_funcs(psp);
+ break;
default:
return -EINVAL;
}
@@ -383,7 +387,7 @@ static int psp_tmr_init(struct psp_context *psp)
* Note: this memory needs to be reserved until the driver
* is uninitialized.
*/
- tmr_size = PSP_TMR_SIZE;
+ tmr_size = PSP_TMR_SIZE(psp->adev);
/* For ASICs that support RLC autoload, PSP will parse the TOC
* and calculate the total TMR size needed */
@@ -399,10 +403,20 @@ static int psp_tmr_init(struct psp_context *psp)
}
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE,
+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev),
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+ /* Workaround for tmr_mc_addr:
+ * PSP requires an address in the FB aperture, but right now the driver
+ * produces tmr_mc_addr in the GART aperture. Convert it back to the FB
+ * aperture for PSP. This will be reverted once we get a fix from PSP FW.
+ */
+ if (psp->adev->asic_type == CHIP_ALDEBARAN) {
+ psp->tmr_mc_addr -= psp->adev->gmc.fb_start;
+ psp->tmr_mc_addr += psp->adev->gmc.fb_start_original;
+ }
+
return ret;
}
@@ -542,6 +556,28 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
return ret;
}
+static int psp_rl_load(struct amdgpu_device *adev)
+{
+ struct psp_context *psp = &adev->psp;
+ struct psp_gfx_cmd_resp *cmd = psp->cmd;
+
+ if (psp->rl_bin_size == 0)
+ return 0;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->rl_start_addr, psp->rl_bin_size);
+
+ memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+
+ cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
+ cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
+ cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl_bin_size;
+ cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
+
+ return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+}
+
static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint64_t asd_mc, uint32_t size)
{
@@ -755,8 +791,9 @@ static int psp_xgmi_unload(struct psp_context *psp)
struct psp_gfx_cmd_resp *cmd;
struct amdgpu_device *adev = psp->adev;
- /* XGMI TA unload currently is not supported on Arcturus */
- if (adev->asic_type == CHIP_ARCTURUS)
+ /* XGMI TA unload is currently not supported on Arcturus or Aldebaran A+A */
+ if (adev->asic_type == CHIP_ARCTURUS ||
+ (adev->asic_type == CHIP_ALDEBARAN && adev->gmc.xgmi.connected_to_cpu))
return 0;
/*
@@ -1561,6 +1598,7 @@ static int psp_rap_unload(struct psp_context *psp)
static int psp_rap_initialize(struct psp_context *psp)
{
int ret;
+ enum ta_rap_status status = TA_RAP_STATUS__SUCCESS;
/*
* TODO: bypass the initialize in sriov for now
@@ -1584,8 +1622,8 @@ static int psp_rap_initialize(struct psp_context *psp)
if (ret)
return ret;
- ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE);
- if (ret != TA_RAP_STATUS__SUCCESS) {
+ ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
+ if (ret || status != TA_RAP_STATUS__SUCCESS) {
psp_rap_unload(psp);
amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
@@ -1594,8 +1632,10 @@ static int psp_rap_initialize(struct psp_context *psp)
psp->rap_context.rap_initialized = false;
- dev_warn(psp->adev->dev, "RAP TA initialize fail.\n");
- return -EINVAL;
+ dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
+ ret, status);
+
+ return ret;
}
return 0;
@@ -1620,13 +1660,13 @@ static int psp_rap_terminate(struct psp_context *psp)
return ret;
}
-int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status)
{
struct ta_rap_shared_memory *rap_cmd;
- int ret;
+ int ret = 0;
if (!psp->rap_context.rap_initialized)
- return -EINVAL;
+ return 0;
if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
ta_cmd_id != TA_CMD_RAP__VALIDATE_L0)
@@ -1642,14 +1682,16 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
rap_cmd->validation_method_id = METHOD_A;
ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id);
- if (ret) {
- mutex_unlock(&psp->rap_context.mutex);
- return ret;
- }
+ if (ret)
+ goto out_unlock;
+ if (status)
+ *status = rap_cmd->rap_status;
+
+out_unlock:
mutex_unlock(&psp->rap_context.mutex);
- return rap_cmd->rap_status;
+ return ret;
}
// RAP end
@@ -2276,6 +2318,12 @@ skip_memalloc:
return ret;
}
+ ret = psp_rl_load(adev);
+ if (ret) {
+ DRM_ERROR("PSP load RL failed!\n");
+ return ret;
+ }
+
if (psp->adev->psp.ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
@@ -2751,6 +2799,9 @@ int psp_init_sos_microcode(struct psp_context *psp,
adev->psp.spl_bin_size = le32_to_cpu(sos_hdr_v1_3->spl_size_bytes);
adev->psp.spl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
le32_to_cpu(sos_hdr_v1_3->spl_offset_bytes);
+ adev->psp.rl_bin_size = le32_to_cpu(sos_hdr_v1_3->rl_size_bytes);
+ adev->psp.rl_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_3->rl_offset_bytes);
}
break;
default:
@@ -3052,3 +3103,11 @@ const struct amdgpu_ip_block_version psp_v12_0_ip_block =
.rev = 0,
.funcs = &psp_ip_funcs,
};
+
+const struct amdgpu_ip_block_version psp_v13_0_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_PSP,
+ .major = 13,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &psp_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index cb50ba445f8c..64f143390835 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -37,7 +37,7 @@
#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
-#define PSP_TMR_SIZE 0x400000
+#define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
#define PSP_DTM_SHARED_MEM_SIZE 0x4000
#define PSP_RAP_SHARED_MEM_SIZE 0x4000
@@ -248,11 +248,13 @@ struct psp_context
uint32_t toc_bin_size;
uint32_t kdb_bin_size;
uint32_t spl_bin_size;
+ uint32_t rl_bin_size;
uint8_t *sys_start_addr;
uint8_t *sos_start_addr;
uint8_t *toc_start_addr;
uint8_t *kdb_start_addr;
uint8_t *spl_start_addr;
+ uint8_t *rl_start_addr;
/* tmr buffer */
struct amdgpu_bo *tmr_bo;
@@ -365,11 +367,13 @@ struct amdgpu_psp_funcs {
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
-extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
- uint32_t field_val, uint32_t mask, bool check_changed);
-
extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
extern const struct amdgpu_ip_block_version psp_v12_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
+
+extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index,
+ uint32_t field_val, uint32_t mask, bool check_changed);
int psp_gpu_reset(struct amdgpu_device *adev);
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
@@ -395,12 +399,11 @@ int psp_ras_trigger_error(struct psp_context *psp,
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
-int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_status *status);
int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_rlc_autoload_start(struct psp_context *psp);
-extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
uint32_t value);
int psp_ring_cmd_submit(struct psp_context *psp,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
index 8da5356c36f1..51909bf8798c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
@@ -48,6 +48,7 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
struct ta_rap_cmd_output_data *rap_cmd_output;
struct drm_device *dev = adev_to_drm(adev);
uint32_t op;
+ enum ta_rap_status status;
int ret;
if (*pos || size != 2)
@@ -70,9 +71,8 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
switch (op) {
case 2:
- ret = psp_rap_invoke(&adev->psp, op);
-
- if (ret == TA_RAP_STATUS__SUCCESS) {
+ ret = psp_rap_invoke(&adev->psp, op, &status);
+ if (!ret && status == TA_RAP_STATUS__SUCCESS) {
dev_info(adev->dev, "RAP L0 validate test success.\n");
} else {
rap_shared_mem = (struct ta_rap_shared_memory *)
@@ -97,6 +97,7 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
default:
dev_info(adev->dev, "Unsupported op id: %d, ", op);
dev_info(adev->dev, "Only support op 2(L0 validate test).\n");
+ break;
}
amdgpu_gfx_off_ctrl(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 1fb2a91ad30a..0e16683876aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -109,7 +109,7 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
ssize_t s;
char val[128];
- if (amdgpu_ras_error_query(obj->adev, &info))
+ if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
@@ -434,7 +434,7 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
return snprintf(buf, PAGE_SIZE,
"Query currently inaccessible\n");
- if (amdgpu_ras_error_query(obj->adev, &info))
+ if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
@@ -463,7 +463,7 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
- if (!con)
+ if (!adev->ras_features || !con)
return NULL;
if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
@@ -490,7 +490,7 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
struct ras_manager *obj;
int i;
- if (!con)
+ if (!adev->ras_features || !con)
return NULL;
if (head) {
@@ -590,7 +590,11 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
con->features |= BIT(head->block);
} else {
if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
- con->features &= ~BIT(head->block);
+ /* Skip clearing the gfx ras context feature for VEGA20 Gaming;
+ * it will be cleaned up later.
+ */
+ if (!(!adev->ras_features && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)))
+ con->features &= ~BIT(head->block);
put_obj(obj);
}
}
@@ -693,6 +697,10 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
if (ret)
return ret;
+ /* the gfx block ras disable cmd must be sent to the ras TA */
+ if (head->block == AMDGPU_RAS_BLOCK__GFX)
+ con->features |= BIT(head->block);
+
ret = amdgpu_ras_feature_enable(adev, head, 0);
}
} else
@@ -757,8 +765,8 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
/* feature ctl end */
/* query/inject/cure begin */
-int amdgpu_ras_error_query(struct amdgpu_device *adev,
- struct ras_query_if *info)
+int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
+ struct ras_query_if *info)
{
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
struct ras_err_data err_data = {0, 0, 0, NULL};
@@ -787,10 +795,16 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
case AMDGPU_RAS_BLOCK__GFX:
if (adev->gfx.funcs->query_ras_error_count)
adev->gfx.funcs->query_ras_error_count(adev, &err_data);
+
+ if (adev->gfx.funcs->query_ras_error_status)
+ adev->gfx.funcs->query_ras_error_status(adev);
break;
case AMDGPU_RAS_BLOCK__MMHUB:
if (adev->mmhub.funcs->query_ras_error_count)
adev->mmhub.funcs->query_ras_error_count(adev, &err_data);
+
+ if (adev->mmhub.funcs->query_ras_error_status)
+ adev->mmhub.funcs->query_ras_error_status(adev);
break;
case AMDGPU_RAS_BLOCK__PCIE_BIF:
if (adev->nbio.funcs->query_ras_error_count)
@@ -826,6 +840,35 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
return 0;
}
+int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block)
+{
+ if (!amdgpu_ras_is_supported(adev, block))
+ return -EINVAL;
+
+ switch (block) {
+ case AMDGPU_RAS_BLOCK__GFX:
+ if (adev->gfx.funcs->reset_ras_error_count)
+ adev->gfx.funcs->reset_ras_error_count(adev);
+
+ if (adev->gfx.funcs->reset_ras_error_status)
+ adev->gfx.funcs->reset_ras_error_status(adev);
+ break;
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ if (adev->mmhub.funcs->reset_ras_error_count)
+ adev->mmhub.funcs->reset_ras_error_count(adev);
+ break;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ if (adev->sdma.funcs->reset_ras_error_count)
+ adev->sdma.funcs->reset_ras_error_count(adev);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
/* Trigger XGMI/WAFL error */
static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
struct ta_ras_trigger_error_input *block_info)
@@ -913,7 +956,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
struct ras_manager *obj;
struct ras_err_data data = {0, 0};
- if (!con)
+ if (!adev->ras_features || !con)
return 0;
list_for_each_entry(obj, &con->head, node) {
@@ -921,7 +964,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
.head = obj->head,
};
- if (amdgpu_ras_error_query(adev, &info))
+ if (amdgpu_ras_query_error_status(adev, &info))
return 0;
data.ce_count += info.ce_count;
@@ -1137,16 +1180,17 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
*
*/
/* debugfs begin */
-static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
+static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct dentry *dir;
struct drm_minor *minor = adev_to_drm(adev)->primary;
- con->dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
- debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir,
- adev, &amdgpu_ras_debugfs_ctrl_ops);
- debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir,
- adev, &amdgpu_ras_debugfs_eeprom_ops);
+ dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
+ debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
+ &amdgpu_ras_debugfs_ctrl_ops);
+ debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
+ &amdgpu_ras_debugfs_eeprom_ops);
/*
* After one uncorrectable error happens, usually GPU recovery will
@@ -1156,24 +1200,24 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
* ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine
* will never be called.
*/
- debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir,
- &con->reboot);
+ debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
/*
* User could set this not to clean up hardware's error count register
* of RAS IPs during ras recovery.
*/
- debugfs_create_bool("disable_ras_err_cnt_harvest", 0644,
- con->dir, &con->disable_ras_err_cnt_harvest);
+ debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
+ &con->disable_ras_err_cnt_harvest);
+ return dir;
}
static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
- struct ras_fs_if *head)
+ struct ras_fs_if *head,
+ struct dentry *dir)
{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
- if (!obj || obj->ent)
+ if (!obj || !dir)
return;
get_obj(obj);
@@ -1182,14 +1226,14 @@ static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
head->debugfs_name,
sizeof(obj->fs_data.debugfs_name));
- obj->ent = debugfs_create_file(obj->fs_data.debugfs_name,
- S_IWUGO | S_IRUGO, con->dir, obj,
- &amdgpu_ras_debugfs_ops);
+ debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
+ obj, &amdgpu_ras_debugfs_ops);
}
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct dentry *dir;
struct ras_manager *obj;
struct ras_fs_if fs_info;
@@ -1200,7 +1244,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con)
return;
- amdgpu_ras_debugfs_create_ctrl_node(adev);
+ dir = amdgpu_ras_debugfs_create_ctrl_node(adev);
list_for_each_entry(obj, &con->head, node) {
if (amdgpu_ras_is_supported(adev, obj->head.block) &&
@@ -1208,34 +1252,11 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
sprintf(fs_info.debugfs_name, "%s_err_inject",
ras_block_str(obj->head.block));
fs_info.head = obj->head;
- amdgpu_ras_debugfs_create(adev, &fs_info);
+ amdgpu_ras_debugfs_create(adev, &fs_info, dir);
}
}
}
-static void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
- struct ras_common_if *head)
-{
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
-
- if (!obj || !obj->ent)
- return;
-
- obj->ent = NULL;
- put_obj(obj);
-}
-
-static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
-{
- struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- struct ras_manager *obj, *tmp;
-
- list_for_each_entry_safe(obj, tmp, &con->head, node) {
- amdgpu_ras_debugfs_remove(adev, &obj->head);
- }
-
- con->dir = NULL;
-}
/* debugfs end */
/* ras fs */
@@ -1282,8 +1303,17 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
{
- if (IS_ENABLED(CONFIG_DEBUG_FS))
- amdgpu_ras_debugfs_remove_all(adev);
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *con_obj, *ip_obj, *tmp;
+
+ if (IS_ENABLED(CONFIG_DEBUG_FS)) {
+ list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
+ ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
+ if (ip_obj)
+ put_obj(ip_obj);
+ }
+ }
+
amdgpu_ras_sysfs_remove_all(adev);
return 0;
}
@@ -1447,7 +1477,7 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
- if (!con)
+ if (!adev->ras_features || !con)
return;
list_for_each_entry(obj, &con->head, node) {
@@ -1464,7 +1494,7 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
continue;
- amdgpu_ras_error_query(adev, &info);
+ amdgpu_ras_query_error_status(adev, &info);
}
}
@@ -1495,7 +1525,7 @@ static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj;
- if (!con)
+ if (!adev->ras_features || !con)
return;
list_for_each_entry(obj, &con->head, node) {
@@ -1809,7 +1839,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
bool exc_err_limit = false;
int ret;
- if (con)
+ if (adev->ras_features && con)
data = &con->eh_data;
else
return 0;
@@ -1828,6 +1858,12 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
max_eeprom_records_len = amdgpu_ras_eeprom_get_record_max_length();
amdgpu_ras_validate_threshold(adev, max_eeprom_records_len);
+ /* Todo: during testing the SMU might fail to read the eeprom through I2C
+ * when the GPU is pending an XGMI reset at probe time
+ * (mostly after the second bus reset), so skip it for now
+ */
+ if (adev->gmc.xgmi.pending_reset)
+ return 0;
ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
/*
* This calling fails when exc_err_limit is true or
@@ -1928,11 +1964,11 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
return;
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
- dev_info(adev->dev, "HBM ECC is active.\n");
+ dev_info(adev->dev, "MEM ECC is active.\n");
*hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
} else
- dev_info(adev->dev, "HBM ECC is not presented.\n");
+ dev_info(adev->dev, "MEM ECC is not presented.\n");
if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
dev_info(adev->dev, "SRAM ECC is active.\n");
@@ -1970,6 +2006,15 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
amdgpu_ras_check_supported(adev, &con->hw_supported,
&con->supported);
if (!con->hw_supported || (adev->asic_type == CHIP_VEGA10)) {
+ /* Set the gfx block ras context feature for VEGA20 Gaming so the
+ * ras disable cmd is sent to the ras TA during ras late init.
+ */
+ if (!adev->ras_features && adev->asic_type == CHIP_VEGA20) {
+ con->features |= BIT(AMDGPU_RAS_BLOCK__GFX);
+
+ return 0;
+ }
+
r = 0;
goto release_con;
}
@@ -2083,8 +2128,12 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_manager *obj, *tmp;
- if (!con)
+ if (!adev->ras_features || !con) {
+ /* clean up the ras context for VEGA20 Gaming after sending the ras disable cmd */
+ amdgpu_release_ras_context(adev);
+
return;
+ }
if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
/* Set up all other IPs which are not implemented. There is a
@@ -2125,7 +2174,7 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- if (!con)
+ if (!adev->ras_features || !con)
return;
amdgpu_ras_disable_all_features(adev, 0);
@@ -2139,7 +2188,7 @@ int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- if (!con)
+ if (!adev->ras_features || !con)
return 0;
/* Need disable ras on all IPs here before ip [hw/sw]fini */
@@ -2152,7 +2201,7 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- if (!con)
+ if (!adev->ras_features || !con)
return 0;
amdgpu_ras_fs_fini(adev);
@@ -2196,18 +2245,16 @@ bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev)
return false;
}
-bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev)
+void amdgpu_release_ras_context(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- bool exc_err_limit = false;
- if (con && (amdgpu_bad_page_threshold != 0))
- amdgpu_ras_eeprom_check_err_threshold(&con->eeprom_control,
- &exc_err_limit);
+ if (!con)
+ return;
- /*
- * We are only interested in variable exc_err_limit,
- * as it says if GPU is in bad state or not.
- */
- return exc_err_limit;
+ if (!adev->ras_features && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) {
+ con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX);
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+ }
}
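Background on the RAS debugfs rework above: the cached dentries (con->dir, obj->ent) can be dropped because debugfs entries created under a directory are torn down together with that directory, so per-file bookkeeping is unnecessary. A minimal sketch of that create pattern, not part of the patch, assuming the usual debugfs lifetime rules and a hypothetical example_flag:

static bool example_flag;

static struct dentry *example_ras_debugfs(struct amdgpu_device *adev)
{
        struct dentry *dir;

        dir = debugfs_create_dir("ras_example",
                                 adev_to_drm(adev)->primary->debugfs_root);

        /* children hang off "dir"; their dentries need not be stored */
        debugfs_create_bool("example_flag", 0644, dir, &example_flag);

        return dir; /* removing "dir" later tears down its children too */
}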
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 762f5e46c007..60df268a0c66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -318,8 +318,6 @@ struct amdgpu_ras {
uint32_t supported;
uint32_t features;
struct list_head head;
- /* debugfs */
- struct dentry *dir;
/* sysfs */
struct device_attribute features_attr;
struct bin_attribute badpages_attr;
@@ -395,8 +393,6 @@ struct ras_manager {
struct list_head node;
/* the device */
struct amdgpu_device *adev;
- /* debugfs */
- struct dentry *ent;
/* sysfs */
struct device_attribute sysfs_attr;
int attr_inuse;
@@ -495,8 +491,6 @@ void amdgpu_ras_suspend(struct amdgpu_device *adev);
unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
bool is_ce);
-bool amdgpu_ras_check_err_threshold(struct amdgpu_device *adev);
-
/* error handling functions */
int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
struct eeprom_table_record *bps, int pages);
@@ -594,9 +588,12 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
-int amdgpu_ras_error_query(struct amdgpu_device *adev,
+int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
struct ras_query_if *info);
+int amdgpu_ras_reset_error_status(struct amdgpu_device *adev,
+ enum amdgpu_ras_block block);
+
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info);
@@ -629,4 +626,6 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev);
+
+void amdgpu_release_ras_context(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 19d9aa76cfbf..a05dbbbd9803 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -434,47 +434,28 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
return curr_address;
}
-int amdgpu_ras_eeprom_check_err_threshold(
- struct amdgpu_ras_eeprom_control *control,
- bool *exceed_err_limit)
+bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- unsigned char buff[EEPROM_ADDRESS_SIZE +
- EEPROM_TABLE_HEADER_SIZE] = { 0 };
- struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
- struct i2c_msg msg = {
- .addr = control->i2c_address,
- .flags = I2C_M_RD,
- .len = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
- .buf = buff,
- };
- int ret;
-
- *exceed_err_limit = false;
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
if (!__is_ras_eeprom_supported(adev))
- return 0;
-
- /* read EEPROM table header */
- mutex_lock(&control->tbl_mutex);
- ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
- if (ret < 1) {
- dev_err(adev->dev, "Failed to read EEPROM table header.\n");
- goto err;
- }
+ return false;
- __decode_table_header_from_buff(hdr, &buff[2]);
+ /* skip checking the eeprom table for VEGA20 Gaming */
+ if (!con)
+ return false;
+ else if (!(con->features & BIT(AMDGPU_RAS_BLOCK__UMC)))
+ return false;
- if (hdr->header == EEPROM_TABLE_HDR_BAD) {
+ if (con->eeprom_control.tbl_hdr.header == EEPROM_TABLE_HDR_BAD) {
dev_warn(adev->dev, "This GPU is in BAD status.");
dev_warn(adev->dev, "Please retire it or setting one bigger "
"threshold value when reloading driver.\n");
- *exceed_err_limit = true;
+ return true;
}
-err:
- mutex_unlock(&control->tbl_mutex);
- return 0;
+ return false;
}
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index c7a5e5c7c61e..178721170974 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -80,9 +80,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
bool *exceed_err_limit);
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
-int amdgpu_ras_eeprom_check_err_threshold(
- struct amdgpu_ras_eeprom_control *control,
- bool *exceed_err_limit);
+bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev);
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *records,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
new file mode 100644
index 000000000000..b49a61d07d60
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef __AMDGPU_RES_CURSOR_H__
+#define __AMDGPU_RES_CURSOR_H__
+
+#include <drm/drm_mm.h>
+#include <drm/ttm/ttm_resource.h>
+
+/* cursor state for walking over vram_mgr and gtt_mgr allocations */
+struct amdgpu_res_cursor {
+ uint64_t start;
+ uint64_t size;
+ uint64_t remaining;
+ struct drm_mm_node *node;
+};
+
+/**
+ * amdgpu_res_first - initialize an amdgpu_res_cursor
+ *
+ * @res: TTM resource object to walk
+ * @start: Start of the range
+ * @size: Size of the range
+ * @cur: cursor object to initialize
+ *
+ * Start walking over the range of allocations starting at @start and spanning @size bytes.
+ */
+static inline void amdgpu_res_first(struct ttm_resource *res,
+ uint64_t start, uint64_t size,
+ struct amdgpu_res_cursor *cur)
+{
+ struct drm_mm_node *node;
+
+ if (!res || !res->mm_node) {
+ cur->start = start;
+ cur->size = size;
+ cur->remaining = size;
+ cur->node = NULL;
+ return;
+ }
+
+ BUG_ON(start + size > res->num_pages << PAGE_SHIFT);
+
+ node = res->mm_node;
+ while (start > node->size << PAGE_SHIFT)
+ start -= node++->size << PAGE_SHIFT;
+
+ cur->start = (node->start << PAGE_SHIFT) + start;
+ cur->size = min((node->size << PAGE_SHIFT) - start, size);
+ cur->remaining = size;
+ cur->node = node;
+}
+
+/**
+ * amdgpu_res_next - advance the cursor
+ *
+ * @cur: the cursor to advance
+ * @size: number of bytes to move forward
+ *
+ * Move the cursor @size bytes forward, walking to the next node if necessary.
+ */
+static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
+{
+ struct drm_mm_node *node = cur->node;
+
+ BUG_ON(size > cur->remaining);
+
+ cur->remaining -= size;
+ if (!cur->remaining)
+ return;
+
+ cur->size -= size;
+ if (cur->size) {
+ cur->start += size;
+ return;
+ }
+
+ cur->node = ++node;
+ cur->start = node->start << PAGE_SHIFT;
+ cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
+}
+
+#endif
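The cursor introduced by this new header replaces the open-coded drm_mm_node walks removed later in this patch: a caller initializes the cursor over a byte range and loops on cursor.remaining, consuming one contiguous span per iteration. A minimal sketch of that calling pattern, not part of the patch, where process_span() is a hypothetical helper:

static void walk_mem(struct ttm_resource *mem)
{
        struct amdgpu_res_cursor cursor;

        amdgpu_res_first(mem, 0, (u64)mem->num_pages << PAGE_SHIFT, &cursor);
        while (cursor.remaining) {
                /* cursor.start/cursor.size describe one contiguous span */
                process_span(cursor.start, cursor.size);
                amdgpu_res_next(&cursor, cursor.size);
        }
}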
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index e5b8fb8e75c5..f8fb755e3aa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -64,6 +64,11 @@ struct amdgpu_sdma {
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
struct amdgpu_irq_src ecc_irq;
+ struct amdgpu_irq_src vm_hole_irq;
+ struct amdgpu_irq_src doorbell_invalid_irq;
+ struct amdgpu_irq_src pool_timeout_irq;
+ struct amdgpu_irq_src srbm_write_irq;
+
int num_instances;
uint32_t srbm_soft_reset;
bool has_page_queue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
index 834440ab9ff7..9cf856c94f94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
@@ -69,6 +69,9 @@ void psp_securedisplay_parse_resp_status(struct psp_context *psp,
case TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR:
dev_err(psp->adev->dev, "Secure display: Failed to Read CRC");
break;
+ case TA_SECUREDISPLAY_STATUS__I2C_INIT_ERROR:
+ dev_err(psp->adev->dev, "Secure display: Failed to initialize I2C.");
+ break;
default:
dev_err(psp->adev->dev, "Secure display: Failed to parse status: %d\n", status);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
index 03009157aec8..b860ec913ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_smuio.h
@@ -28,6 +28,8 @@ struct amdgpu_smuio_funcs {
u32 (*get_rom_data_offset)(struct amdgpu_device *adev);
void (*update_rom_clock_gating)(struct amdgpu_device *adev, bool enable);
void (*get_clock_gating_state)(struct amdgpu_device *adev, u32 *flags);
+ u32 (*get_die_id)(struct amdgpu_device *adev);
+ bool (*is_host_gpu_xgmi_supported)(struct amdgpu_device *adev);
};
struct amdgpu_smuio {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index a785acc09f20..1c6131489a85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -47,7 +47,6 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -57,6 +56,7 @@
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
@@ -179,54 +179,11 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
}
/**
- * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
- *
- * @bo: The bo to assign the memory to.
- * @mm_node: Memory manager node for drm allocator.
- * @mem: The region where the bo resides.
- *
- */
-static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
- struct drm_mm_node *mm_node,
- struct ttm_resource *mem)
-{
- uint64_t addr = 0;
-
- if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
- addr = mm_node->start << PAGE_SHIFT;
- addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
- mem->mem_type);
- }
- return addr;
-}
-
-/**
- * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
- * @offset. It also modifies the offset to be within the drm_mm_node returned
- *
- * @mem: The region where the bo resides.
- * @offset: The offset that drm_mm_node is used for finding.
- *
- */
-static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
- uint64_t *offset)
-{
- struct drm_mm_node *mm_node = mem->mm_node;
-
- while (*offset >= (mm_node->size << PAGE_SHIFT)) {
- *offset -= (mm_node->size << PAGE_SHIFT);
- ++mm_node;
- }
- return mm_node;
-}
-
-/**
* amdgpu_ttm_map_buffer - Map memory into the GART windows
* @bo: buffer object to map
* @mem: memory object to map
- * @mm_node: drm_mm node object to map
+ * @mm_cur: range to map
* @num_pages: number of pages to map
- * @offset: offset into @mm_node where to start
* @window: which GART window to use
* @ring: DMA ring to use for the copy
* @tmz: if we should setup a TMZ enabled mapping
@@ -237,10 +194,10 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
*/
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
- struct drm_mm_node *mm_node,
- unsigned num_pages, uint64_t offset,
- unsigned window, struct amdgpu_ring *ring,
- bool tmz, uint64_t *addr)
+ struct amdgpu_res_cursor *mm_cur,
+ unsigned num_pages, unsigned window,
+ struct amdgpu_ring *ring, bool tmz,
+ uint64_t *addr)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -257,14 +214,15 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
/* Map only what can't be accessed directly */
if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
- *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
+ *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
+ mm_cur->start;
return 0;
}
*addr = adev->gmc.gart_start;
*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE;
- *addr += offset & ~PAGE_MASK;
+ *addr += mm_cur->start & ~PAGE_MASK;
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
num_bytes = num_pages * 8;
@@ -292,17 +250,17 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
cpu_addr = &job->ibs[0].ptr[num_dw];
if (mem->mem_type == TTM_PL_TT) {
- dma_addr_t *dma_address;
+ dma_addr_t *dma_addr;
- dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
- r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+ dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
+ r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
cpu_addr);
if (r)
goto error_free;
} else {
dma_addr_t dma_address;
- dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+ dma_address = mm_cur->start;
dma_address += adev->vm_manager.vram_base_offset;
for (i = 0; i < num_pages; ++i) {
@@ -354,9 +312,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE);
- uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- struct drm_mm_node *src_mm, *dst_mm;
+ struct amdgpu_res_cursor src_mm, dst_mm;
struct dma_fence *fence = NULL;
int r = 0;
@@ -365,29 +322,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
return -EINVAL;
}
- src_offset = src->offset;
- if (src->mem->mm_node) {
- src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
- src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
- } else {
- src_mm = NULL;
- src_node_size = ULLONG_MAX;
- }
-
- dst_offset = dst->offset;
- if (dst->mem->mm_node) {
- dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
- dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
- } else {
- dst_mm = NULL;
- dst_node_size = ULLONG_MAX;
- }
+ amdgpu_res_first(src->mem, src->offset, size, &src_mm);
+ amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
mutex_lock(&adev->mman.gtt_window_lock);
-
- while (size) {
- uint32_t src_page_offset = src_offset & ~PAGE_MASK;
- uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
+ while (src_mm.remaining) {
+ uint32_t src_page_offset = src_mm.start & ~PAGE_MASK;
+ uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK;
struct dma_fence *next;
uint32_t cur_size;
uint64_t from, to;
@@ -396,19 +337,19 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
* begins at an offset, then adjust the size accordingly
*/
cur_size = max(src_page_offset, dst_page_offset);
- cur_size = min(min3(src_node_size, dst_node_size, size),
+ cur_size = min(min3(src_mm.size, dst_mm.size, size),
(uint64_t)(GTT_MAX_BYTES - cur_size));
/* Map src to window 0 and dst to window 1. */
- r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
+ r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
PFN_UP(cur_size + src_page_offset),
- src_offset, 0, ring, tmz, &from);
+ 0, ring, tmz, &from);
if (r)
goto error;
- r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
+ r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
PFN_UP(cur_size + dst_page_offset),
- dst_offset, 1, ring, tmz, &to);
+ 1, ring, tmz, &to);
if (r)
goto error;
@@ -420,27 +361,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
dma_fence_put(fence);
fence = next;
- size -= cur_size;
- if (!size)
- break;
-
- src_node_size -= cur_size;
- if (!src_node_size) {
- ++src_mm;
- src_node_size = src_mm->size << PAGE_SHIFT;
- src_offset = 0;
- } else {
- src_offset += cur_size;
- }
-
- dst_node_size -= cur_size;
- if (!dst_node_size) {
- ++dst_mm;
- dst_node_size = dst_mm->size << PAGE_SHIFT;
- dst_offset = 0;
- } else {
- dst_offset += cur_size;
- }
+ amdgpu_res_next(&src_mm, cur_size);
+ amdgpu_res_next(&dst_mm, cur_size);
}
error:
mutex_unlock(&adev->mman.gtt_window_lock);
@@ -519,7 +441,8 @@ error:
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
struct ttm_resource *mem)
{
- struct drm_mm_node *nodes = mem->mm_node;
+ uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+ struct amdgpu_res_cursor cursor;
if (mem->mem_type == TTM_PL_SYSTEM ||
mem->mem_type == TTM_PL_TT)
@@ -527,12 +450,13 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
if (mem->mem_type != TTM_PL_VRAM)
return false;
+ amdgpu_res_first(mem, 0, mem_size, &cursor);
+
/* ttm_resource_ioremap only supports contiguous memory */
- if (nodes->size != mem->num_pages)
+ if (cursor.size != mem_size)
return false;
- return ((nodes->start + nodes->size) << PAGE_SHIFT)
- <= adev->gmc.visible_vram_size;
+ return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
}
/*
@@ -674,7 +598,10 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resourc
mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
- mem->bus.caching = ttm_write_combined;
+ if (adev->gmc.xgmi.connected_to_cpu)
+ mem->bus.caching = ttm_cached;
+ else
+ mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
@@ -686,12 +613,10 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- uint64_t offset = (page_offset << PAGE_SHIFT);
- struct drm_mm_node *mm;
+ struct amdgpu_res_cursor cursor;
- mm = amdgpu_find_mm_node(&bo->mem, &offset);
- offset += adev->gmc.aper_base;
- return mm->start + (offset >> PAGE_SHIFT);
+ amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+ return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
/**
@@ -1430,6 +1355,10 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
flags |= AMDGPU_PTE_SNOOPED;
}
+ if (mem && mem->mem_type == TTM_PL_VRAM &&
+ mem->bus.caching == ttm_cached)
+ flags |= AMDGPU_PTE_SNOOPED;
+
return flags;
}
@@ -1469,7 +1398,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
unsigned long num_pages = bo->mem.num_pages;
- struct drm_mm_node *node = bo->mem.mm_node;
+ struct amdgpu_res_cursor cursor;
struct dma_resv_list *flist;
struct dma_fence *f;
int i;
@@ -1501,13 +1430,15 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
case TTM_PL_VRAM:
/* Check each drm MM node individually */
- while (num_pages) {
- if (place->fpfn < (node->start + node->size) &&
- !(place->lpfn && place->lpfn <= node->start))
+ amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
+ &cursor);
+ while (cursor.remaining) {
+ if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
+ && !(place->lpfn &&
+ place->lpfn <= PFN_DOWN(cursor.start)))
return true;
- num_pages -= node->size;
- ++node;
+ amdgpu_res_next(&cursor, cursor.size);
}
return false;
@@ -1531,41 +1462,36 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* access for debugging purposes.
*/
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
- unsigned long offset,
- void *buf, int len, int write)
+ unsigned long offset, void *buf, int len,
+ int write)
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
- struct drm_mm_node *nodes;
+ struct amdgpu_res_cursor cursor;
+ unsigned long flags;
uint32_t value = 0;
int ret = 0;
- uint64_t pos;
- unsigned long flags;
if (bo->mem.mem_type != TTM_PL_VRAM)
return -EIO;
- pos = offset;
- nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
- pos += (nodes->start << PAGE_SHIFT);
-
- while (len && pos < adev->gmc.mc_vram_size) {
- uint64_t aligned_pos = pos & ~(uint64_t)3;
- uint64_t bytes = 4 - (pos & 3);
- uint32_t shift = (pos & 3) * 8;
+ amdgpu_res_first(&bo->mem, offset, len, &cursor);
+ while (cursor.remaining) {
+ uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
+ uint64_t bytes = 4 - (cursor.start & 3);
+ uint32_t shift = (cursor.start & 3) * 8;
uint32_t mask = 0xffffffff << shift;
- if (len < bytes) {
- mask &= 0xffffffff >> (bytes - len) * 8;
- bytes = len;
+ if (cursor.size < bytes) {
+ mask &= 0xffffffff >> (bytes - cursor.size) * 8;
+ bytes = cursor.size;
}
if (mask != 0xffffffff) {
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
- if (!write || mask != 0xffffffff)
- value = RREG32_NO_KIQ(mmMM_DATA);
+ value = RREG32_NO_KIQ(mmMM_DATA);
if (write) {
value &= ~mask;
value |= (*(uint32_t *)buf << shift) & mask;
@@ -1577,21 +1503,15 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
memcpy(buf, &value, bytes);
}
} else {
- bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
- bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
-
- amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
- bytes, write);
+ bytes = cursor.size & ~0x3ull;
+ amdgpu_device_vram_access(adev, cursor.start,
+ (uint32_t *)buf, bytes,
+ write);
}
ret += bytes;
buf = (uint8_t *)buf + bytes;
- pos += bytes;
- len -= bytes;
- if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
- ++nodes;
- pos = (nodes->start << PAGE_SHIFT);
- }
+ amdgpu_res_next(&cursor, bytes);
}
return ret;
@@ -1696,7 +1616,7 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
ctx->train_data_size =
GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
-
+
DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
ctx->train_data_size,
ctx->p2c_train_data_offset,
@@ -1812,8 +1732,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
- adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
- adev->gmc.visible_vram_size);
+#ifdef CONFIG_X86
+ if (adev->gmc.xgmi.connected_to_cpu)
+ adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
+ adev->gmc.visible_vram_size);
+ else
+#endif
+ adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
+ adev->gmc.visible_vram_size);
#endif
/*
@@ -2053,7 +1980,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
return r;
if (vm_needs_flush) {
- job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
+ job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
+ adev->gmc.pdb0_bo : adev->gart.bo);
job->vm_needs_flush = true;
}
if (resv) {
@@ -2104,9 +2032,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- struct drm_mm_node *mm_node;
- unsigned long num_pages;
+ struct amdgpu_res_cursor cursor;
unsigned int num_loops, num_dw;
+ uint64_t num_bytes;
struct amdgpu_job *job;
int r;
@@ -2122,15 +2050,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return r;
}
- num_pages = bo->tbo.mem.num_pages;
- mm_node = bo->tbo.mem.mm_node;
+ num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
num_loops = 0;
- while (num_pages) {
- uint64_t byte_count = mm_node->size << PAGE_SHIFT;
- num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
- num_pages -= mm_node->size;
- ++mm_node;
+ amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+ while (cursor.remaining) {
+ num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
+ amdgpu_res_next(&cursor, cursor.size);
}
num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
@@ -2152,27 +2078,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
}
}
- num_pages = bo->tbo.mem.num_pages;
- mm_node = bo->tbo.mem.mm_node;
-
- while (num_pages) {
- uint64_t byte_count = mm_node->size << PAGE_SHIFT;
- uint64_t dst_addr;
-
- dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
- while (byte_count) {
- uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
- max_bytes);
+ amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+ while (cursor.remaining) {
+ uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
+ uint64_t dst_addr = cursor.start;
- amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
- dst_addr, cur_size_in_bytes);
+ dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+ amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
+ cur_size);
- dst_addr += cur_size_in_bytes;
- byte_count -= cur_size_in_bytes;
- }
-
- num_pages -= mm_node->size;
- ++mm_node;
+ amdgpu_res_next(&cursor, cur_size);
}
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@@ -2191,36 +2106,74 @@ error_free:
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
+static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- unsigned ttm_pl = (uintptr_t)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ TTM_PL_VRAM);
struct drm_printer p = drm_seq_file_printer(m);
man->func->debug(man, &p);
return 0;
}
-static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
+static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
}
-static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
- {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
- {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
- {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
- {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
- {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
- {"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
-};
+static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ TTM_PL_TT);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ man->func->debug(man, &p);
+ return 0;
+}
+
+static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_GDS);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ man->func->debug(man, &p);
+ return 0;
+}
+
+static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_GWS);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ man->func->debug(man, &p);
+ return 0;
+}
+
+static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_OA);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ man->func->debug(man, &p);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
/*
* amdgpu_ttm_vram_read - Linear read access to VRAM
@@ -2308,58 +2261,6 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
.llseek = default_llseek,
};
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-
-/*
- * amdgpu_ttm_gtt_read - Linear read access to GTT memory
- */
-static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- while (size) {
- loff_t p = *pos / PAGE_SIZE;
- unsigned off = *pos & ~PAGE_MASK;
- size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
- struct page *page;
- void *ptr;
-
- if (p >= adev->gart.num_cpu_pages)
- return result;
-
- page = adev->gart.pages[p];
- if (page) {
- ptr = kmap(page);
- ptr += off;
-
- r = copy_to_user(buf, ptr, cur_size);
- kunmap(adev->gart.pages[p]);
- } else
- r = clear_user(buf, cur_size);
-
- if (r)
- return -EFAULT;
-
- result += cur_size;
- buf += cur_size;
- *pos += cur_size;
- size -= cur_size;
- }
-
- return result;
-}
-
-static const struct file_operations amdgpu_ttm_gtt_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_ttm_gtt_read,
- .llseek = default_llseek
-};
-
-#endif
-
/*
* amdgpu_iomem_read - Virtual read access to GPU mapped memory
*
@@ -2474,46 +2375,29 @@ static const struct file_operations amdgpu_ttm_iomem_fops = {
.llseek = default_llseek
};
-static const struct {
- char *name;
- const struct file_operations *fops;
- int domain;
-} ttm_debugfs_entries[] = {
- { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
-#endif
- { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
-};
-
#endif
-int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned count;
-
struct drm_minor *minor = adev_to_drm(adev)->primary;
- struct dentry *ent, *root = minor->debugfs_root;
-
- for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
- ent = debugfs_create_file(
- ttm_debugfs_entries[count].name,
- S_IFREG | S_IRUGO, root,
- adev,
- ttm_debugfs_entries[count].fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
- i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
- else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
- i_size_write(ent->d_inode, adev->gmc.gart_size);
- adev->mman.debugfs_entries[count] = ent;
- }
-
- count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
- return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
-#else
- return 0;
+ struct dentry *root = minor->debugfs_root;
+
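+	/* debugfs_create_file_size() also sets the file size so userspace sees the full VRAM size */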
+ debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
+ &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
+ debugfs_create_file("amdgpu_iomem", 0444, root, adev,
+ &amdgpu_ttm_iomem_fops);
+ debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
+ &amdgpu_mm_vram_table_fops);
+ debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
+ &amdgpu_mm_tt_table_fops);
+ debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
+ &amdgpu_mm_gds_table_fops);
+ debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
+ &amdgpu_mm_gws_table_fops);
+ debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
+ &amdgpu_mm_oa_table_fops);
+ debugfs_create_file("ttm_page_pool", 0444, root, adev,
+ &amdgpu_ttm_page_pool_fops);
#endif
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 7189f8370108..dec0db8b0b13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -64,10 +64,6 @@ struct amdgpu_mman {
bool initialized;
void __iomem *aper_base_kaddr;
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *debugfs_entries[8];
-#endif
-
/* buffer handling */
const struct amdgpu_buffer_funcs *buffer_funcs;
struct amdgpu_ring *buffer_funcs_ring;
@@ -119,8 +115,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
struct device *dev,
enum dma_data_direction dir,
struct sg_table **sgt);
-void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
- struct device *dev,
+void amdgpu_vram_mgr_free_sgt(struct device *dev,
enum dma_data_direction dir,
struct sg_table *sgt);
uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man);
@@ -186,6 +181,6 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_resource *mem);
-int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
+void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 1beb08af347f..9733224117e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -402,6 +402,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
case CHIP_NAVY_FLOUNDER:
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_ALDEBARAN:
if (!load_type)
return AMDGPU_FW_LOAD_DIRECT;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 46449e70348b..2c42874f7784 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -105,6 +105,9 @@ struct psp_firmware_header_v1_3 {
uint32_t spl_header_version;
uint32_t spl_offset_bytes;
uint32_t spl_size_bytes;
+ uint32_t rl_header_version;
+ uint32_t rl_offset_bytes;
+ uint32_t rl_size_bytes;
};
/* version_major=1, version_minor=0 */
@@ -136,6 +139,7 @@ enum ta_fw_type {
TA_FW_TYPE_PSP_DTM,
TA_FW_TYPE_PSP_RAP,
TA_FW_TYPE_PSP_SECUREDISPLAY,
+ TA_FW_TYPE_MAX_INDEX,
};
struct ta_fw_bin_desc {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 99b82f3c2617..201645963ba5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -47,6 +47,7 @@
#define FIRMWARE_NAVY_FLOUNDER "amdgpu/navy_flounder_vcn.bin"
#define FIRMWARE_VANGOGH "amdgpu/vangogh_vcn.bin"
#define FIRMWARE_DIMGREY_CAVEFISH "amdgpu/dimgrey_cavefish_vcn.bin"
+#define FIRMWARE_ALDEBARAN "amdgpu/aldebaran_vcn.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
MODULE_FIRMWARE(FIRMWARE_PICASSO);
@@ -54,6 +55,7 @@ MODULE_FIRMWARE(FIRMWARE_RAVEN2);
MODULE_FIRMWARE(FIRMWARE_ARCTURUS);
MODULE_FIRMWARE(FIRMWARE_RENOIR);
MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE);
+MODULE_FIRMWARE(FIRMWARE_ALDEBARAN);
MODULE_FIRMWARE(FIRMWARE_NAVI10);
MODULE_FIRMWARE(FIRMWARE_NAVI14);
MODULE_FIRMWARE(FIRMWARE_NAVI12);
@@ -104,6 +106,12 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
+ case CHIP_ALDEBARAN:
+ fw_name = FIRMWARE_ALDEBARAN;
+ if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
+ (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
+ adev->vcn.indirect_sram = true;
+ break;
case CHIP_NAVI10:
fw_name = FIRMWARE_NAVI10;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 13aa417f6be7..1843bf8de0cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -155,6 +155,7 @@
} \
} while (0)
+#define AMDGPU_VCN_FW_SHARED_FLAG_0_RB (1 << 6)
#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
@@ -243,6 +244,12 @@ struct amdgpu_vcn {
int inst_idx, struct dpg_pause_state *new_state);
};
+struct amdgpu_fw_shared_rb_ptrs_struct {
+ /* to WA DPG R/W ptr issues.*/
+ uint32_t rptr;
+ uint32_t wptr;
+};
+
struct amdgpu_fw_shared_multi_queue {
uint8_t decode_queue_mode;
uint8_t encode_generalpurpose_queue_mode;
@@ -258,10 +265,12 @@ struct amdgpu_fw_shared_sw_ring {
struct amdgpu_fw_shared {
uint32_t present_flag_0;
- uint8_t pad[53];
+ uint8_t pad[44];
+ struct amdgpu_fw_shared_rb_ptrs_struct rb;
+ uint8_t pad1[1];
struct amdgpu_fw_shared_multi_queue multi_queue;
struct amdgpu_fw_shared_sw_ring sw_ring;
-} __attribute__((__packed__));
+};
struct amdgpu_vcn_decode_buffer {
uint32_t valid_buf_flag;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 5da04d45b637..d9ffff8eb41d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -640,6 +640,7 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
case CHIP_NAVI12:
case CHIP_SIENNA_CICHLID:
case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
break;
default: /* other chip doesn't support SRIOV */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9d19078246c8..e8cafc97eada 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -92,13 +92,13 @@ struct amdgpu_prt_cb {
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
{
mutex_lock(&vm->eviction_lock);
- vm->saved_flags = memalloc_nofs_save();
+ vm->saved_flags = memalloc_noreclaim_save();
}
static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
{
if (mutex_trylock(&vm->eviction_lock)) {
- vm->saved_flags = memalloc_nofs_save();
+ vm->saved_flags = memalloc_noreclaim_save();
return 1;
}
return 0;
@@ -106,7 +106,7 @@ static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
{
- memalloc_nofs_restore(vm->saved_flags);
+ memalloc_noreclaim_restore(vm->saved_flags);
mutex_unlock(&vm->eviction_lock);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index c89b66bb70e2..b2fc475ce6f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -639,15 +639,13 @@ error_free:
/**
 * amdgpu_vram_mgr_free_sgt - free a previously allocated sg table
*
- * @adev: amdgpu device pointer
* @dev: device pointer
* @dir: data direction of resource to unmap
* @sgt: sg table to free
*
 * Free a previously allocated sg table.
*/
-void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
- struct device *dev,
+void amdgpu_vram_mgr_free_sgt(struct device *dev,
enum dma_data_direction dir,
struct sg_table *sgt)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 659b385b27b5..33f748e5bbfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -468,15 +468,22 @@ int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_dev
}
+/*
+ * NOTE psp_xgmi_node_info.num_hops layout is as follows:
+ * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
+ * num_hops[5:3] = reserved
+ * num_hops[2:0] = number of hops
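+ * e.g. num_hops = 0x43 decodes as link type 1 (xGMI3) with 3 hops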
+ */
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev)
{
struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
+ uint8_t num_hops_mask = 0x7;
int i;
for (i = 0 ; i < top->num_nodes; ++i)
if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
- return top->nodes[i].num_hops;
+ return top->nodes[i].num_hops & num_hops_mask;
return -EINVAL;
}
@@ -492,7 +499,8 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
if (!adev->gmc.xgmi.supported)
return 0;
- if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ if (!adev->gmc.xgmi.pending_reset &&
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
ret = psp_xgmi_initialize(&adev->psp);
if (ret) {
dev_err(adev->dev,
@@ -538,7 +546,8 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
task_barrier_add_task(&hive->tb);
- if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ if (!adev->gmc.xgmi.pending_reset &&
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
/* update node list for other device in the hive */
if (tmp_adev != adev) {
@@ -567,7 +576,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
}
}
- if (!ret)
+ if (!ret && !adev->gmc.xgmi.pending_reset)
ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
exit_unlock:
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
index 7b1b18350bf9..2ac4988ea0ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
@@ -74,10 +74,8 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev,
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
- athub_v2_1_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
- athub_v2_1_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ athub_v2_1_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE);
+ athub_v2_1_update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index 515890f4f5a0..3dcb8b32f48b 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -114,11 +114,11 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base++;
break;
case ATOM_IIO_READ:
- temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
+ temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
base += 3;
break;
case ATOM_IIO_WRITE:
- ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
+ ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
case ATOM_IIO_CLEAR:
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.h b/drivers/gpu/drm/amd/amdgpu/atom.h
index 4205bbe5d8d7..d279759cab47 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.h
+++ b/drivers/gpu/drm/amd/amdgpu/atom.h
@@ -116,8 +116,6 @@ struct card_info {
struct drm_device *dev;
void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
- void (* ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
- uint32_t (* ioreg_read)(struct card_info *, uint32_t); /* filled by driver */
void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 4d6832cc7fb0..c0fcc41ee574 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -26,6 +26,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -70,6 +72,80 @@
#include "amdgpu_amdkfd.h"
#include "dce_virtual.h"
+static const struct amdgpu_video_codec_info cik_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs cik_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(cik_video_codecs_encode_array),
+ .codec_array = cik_video_codecs_encode_array,
+};
+
+static const struct amdgpu_video_codec_info cik_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 41,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 4,
+ },
+};
+
+static const struct amdgpu_video_codecs cik_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(cik_video_codecs_decode_array),
+ .codec_array = cik_video_codecs_decode_array,
+};
+
+static int cik_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs)
+{
+ switch (adev->asic_type) {
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ if (encode)
+ *codecs = &cik_video_codecs_encode;
+ else
+ *codecs = &cik_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
/*
* Indirect registers accessor
*/
@@ -1933,6 +2009,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
.get_pcie_replay_count = &cik_get_pcie_replay_count,
.supports_baco = &cik_asic_supports_baco,
.pre_asic_init = &cik_pre_asic_init,
+ .query_video_codecs = &cik_query_video_codecs,
};
static int cik_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 6b4b30a8dce5..44109a6b8f44 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -568,6 +568,8 @@ static int df_v3_6_pmc_stop(struct amdgpu_device *adev, uint64_t config,
if (ret)
return ret;
+ df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val,
+ hi_base_addr, hi_val);
if (is_remove) {
df_v3_6_reset_perfmon_cntr(adev, config, counter_idx);
diff --git a/drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c
index e9f177e9e3cf..e9f177e9e3cf 100755..100644
--- a/drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/dimgrey_cavefish_reg_init.c
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 84d2eaa38101..4d45844a1d92 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -6718,7 +6718,8 @@ static int gfx_v8_0_cp_ecc_error_irq(struct amdgpu_device *adev,
return 0;
}
-static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
+static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data,
+ bool from_wq)
{
u32 enc, se_id, sh_id, cu_id;
char type[20];
@@ -6756,7 +6757,7 @@ static void gfx_v8_0_parse_sq_irq(struct amdgpu_device *adev, unsigned ih_data)
* or from BH in which case we can access SQ_EDC_INFO
* instance
*/
- if (in_task()) {
+ if (from_wq) {
mutex_lock(&adev->grbm_idx_mutex);
gfx_v8_0_select_se_sh(adev, se_id, sh_id, cu_id);
@@ -6794,7 +6795,7 @@ static void gfx_v8_0_sq_irq_work_func(struct work_struct *work)
struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
struct sq_work *sq_work = container_of(work, struct sq_work, work);
- gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data);
+ gfx_v8_0_parse_sq_irq(adev, sq_work->ih_data, true);
}
static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
@@ -6809,7 +6810,7 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
* just print whatever info is possible directly from the ISR.
*/
if (work_pending(&adev->gfx.sq_work.work)) {
- gfx_v8_0_parse_sq_irq(adev, ih_data);
+ gfx_v8_0_parse_sq_irq(adev, ih_data, false);
} else {
adev->gfx.sq_work.ih_data = ih_data;
schedule_work(&adev->gfx.sq_work.work);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 65db88bb6cbc..1393ccae7418 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -49,6 +49,7 @@
#include "gfx_v9_4.h"
#include "gfx_v9_0.h"
+#include "gfx_v9_4_2.h"
#include "asic_reg/pwr/pwr_10_0_offset.h"
#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
@@ -107,14 +108,12 @@ MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
MODULE_FIRMWARE("amdgpu/arcturus_mec.bin");
-MODULE_FIRMWARE("amdgpu/arcturus_mec2.bin");
MODULE_FIRMWARE("amdgpu/arcturus_rlc.bin");
MODULE_FIRMWARE("amdgpu/renoir_ce.bin");
MODULE_FIRMWARE("amdgpu/renoir_pfp.bin");
MODULE_FIRMWARE("amdgpu/renoir_me.bin");
MODULE_FIRMWARE("amdgpu/renoir_mec.bin");
-MODULE_FIRMWARE("amdgpu/renoir_mec2.bin");
MODULE_FIRMWARE("amdgpu/renoir_rlc.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_ce.bin");
@@ -124,6 +123,10 @@ MODULE_FIRMWARE("amdgpu/green_sardine_mec.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_mec2.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_rlc.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_mec.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_mec2.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
+
#define mmTCP_CHAN_STEER_0_ARCT 0x0b03
#define mmTCP_CHAN_STEER_0_ARCT_BASE_IDX 0
#define mmTCP_CHAN_STEER_1_ARCT 0x0b04
@@ -981,11 +984,16 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_1_rn,
ARRAY_SIZE(golden_settings_gc_9_1_rn));
return; /* for renoir, don't need common goldensetting */
+ case CHIP_ALDEBARAN:
+ gfx_v9_4_2_init_golden_registers(adev,
+ adev->smuio.funcs->get_die_id(adev));
+ break;
default:
break;
}
- if (adev->asic_type != CHIP_ARCTURUS)
+ if ((adev->asic_type != CHIP_ARCTURUS) &&
+ (adev->asic_type != CHIP_ALDEBARAN))
soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
@@ -1517,6 +1525,16 @@ out:
return err;
}
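+/* returns true when the ASIC requests a separate mec2 firmware binary */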
+static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_ALDEBARAN ||
+ adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_RENOIR)
+ return false;
+
+ return true;
+}
+
static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
const char *chip_name)
{
@@ -1538,21 +1556,23 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
- err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
- if (!err) {
- err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
- if (err)
- goto out;
- cp_hdr = (const struct gfx_firmware_header_v1_0 *)
- adev->gfx.mec2_fw->data;
- adev->gfx.mec2_fw_version =
- le32_to_cpu(cp_hdr->header.ucode_version);
- adev->gfx.mec2_feature_version =
- le32_to_cpu(cp_hdr->ucode_feature_version);
- } else {
- err = 0;
- adev->gfx.mec2_fw = NULL;
+ if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+ err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+ if (!err) {
+ err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
+ if (err)
+ goto out;
+ cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+ adev->gfx.mec2_fw->data;
+ adev->gfx.mec2_fw_version =
+ le32_to_cpu(cp_hdr->header.ucode_version);
+ adev->gfx.mec2_feature_version =
+ le32_to_cpu(cp_hdr->ucode_feature_version);
+ } else {
+ err = 0;
+ adev->gfx.mec2_fw = NULL;
+ }
}
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
@@ -1581,8 +1601,7 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev,
/* TODO: Determine if MEC2 JT FW loading can be removed
for all GFX V9 asic and above */
- if (adev->asic_type != CHIP_ARCTURUS &&
- adev->asic_type != CHIP_RENOIR) {
+ if (gfx_v9_0_load_mec2_fw_bin_support(adev)) {
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT;
info->fw = adev->gfx.mec2_fw;
@@ -1642,6 +1661,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
else
chip_name = "green_sardine";
break;
+ case CHIP_ALDEBARAN:
+ chip_name = "aldebaran";
+ break;
default:
BUG();
}
@@ -1882,7 +1904,10 @@ static void gfx_v9_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev)
{
- return 5;
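+ /* one fewer jump table entry when no separate mec2 firmware is loaded */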
+ if (gfx_v9_0_load_mec2_fw_bin_support(adev))
+ return 5;
+ else
+ return 4;
}
static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
@@ -2088,6 +2113,21 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
.query_ras_error_status = &gfx_v9_4_query_ras_error_status,
};
+static const struct amdgpu_gfx_funcs gfx_v9_4_2_gfx_funcs = {
+ .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
+ .select_se_sh = &gfx_v9_0_select_se_sh,
+ .read_wave_data = &gfx_v9_0_read_wave_data,
+ .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
+ .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
+ .ras_error_inject = &gfx_v9_4_2_ras_error_inject,
+ .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count,
+ .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status,
+ .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status,
+ .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer,
+};
+
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
{
u32 gb_addr_config;
@@ -2159,6 +2199,21 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22010042;
break;
+ case CHIP_ALDEBARAN:
+ adev->gfx.funcs = &gfx_v9_4_2_gfx_funcs;
+ adev->gfx.config.max_hw_contexts = 8;
+ adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+ adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+ adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+ adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
+ gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
+ gb_addr_config &= ~0xf3e777ff;
+ gb_addr_config |= 0x22014042;
+ /* check vbios table if gpu info is not available */
+ err = amdgpu_atomfirmware_get_gfx_info(adev);
+ if (err)
+ return err;
+ break;
default:
BUG();
break;
@@ -2249,6 +2304,7 @@ static int gfx_v9_0_sw_init(void *handle)
case CHIP_RAVEN:
case CHIP_ARCTURUS:
case CHIP_RENOIR:
+ case CHIP_ALDEBARAN:
adev->gfx.mec.num_mec = 2;
break;
default:
@@ -2634,17 +2690,15 @@ static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
{
u32 tmp;
- /* don't toggle interrupts that are only applicable
- * to me0 pipe0 on AISCs that have me0 removed */
- if (!adev->gfx.num_gfx_rings)
- return;
+ /* These interrupts should be enabled to drive DS clock */
tmp= RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CMP_BUSY_INT_ENABLE, enable ? 1 : 0);
- tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
+ if (adev->gfx.num_gfx_rings)
+ tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, GFX_IDLE_INT_ENABLE, enable ? 1 : 0);
WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp);
}
@@ -3696,11 +3750,18 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = AMDGPU_MAX_COMPUTE_RINGS;
+ struct v9_mqd *tmp_mqd;
gfx_v9_0_kiq_setting(ring);
- if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
- /* reset MQD to a clean status */
+ /* The GPU could be in a bad state during probe: the driver triggers a reset
+ * after loading the SMU, and in that case the MQD has not been initialized,
+ * so the driver needs to re-initialize it.
+ * Check mqd->cp_hqd_pq_control since this value should not be 0.
+ */
+ tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
+ if (amdgpu_in_reset(adev) && tmp_mqd->cp_hqd_pq_control) {
+ /* for the GPU_RESET case, reset the MQD to a clean status */
if (adev->gfx.mec.mqd_backup[mqd_idx])
memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
@@ -3736,8 +3797,15 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
+ struct v9_mqd *tmp_mqd;
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ /* Same as the KIQ init above: the driver needs to re-init the MQD if
+ * mqd->cp_hqd_pq_control was not initialized before.
+ */
+ tmp_mqd = (struct v9_mqd *)adev->gfx.mec.mqd_backup[mqd_idx];
+
+ if (!tmp_mqd->cp_hqd_pq_control ||
+ (!amdgpu_in_reset(adev) && !adev->in_suspend)) {
memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -3913,6 +3981,9 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ gfx_v9_4_2_set_power_brake_sequence(adev);
+
return r;
}
@@ -4101,7 +4172,7 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
*
* also don't wait anymore for IRQ context
* */
- if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
+ if (r < 1 && (amdgpu_in_reset(adev)))
goto failed_kiq_read;
might_sleep();
@@ -4486,7 +4557,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
if (!ring->sched.ready)
return 0;
- if (adev->asic_type == CHIP_ARCTURUS) {
+ if (adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN) {
vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
@@ -4636,7 +4708,8 @@ static int gfx_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN)
adev->gfx.num_gfx_rings = 0;
else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
@@ -4662,7 +4735,8 @@ static int gfx_v9_0_ecc_late_init(void *handle)
* to GDS in suspend/resume sequence on several cards. So just
* limit this operation in cold boot sequence.
*/
- if (!adev->in_suspend) {
+ if ((!adev->in_suspend) &&
+ (adev->gds.gds_size)) {
r = gfx_v9_0_do_edc_gds_workarounds(adev);
if (r)
return r;
@@ -4673,14 +4747,13 @@ static int gfx_v9_0_ecc_late_init(void *handle)
if (r)
return r;
- if (adev->gfx.funcs &&
- adev->gfx.funcs->reset_ras_error_count)
- adev->gfx.funcs->reset_ras_error_count(adev);
-
r = amdgpu_gfx_ras_late_init(adev);
if (r)
return r;
+ if (adev->gfx.funcs->enable_watchdog_timer)
+ adev->gfx.funcs->enable_watchdog_timer(adev);
+
return 0;
}
@@ -4858,7 +4931,7 @@ static void gfx_v9_0_update_3d_clock_gating(struct amdgpu_device *adev,
{
uint32_t data, def;
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (!adev->gfx.num_gfx_rings)
return;
amdgpu_gfx_rlc_enter_safe_mode(adev);
@@ -5105,6 +5178,7 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
case CHIP_RAVEN:
case CHIP_ARCTURUS:
case CHIP_RENOIR:
+ case CHIP_ALDEBARAN:
gfx_v9_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -6924,6 +6998,7 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
case CHIP_RAVEN:
case CHIP_ARCTURUS:
case CHIP_RENOIR:
+ case CHIP_ALDEBARAN:
adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
break;
default:
@@ -6944,6 +7019,12 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
adev->gds.gds_size = 0x1000;
break;
+ case CHIP_ALDEBARAN:
+ /* Aldebaran removed all of the GDS internal memory;
+ * only GWS opcodes (e.g. barrier, semaphore) are
+ * supported in the kernel. */
+ adev->gds.gds_size = 0;
+ break;
default:
adev->gds.gds_size = 0x10000;
break;
@@ -6966,6 +7047,10 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
adev->gds.gds_compute_max_wave_id = 0xfff;
break;
+ case CHIP_ALDEBARAN:
+ /* deprecated for Aldebaran, no usage at all */
+ adev->gds.gds_compute_max_wave_id = 0;
+ break;
default:
/* this really depends on the chip */
adev->gds.gds_compute_max_wave_id = 0x7ff;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
new file mode 100644
index 000000000000..2e94998c9812
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -0,0 +1,1286 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+
+#include "gc/gc_9_4_2_offset.h"
+#include "gc/gc_9_4_2_sh_mask.h"
+#include "gfx_v9_0.h"
+
+#include "gfx_v9_4_2.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_gfx.h"
+
+enum gfx_v9_4_2_utc_type {
+ VML2_MEM,
+ VML2_WALKER_MEM,
+ UTCL2_MEM,
+ ATC_L2_CACHE_2M,
+ ATC_L2_CACHE_32K,
+ ATC_L2_CACHE_4K
+};
+
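+/* a UTC/ATC L2 memory block whose SEC/DED error counters are read through an index/data register pair */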
+struct gfx_v9_4_2_utc_block {
+ enum gfx_v9_4_2_utc_type type;
+ uint32_t num_banks;
+ uint32_t num_ways;
+ uint32_t num_mem_blocks;
+ struct soc15_reg idx_reg;
+ struct soc15_reg data_reg;
+ uint32_t sec_count_mask;
+ uint32_t sec_count_shift;
+ uint32_t ded_count_mask;
+ uint32_t ded_count_shift;
+ uint32_t clear;
+};
+
+static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde_die_0[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_0, 0x3fffffff, 0x141dc920),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_1, 0x3fffffff, 0x3b458b93),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_2, 0x3fffffff, 0x1a4f5583),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_3, 0x3fffffff, 0x317717f6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_4, 0x3fffffff, 0x107cc1e6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_5, 0x3ff, 0x351),
+};
+
+static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde_die_1[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_0, 0x3fffffff, 0x2591aa38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_1, 0x3fffffff, 0xac9e88b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_2, 0x3fffffff, 0x2bc3369b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_3, 0x3fffffff, 0xfb74ee),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_4, 0x3fffffff, 0x21f0a2fe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CHAN_STEER_5, 0x3ff, 0x49),
+};
+
+static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regGB_ADDR_CONFIG, 0xffff77ff, 0x2a114042),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTA_CNTL_AUX, 0xfffffeef, 0x10b0000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_UTCL1_CNTL1, 0xffffffff, 0x30800400),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, regTCI_CNTL_3, 0xff, 0x20),
+};
+
+static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev);
+static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev);
+
+void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
+ uint32_t die_id)
+{
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_4_2_alde,
+ ARRAY_SIZE(golden_settings_gc_9_4_2_alde));
+
+ /* apply golden settings per die */
+ switch (die_id) {
+ case 0:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_4_2_alde_die_0,
+ ARRAY_SIZE(golden_settings_gc_9_4_2_alde_die_0));
+ break;
+ case 1:
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_9_4_2_alde_die_1,
+ ARRAY_SIZE(golden_settings_gc_9_4_2_alde_die_1));
+ break;
+ default:
+ dev_warn(adev->dev,
+ "invalid die id %d, ignore channel fabricid remap settings\n",
+ die_id);
+ break;
+ }
+
+ return;
+}
+
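+/* enable the trap handler and clear the exception settings for each VMID in [first_vmid, last_vmid) */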
+void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
+ uint32_t first_vmid,
+ uint32_t last_vmid)
+{
+ uint32_t data;
+ int i;
+
+ mutex_lock(&adev->srbm_mutex);
+
+ for (i = first_vmid; i < last_vmid; i++) {
+ data = 0;
+ soc15_grbm_select(adev, 0, 0, 0, i);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_EN, 0);
+ data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, EXCP_REPLACE,
+ 0);
+ WREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL), data);
+ }
+
+ soc15_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
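+/* program the GC throttle and power-brake (PWRBRK) stall pattern registers */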
+void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev)
+{
+ u32 tmp;
+
+ gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL, PATTERN_MODE, 1);
+ WREG32_SOC15(GC, 0, regGC_THROTTLE_CTRL, tmp);
+
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, GC_THROTTLE_CTRL1, PWRBRK_STALL_EN, 1);
+ WREG32_SOC15(GC, 0, regGC_THROTTLE_CTRL1, tmp);
+
+ WREG32_SOC15(GC, 0, regDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL);
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, DIDT_SQ_THROTTLE_CTRL, PWRBRK_STALL_EN, 1);
+ WREG32_SOC15(GC, 0, regDIDT_IND_DATA, tmp);
+
+ WREG32_SOC15(GC, 0, regGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
+ tmp = 0;
+ tmp = REG_SET_FIELD(tmp, PWRBRK_STALL_PATTERN_CTRL, PWRBRK_END_STEP, 0x12);
+ WREG32_SOC15(GC, 0, regGC_CAC_IND_DATA, tmp);
+}
+
+static const struct soc15_reg_entry gfx_v9_4_2_edc_counter_regs[] = {
+ /* CPF */
+ { SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regCPF_EDC_TAG_CNT), 0, 1, 1 },
+ /* CPC */
+ { SOC15_REG_ENTRY(GC, 0, regCPC_EDC_SCRATCH_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regCPC_EDC_UCODE_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regDC_EDC_STATE_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT), 0, 1, 1 },
+ /* GDS */
+ { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_GRBM_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT), 0, 1, 1 },
+ /* RLC */
+ { SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT), 0, 1, 1 },
+ { SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2), 0, 1, 1 },
+ /* SPI */
+ { SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT), 0, 8, 1 },
+ /* SQC */
+ { SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT), 0, 8, 7 },
+ { SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2), 0, 8, 7 },
+ { SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3), 0, 8, 7 },
+ { SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3), 0, 8, 7 },
+ /* SQ */
+ { SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT), 0, 8, 14 },
+ /* TCP */
+ { SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW), 0, 8, 14 },
+ /* TCI */
+ { SOC15_REG_ENTRY(GC, 0, regTCI_EDC_CNT), 0, 1, 69 },
+ /* TCC */
+ { SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT), 0, 1, 16 },
+ { SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2), 0, 1, 16 },
+ /* TCA */
+ { SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT), 0, 1, 2 },
+ /* TCX */
+ { SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT), 0, 1, 2 },
+ { SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2), 0, 1, 2 },
+ /* TD */
+ { SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT), 0, 8, 14 },
+ /* TA */
+ { SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT), 0, 8, 14 },
+ /* GCEA */
+ { SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT), 0, 1, 16 },
+ { SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2), 0, 1, 16 },
+ { SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 1, 16 },
+};
+
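+/* select a specific SE/SH/instance via GRBM_GFX_INDEX; 0xffffffff selects broadcast writes */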
+static void gfx_v9_4_2_select_se_sh(struct amdgpu_device *adev, u32 se_num,
+ u32 sh_num, u32 instance)
+{
+ u32 data;
+
+ if (instance == 0xffffffff)
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX,
+ INSTANCE_BROADCAST_WRITES, 1);
+ else
+ data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX,
+ instance);
+
+ if (se_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES,
+ 1);
+ else
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num);
+
+ if (sh_num == 0xffffffff)
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES,
+ 1);
+ else
+ data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num);
+
+ WREG32_SOC15_RLC_SHADOW_EX(reg, GC, 0, regGRBM_GFX_INDEX, data);
+}
+
+static const struct soc15_ras_field_entry gfx_v9_4_2_ras_fields[] = {
+ /* CPF */
+ { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) },
+ { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_ROQ_CNT),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) },
+ { "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, regCPF_EDC_TAG_CNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) },
+
+ /* CPC */
+ { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, regCPC_EDC_SCRATCH_CNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) },
+ { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, regCPC_EDC_UCODE_CNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT),
+ SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) },
+ { "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, regDC_EDC_STATE_CNT),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_CSINVOC_RAM_ME1",
+ SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_RESTORE_RAM_ME1",
+ SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) },
+ { "CPC_DC_CSINVOC_RAM1_ME1",
+ SOC15_REG_ENTRY(GC, 0, regDC_EDC_CSINVOC_CNT),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1),
+ SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) },
+ { "CPC_DC_RESTORE_RAM1_ME1",
+ SOC15_REG_ENTRY(GC, 0, regDC_EDC_RESTORE_CNT),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1),
+ SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) },
+
+ /* GDS */
+ { "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_GRBM_CNT),
+ SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC),
+ SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) },
+ { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_CNT),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) },
+ { "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) },
+ { "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) },
+ { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PHY_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE0_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE1_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE2_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) },
+ { "GDS_ME1_PIPE3_PIPE_MEM",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_PIPE_CNT),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC),
+ SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) },
+ { "GDS_ME0_GFXHP3D_PIX_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_PIX_DED) },
+ { "GDS_ME0_GFXHP3D_VTX_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_VTX_DED) },
+ { "GDS_ME0_CS_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_CS_DED) },
+ { "GDS_ME0_GFXHP3D_GS_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME0_GFXHP3D_GS_DED) },
+ { "GDS_ME1_PIPE0_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE0_DED) },
+ { "GDS_ME1_PIPE1_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE1_DED) },
+ { "GDS_ME1_PIPE2_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE2_DED) },
+ { "GDS_ME1_PIPE3_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME1_PIPE3_DED) },
+ { "GDS_ME2_PIPE0_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE0_DED) },
+ { "GDS_ME2_PIPE1_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE1_DED) },
+ { "GDS_ME2_PIPE2_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE2_DED) },
+ { "GDS_ME2_PIPE3_DED",
+ SOC15_REG_ENTRY(GC, 0, regGDS_EDC_OA_DED), 0, 0,
+ SOC15_REG_FIELD(GDS_EDC_OA_DED, ME2_PIPE3_DED) },
+
+ /* RLC */
+ { "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) },
+ { "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) },
+ { "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) },
+ { "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) },
+ { "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) },
+ { "RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) },
+ { "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) },
+ { "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, regRLC_EDC_CNT2),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) },
+
+ /* SPI */
+ { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) },
+ { "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) },
+ { "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) },
+ { "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, regSPI_EDC_CNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT),
+ SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) },
+
+ /* SQC - regSQC_EDC_CNT */
+ { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_CU3_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_WRITE_DATA_BUF_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_WRITE_DATA_BUF_DED_COUNT) },
+ { "SQC_DATA_CU3_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU3_UTCL1_LFIFO_DED_COUNT) },
+
+ /* SQC - regSQC_EDC_CNT2 */
+ { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) },
+ { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) },
+ { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) },
+ { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT2),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_DED_COUNT) },
+
+ /* SQC - regSQC_EDC_CNT3 */
+ { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) },
+ { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) },
+ { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_DED_COUNT) },
+
+ /* SQC - regSQC_EDC_PARITY_CNT3 */
+ { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) },
+ { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) },
+ { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, regSQC_EDC_PARITY_CNT3),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_DED_COUNT) },
+
+ /* SQ */
+ { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) },
+ { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) },
+ { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) },
+ { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) },
+ { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) },
+ { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) },
+ { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, regSQ_EDC_CNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT),
+ SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) },
+
+ /* TCP */
+ { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) },
+ { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) },
+ { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) },
+ { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) },
+ { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_DED_COUNT) },
+ { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) },
+ { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, regTCP_EDC_CNT_NEW),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT),
+ SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) },
+
+ /* TCI */
+ { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, regTCI_EDC_CNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) },
+
+ /* TCC */
+ { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) },
+ { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) },
+ { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) },
+ { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) },
+ { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) },
+ { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) },
+ { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) },
+ { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) },
+ { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) },
+ { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) },
+ { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) },
+ { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) },
+ { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) },
+ { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) },
+ { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, regTCC_EDC_CNT2),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT),
+ SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) },
+
+ /* TCA */
+ { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) },
+ { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, regTCA_EDC_CNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) },
+
+ /* TCX */
+ { "TCX_GROUP0", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP0_SEC_COUNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP0_DED_COUNT) },
+ { "TCX_GROUP1", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP1_SEC_COUNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP1_DED_COUNT) },
+ { "TCX_GROUP2", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP2_SEC_COUNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP2_DED_COUNT) },
+ { "TCX_GROUP3", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP3_SEC_COUNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP3_DED_COUNT) },
+ { "TCX_GROUP4", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP4_SEC_COUNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP4_DED_COUNT) },
+ { "TCX_GROUP5", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP5_SED_COUNT), 0, 0 },
+ { "TCX_GROUP6", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP6_SED_COUNT), 0, 0 },
+ { "TCX_GROUP7", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP7_SED_COUNT), 0, 0 },
+ { "TCX_GROUP8", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP8_SED_COUNT), 0, 0 },
+ { "TCX_GROUP9", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP9_SED_COUNT), 0, 0 },
+ { "TCX_GROUP10", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT),
+ SOC15_REG_FIELD(TCX_EDC_CNT, GROUP10_SED_COUNT), 0, 0 },
+ { "TCX_GROUP11", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
+ SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP11_SED_COUNT), 0, 0 },
+ { "TCX_GROUP12", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
+ SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP12_SED_COUNT), 0, 0 },
+ { "TCX_GROUP13", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
+ SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP13_SED_COUNT), 0, 0 },
+ { "TCX_GROUP14", SOC15_REG_ENTRY(GC, 0, regTCX_EDC_CNT2),
+ SOC15_REG_FIELD(TCX_EDC_CNT2, GROUP14_SED_COUNT), 0, 0 },
+
+ /* TD */
+ { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) },
+ { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) },
+ { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, regTD_EDC_CNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) },
+
+ /* TA */
+ { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) },
+ { "TA_FS_AFIFO_LO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_LO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_LO_DED_COUNT) },
+ { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) },
+ { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) },
+ { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) },
+ { "TA_FS_AFIFO_HI", SOC15_REG_ENTRY(GC, 0, regTA_EDC_CNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_HI_SEC_COUNT),
+ SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_HI_DED_COUNT) },
+
+ /* EA - regGCEA_EDC_CNT */
+ { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) },
+ { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) },
+ { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) },
+ { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) },
+ { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) },
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_DED_COUNT) },
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 },
+
+ /* EA - regGCEA_EDC_CNT2 */
+ { "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) },
+ { "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) },
+ { "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) },
+ { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 },
+ { "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) },
+ { "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) },
+ { "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) },
+ { "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT2),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) },
+
+ /* EA - regGCEA_EDC_CNT3 */
+ { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) },
+ { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) },
+ { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) },
+ { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) },
+ { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) },
+ { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3), 0, 0,
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) },
+ { "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) },
+ { "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) },
+ { "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) },
+ { "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) },
+ { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, regGCEA_EDC_CNT3),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_SEC_COUNT),
+ SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) },
+};
+
+static const char * const vml2_walker_mems[] = {
+ "UTC_VML2_CACHE_PDE0_MEM0",
+ "UTC_VML2_CACHE_PDE0_MEM1",
+ "UTC_VML2_CACHE_PDE1_MEM0",
+ "UTC_VML2_CACHE_PDE1_MEM1",
+ "UTC_VML2_CACHE_PDE2_MEM0",
+ "UTC_VML2_CACHE_PDE2_MEM1",
+ "UTC_VML2_RDIF_ARADDRS",
+ "UTC_VML2_RDIF_LOG_FIFO",
+ "UTC_VML2_QUEUE_REQ",
+ "UTC_VML2_QUEUE_RET",
+};
+
+static struct gfx_v9_4_2_utc_block gfx_v9_4_2_utc_blocks[] = {
+ { VML2_MEM, 8, 2, 2,
+ { SOC15_REG_ENTRY(GC, 0, regVML2_MEM_ECC_INDEX) },
+ { SOC15_REG_ENTRY(GC, 0, regVML2_MEM_ECC_CNTL) },
+ SOC15_REG_FIELD(VML2_MEM_ECC_CNTL, SEC_COUNT),
+ SOC15_REG_FIELD(VML2_MEM_ECC_CNTL, DED_COUNT),
+ REG_SET_FIELD(0, VML2_MEM_ECC_CNTL, WRITE_COUNTERS, 1) },
+ { VML2_WALKER_MEM, ARRAY_SIZE(vml2_walker_mems), 1, 1,
+ { SOC15_REG_ENTRY(GC, 0, regVML2_WALKER_MEM_ECC_INDEX) },
+ { SOC15_REG_ENTRY(GC, 0, regVML2_WALKER_MEM_ECC_CNTL) },
+ SOC15_REG_FIELD(VML2_WALKER_MEM_ECC_CNTL, SEC_COUNT),
+ SOC15_REG_FIELD(VML2_WALKER_MEM_ECC_CNTL, DED_COUNT),
+ REG_SET_FIELD(0, VML2_WALKER_MEM_ECC_CNTL, WRITE_COUNTERS, 1) },
+ { UTCL2_MEM, 18, 1, 2,
+ { SOC15_REG_ENTRY(GC, 0, regUTCL2_MEM_ECC_INDEX) },
+ { SOC15_REG_ENTRY(GC, 0, regUTCL2_MEM_ECC_CNTL) },
+ SOC15_REG_FIELD(UTCL2_MEM_ECC_CNTL, SEC_COUNT),
+ SOC15_REG_FIELD(UTCL2_MEM_ECC_CNTL, DED_COUNT),
+ REG_SET_FIELD(0, UTCL2_MEM_ECC_CNTL, WRITE_COUNTERS, 1) },
+ { ATC_L2_CACHE_2M, 8, 2, 1,
+ { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_2M_DSM_INDEX) },
+ { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_2M_DSM_CNTL) },
+ SOC15_REG_FIELD(ATC_L2_CACHE_2M_DSM_CNTL, SEC_COUNT),
+ SOC15_REG_FIELD(ATC_L2_CACHE_2M_DSM_CNTL, DED_COUNT),
+ REG_SET_FIELD(0, ATC_L2_CACHE_2M_DSM_CNTL, WRITE_COUNTERS, 1) },
+ { ATC_L2_CACHE_32K, 8, 2, 2,
+ { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_32K_DSM_INDEX) },
+ { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_32K_DSM_CNTL) },
+ SOC15_REG_FIELD(ATC_L2_CACHE_32K_DSM_CNTL, SEC_COUNT),
+ SOC15_REG_FIELD(ATC_L2_CACHE_32K_DSM_CNTL, DED_COUNT),
+ REG_SET_FIELD(0, ATC_L2_CACHE_32K_DSM_CNTL, WRITE_COUNTERS, 1) },
+ { ATC_L2_CACHE_4K, 8, 2, 8,
+ { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_4K_DSM_INDEX) },
+ { SOC15_REG_ENTRY(GC, 0, regATC_L2_CACHE_4K_DSM_CNTL) },
+ SOC15_REG_FIELD(ATC_L2_CACHE_4K_DSM_CNTL, SEC_COUNT),
+ SOC15_REG_FIELD(ATC_L2_CACHE_4K_DSM_CNTL, DED_COUNT),
+ REG_SET_FIELD(0, ATC_L2_CACHE_4K_DSM_CNTL, WRITE_COUNTERS, 1) },
+};
+
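+/* GCEA error status register; checked across 1 SE iteration and 16 EA instances */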
+static const struct soc15_reg_entry gfx_v9_4_2_rdrsp_status_regs =
+ { SOC15_REG_ENTRY(GC, 0, regGCEA_ERR_STATUS), 0, 1, 16 };
+
+static int gfx_v9_4_2_get_reg_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
+ uint32_t se_id, uint32_t inst_id,
+ uint32_t value, uint32_t *sec_count,
+ uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
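+ /* accumulate SEC/DED counts from every RAS field that lives in this register */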
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_ras_fields); i++) {
+ if (gfx_v9_4_2_ras_fields[i].reg_offset != reg->reg_offset ||
+ gfx_v9_4_2_ras_fields[i].seg != reg->seg ||
+ gfx_v9_4_2_ras_fields[i].inst != reg->inst)
+ continue;
+
+ sec_cnt = SOC15_RAS_REG_FIELD_VAL(
+ value, gfx_v9_4_2_ras_fields[i], sec);
+ if (sec_cnt) {
+ dev_info(adev->dev,
+ "GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ gfx_v9_4_2_ras_fields[i].name, se_id, inst_id,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = SOC15_RAS_REG_FIELD_VAL(
+ value, gfx_v9_4_2_ras_fields[i], ded);
+ if (ded_cnt) {
+ dev_info(adev->dev,
+ "GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ gfx_v9_4_2_ras_fields[i].name, se_id, inst_id,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
+
+static int gfx_v9_4_2_query_sram_edc_count(struct amdgpu_device *adev,
+ uint32_t *sec_count, uint32_t *ded_count)
+{
+ uint32_t i, j, k, data;
+ uint32_t sec_cnt = 0, ded_cnt = 0;
+
+ if (sec_count && ded_count) {
+ *sec_count = 0;
+ *ded_count = 0;
+ }
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
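+ /* walk all EDC counter registers, selecting each SE/instance via GRBM before reading */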
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_edc_counter_regs); i++) {
+ for (j = 0; j < gfx_v9_4_2_edc_counter_regs[i].se_num; j++) {
+ for (k = 0; k < gfx_v9_4_2_edc_counter_regs[i].instance;
+ k++) {
+ gfx_v9_4_2_select_se_sh(adev, j, 0, k);
+
+ /* if sec/ded_count is NULL, just clear counter */
+ if (!sec_count || !ded_count) {
+ WREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_2_edc_counter_regs[i]), 0);
+ continue;
+ }
+
+ data = RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_2_edc_counter_regs[i]));
+
+ if (!data)
+ continue;
+
+ gfx_v9_4_2_get_reg_error_count(adev,
+ &gfx_v9_4_2_edc_counter_regs[i],
+ j, k, data, &sec_cnt, &ded_cnt);
+
+ /* clear counter after read */
+ WREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_2_edc_counter_regs[i]), 0);
+ }
+ }
+ }
+
+ if (sec_count && ded_count) {
+ *sec_count += sec_cnt;
+ *ded_count += ded_cnt;
+ }
+
+ gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return 0;
+}
+
+static void gfx_v9_4_2_log_utc_edc_count(struct amdgpu_device *adev,
+ struct gfx_v9_4_2_utc_block *blk,
+ uint32_t instance, uint32_t sec_cnt,
+ uint32_t ded_cnt)
+{
+ uint32_t bank, way, mem;
+ static const char *vml2_way_str[] = { "BIGK", "4K" };
+ static const char *utcl2_router_str[] = { "VMC", "APT" };
+
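+ /* decode the flat ECC index into bank/way/mem using the block geometry */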
+ mem = instance % blk->num_mem_blocks;
+ way = (instance / blk->num_mem_blocks) % blk->num_ways;
+ bank = instance / (blk->num_mem_blocks * blk->num_ways);
+
+ switch (blk->type) {
+ case VML2_MEM:
+ dev_info(
+ adev->dev,
+ "GFX SubBlock UTC_VML2_BANK_CACHE_%d_%s_MEM%d, SED %d, DED %d\n",
+ bank, vml2_way_str[way], mem, sec_cnt, ded_cnt);
+ break;
+ case VML2_WALKER_MEM:
+ dev_info(adev->dev, "GFX SubBlock %s, SED %d, DED %d\n",
+ vml2_walker_mems[bank], sec_cnt, ded_cnt);
+ break;
+ case UTCL2_MEM:
+ dev_info(
+ adev->dev,
+ "GFX SubBlock UTCL2_ROUTER_IFIF%d_GROUP0_%s, SED %d, DED %d\n",
+ bank, utcl2_router_str[mem], sec_cnt, ded_cnt);
+ break;
+ case ATC_L2_CACHE_2M:
+ dev_info(
+ adev->dev,
+ "GFX SubBlock UTC_ATCL2_CACHE_2M_BANK%d_WAY%d_MEM, SED %d, DED %d\n",
+ bank, way, sec_cnt, ded_cnt);
+ break;
+ case ATC_L2_CACHE_32K:
+ dev_info(
+ adev->dev,
+ "GFX SubBlock UTC_ATCL2_CACHE_32K_BANK%d_WAY%d_MEM%d, SED %d, DED %d\n",
+ bank, way, mem, sec_cnt, ded_cnt);
+ break;
+ case ATC_L2_CACHE_4K:
+ dev_info(
+ adev->dev,
+ "GFX SubBlock UTC_ATCL2_CACHE_4K_BANK%d_WAY%d_MEM%d, SED %d, DED %d\n",
+ bank, way, mem, sec_cnt, ded_cnt);
+ break;
+ }
+}
+
+static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
+ uint32_t *sec_count,
+ uint32_t *ded_count)
+{
+ uint32_t i, j, data;
+ uint32_t sec_cnt, ded_cnt;
+ uint32_t num_instances;
+ struct gfx_v9_4_2_utc_block *blk;
+
+ if (sec_count && ded_count) {
+ *sec_count = 0;
+ *ded_count = 0;
+ }
+
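+ /* step each UTC block's index register through all bank/way/mem instances and read (or just clear) its ECC counters */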
+ for (i = 0; i < ARRAY_SIZE(gfx_v9_4_2_utc_blocks); i++) {
+ blk = &gfx_v9_4_2_utc_blocks[i];
+ num_instances =
+ blk->num_banks * blk->num_ways * blk->num_mem_blocks;
+ for (j = 0; j < num_instances; j++) {
+ WREG32(SOC15_REG_ENTRY_OFFSET(blk->idx_reg), j);
+
+ /* if sec/ded_count is NULL, just clear counter */
+ if (!sec_count || !ded_count) {
+ WREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg),
+ blk->clear);
+ continue;
+ }
+
+ data = RREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg));
+ if (!data)
+ continue;
+
+ sec_cnt = SOC15_RAS_REG_FIELD_VAL(data, *blk, sec);
+ *sec_count += sec_cnt;
+ ded_cnt = SOC15_RAS_REG_FIELD_VAL(data, *blk, ded);
+ *ded_count += ded_cnt;
+
+ /* clear counter after read */
+ WREG32(SOC15_REG_ENTRY_OFFSET(blk->data_reg),
+ blk->clear);
+
+ /* print the edc count */
+ gfx_v9_4_2_log_utc_edc_count(adev, blk, j, sec_cnt,
+ ded_cnt);
+ }
+ }
+
+ return 0;
+}
+
+int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0, ded_count = 0;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return -EINVAL;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ gfx_v9_4_2_query_sram_edc_count(adev, &sec_count, &ded_count);
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+
+ gfx_v9_4_2_query_utc_edc_count(adev, &sec_count, &ded_count);
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+
+ return 0;
+}
+
+static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3);
+ WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3);
+ WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3);
+}
+
+static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
+{
+ uint32_t i, j;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < gfx_v9_4_2_rdrsp_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_2_rdrsp_status_regs.instance; j++) {
+ gfx_v9_4_2_select_se_sh(adev, i, 0, j);
+ WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_rdrsp_status_regs), 0x10);
+ }
+ }
+ gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
+ gfx_v9_4_2_query_sram_edc_count(adev, NULL, NULL);
+ gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL);
+}
+
+int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+{
+ struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
+ int ret;
+ struct ta_ras_trigger_error_input block_info = { 0 };
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return -EINVAL;
+
+ block_info.block_id = amdgpu_ras_block_to_ta(info->head.block);
+ block_info.sub_block_index = info->head.sub_block_index;
+ block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type);
+ block_info.address = info->address;
+ block_info.value = info->value;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ ret = psp_ras_trigger_error(&adev->psp, &block_info);
+ mutex_unlock(&adev->grbm_idx_mutex);
+
+ return ret;
+}
+
+static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
+{
+ uint32_t i, j;
+ uint32_t reg_value;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+
+ for (i = 0; i < gfx_v9_4_2_rdrsp_status_regs.se_num; i++) {
+ for (j = 0; j < gfx_v9_4_2_rdrsp_status_regs.instance; j++) {
+ gfx_v9_4_2_select_se_sh(adev, i, 0, j);
+ reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+ gfx_v9_4_2_rdrsp_status_regs));
+ if (reg_value)
+ dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
+ j, reg_value);
+ /* clear after read */
+ WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_rdrsp_status_regs), 0x10);
+ }
+ }
+
+ gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev)
+{
+ uint32_t data;
+
+ data = RREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS);
+ if (data) {
+ dev_warn(adev->dev, "GFX UTCL2 Mem Ecc Status: 0x%x!\n", data);
+ WREG32_SOC15(GC, 0, regUTCL2_MEM_ECC_STATUS, 0x3);
+ }
+
+ data = RREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS);
+ if (data) {
+ dev_warn(adev->dev, "GFX VML2 Mem Ecc Status: 0x%x!\n", data);
+ WREG32_SOC15(GC, 0, regVML2_MEM_ECC_STATUS, 0x3);
+ }
+
+ data = RREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS);
+ if (data) {
+ dev_warn(adev->dev, "GFX VML2 Walker Mem Ecc Status: 0x%x!\n", data);
+ WREG32_SOC15(GC, 0, regVML2_WALKER_MEM_ECC_STATUS, 0x3);
+ }
+}
+
+void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
+ gfx_v9_4_2_query_ea_err_status(adev);
+ gfx_v9_4_2_query_utc_err_status(adev);
+ gfx_v9_4_2_query_sq_timeout_status(adev);
+}
+
+void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
+ gfx_v9_4_2_reset_utc_err_status(adev);
+ gfx_v9_4_2_reset_ea_err_status(adev);
+ gfx_v9_4_2_reset_sq_timeout_status(adev);
+}
+
+void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev)
+{
+ uint32_t i;
+ uint32_t data;
+
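+ /* build SQ_TIMEOUT_CONFIG from the amdgpu_watchdog_timer module parameters */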
+ data = REG_SET_FIELD(0, SQ_TIMEOUT_CONFIG, TIMEOUT_FATAL_DISABLE,
+ amdgpu_watchdog_timer.timeout_fatal_disable ? 1 : 0);
+
+ if (amdgpu_watchdog_timer.timeout_fatal_disable &&
+ (amdgpu_watchdog_timer.period < 1 ||
+ amdgpu_watchdog_timer.period > 0x23)) {
+ dev_warn(adev->dev, "Watchdog period range is 1 to 0x23\n");
+ amdgpu_watchdog_timer.period = 0x23;
+ }
+ data = REG_SET_FIELD(data, SQ_TIMEOUT_CONFIG, PERIOD_SEL,
+ amdgpu_watchdog_timer.period);
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ gfx_v9_4_2_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
+ WREG32_SOC15(GC, 0, regSQ_TIMEOUT_CONFIG, data);
+ }
+ gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
+{
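+ /* indirect SQ access: select the wave register via SQ_IND_INDEX, then read it back through SQ_IND_DATA */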
+ WREG32_SOC15_RLC_EX(reg, GC, 0, regSQ_IND_INDEX,
+ (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
+ (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
+ (address << SQ_IND_INDEX__INDEX__SHIFT) |
+ (SQ_IND_INDEX__FORCE_READ_MASK));
+ return RREG32_SOC15(GC, 0, regSQ_IND_DATA);
+}
+
+static void gfx_v9_4_2_log_cu_timeout_status(struct amdgpu_device *adev,
+ uint32_t status)
+{
+ struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
+ uint32_t i, simd, wave;
+ uint32_t wave_status;
+ uint32_t wave_pc_lo, wave_pc_hi;
+ uint32_t wave_exec_lo, wave_exec_hi;
+ uint32_t wave_inst_dw0, wave_inst_dw1;
+ uint32_t wave_ib_sts;
+
+ for (i = 0; i < 32; i++) {
+ if (!((1 << i) & status))
+ continue;
+
+ simd = i / cu_info->max_waves_per_simd;
+ wave = i % cu_info->max_waves_per_simd;
+
+ wave_status = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
+ wave_pc_lo = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
+ wave_pc_hi = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
+ wave_exec_lo =
+ wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
+ wave_exec_hi =
+ wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
+ wave_inst_dw0 =
+ wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
+ wave_inst_dw1 =
+ wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
+ wave_ib_sts = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
+
+ dev_info(
+ adev->dev,
+ "\t SIMD %d, Wave %d: status 0x%x, pc 0x%llx, exec 0x%llx, inst 0x%llx, ib_sts 0x%x\n",
+ simd, wave, wave_status,
+ ((uint64_t)wave_pc_hi << 32 | wave_pc_lo),
+ ((uint64_t)wave_exec_hi << 32 | wave_exec_lo),
+ ((uint64_t)wave_inst_dw1 << 32 | wave_inst_dw0),
+ wave_ib_sts);
+ }
+}
+
+static void gfx_v9_4_2_query_sq_timeout_status(struct amdgpu_device *adev)
+{
+ uint32_t se_idx, sh_idx, cu_idx;
+ uint32_t status;
+
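+ /* scan SQ_TIMEOUT_STATUS on every CU of every SE/SH and dump wave state for any CU that timed out */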
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines;
+ se_idx++) {
+ for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se;
+ sh_idx++) {
+ for (cu_idx = 0;
+ cu_idx < adev->gfx.config.max_cu_per_sh;
+ cu_idx++) {
+ gfx_v9_4_2_select_se_sh(adev, se_idx, sh_idx,
+ cu_idx);
+ status = RREG32_SOC15(GC, 0,
+ regSQ_TIMEOUT_STATUS);
+ if (status != 0) {
+ dev_info(
+ adev->dev,
+ "GFX Watchdog Timeout: SE %d, SH %d, CU %d\n",
+ se_idx, sh_idx, cu_idx);
+ gfx_v9_4_2_log_cu_timeout_status(
+ adev, status);
+ }
+ /* clear old status */
+ WREG32_SOC15(GC, 0, regSQ_TIMEOUT_STATUS, 0);
+ }
+ }
+ }
+ gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
+
+static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev)
+{
+ uint32_t se_idx, sh_idx, cu_idx;
+
+ mutex_lock(&adev->grbm_idx_mutex);
+ for (se_idx = 0; se_idx < adev->gfx.config.max_shader_engines;
+ se_idx++) {
+ for (sh_idx = 0; sh_idx < adev->gfx.config.max_sh_per_se;
+ sh_idx++) {
+ for (cu_idx = 0;
+ cu_idx < adev->gfx.config.max_cu_per_sh;
+ cu_idx++) {
+ gfx_v9_4_2_select_se_sh(adev, se_idx, sh_idx,
+ cu_idx);
+ WREG32_SOC15(GC, 0, regSQ_TIMEOUT_STATUS, 0);
+ }
+ }
+ }
+ gfx_v9_4_2_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
new file mode 100644
index 000000000000..c143d178ef98
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __GFX_V9_4_2_H__
+#define __GFX_V9_4_2_H__
+
+void gfx_v9_4_2_debug_trap_config_init(struct amdgpu_device *adev,
+ uint32_t first_vmid, uint32_t last_vmid);
+void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev,
+ uint32_t die_id);
+void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev);
+
+void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev);
+int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if);
+void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev);
+int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status);
+void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev);
+void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev);
+#endif /* __GFX_V9_4_2_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 6ddd53ba8b77..d189507dcef0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -53,19 +53,39 @@ static void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev,
static void gfxhub_v1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
- uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+ uint64_t pt_base;
- gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
+ if (adev->gmc.pdb0_bo)
+ pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
+ else
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
- (u32)(adev->gmc.gart_start >> 12));
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
- (u32)(adev->gmc.gart_start >> 44));
+ gfxhub_v1_0_setup_vm_pt_regs(adev, 0, pt_base);
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
- (u32)(adev->gmc.gart_end >> 12));
- WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
- (u32)(adev->gmc.gart_end >> 44));
+ /* If the GART is used for FB translation, the vmid0 page table covers
+ * both vram and system memory (gart)
+ */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.fb_start >> 12));
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.fb_start >> 44));
+
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ } else {
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ }
}
static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
@@ -116,6 +136,27 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2,
ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
}
+
+ /* When vram is squeezed into the GART aperture, the FB aperture and
+ * AGP aperture are not used. Disable them.
+ */
+ if (adev->gmc.pdb0_bo) {
+ if (adev->asic_type == CHIP_ALDEBARAN) {
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, adev->gmc.fb_end_original >> 24);
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, adev->gmc.fb_start_original >> 24);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.fb_start_original >> 18);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.fb_end_original >> 18);
+ } else {
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_TOP, 0);
+ WREG32_SOC15(GC, 0, mmMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, mmMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
+ WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
+ }
+ }
}
static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev)
@@ -173,8 +214,13 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL3, tmp);
tmp = mmVM_L2_CNTL4_DEFAULT;
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ }
WREG32_SOC15_RLC(GC, 0, mmVM_L2_CNTL4, tmp);
}
@@ -184,7 +230,10 @@ static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev)
tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
- tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
+ adev->gmc.vmid0_page_table_depth);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
+ adev->gmc.vmid0_page_table_block_size);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
index c0ab71df0d90..8fca72ebd11c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
@@ -28,13 +28,42 @@
#include "soc15_common.h"
+#define mmMC_VM_XGMI_LFB_CNTL_ALDE 0x0978
+#define mmMC_VM_XGMI_LFB_CNTL_ALDE_BASE_IDX 0
+#define mmMC_VM_XGMI_LFB_SIZE_ALDE 0x0979
+#define mmMC_VM_XGMI_LFB_SIZE_ALDE_BASE_IDX 0
+//MC_VM_XGMI_LFB_CNTL
+#define MC_VM_XGMI_LFB_CNTL_ALDE__PF_LFB_REGION__SHIFT 0x0
+#define MC_VM_XGMI_LFB_CNTL_ALDE__PF_MAX_REGION__SHIFT 0x4
+#define MC_VM_XGMI_LFB_CNTL_ALDE__PF_LFB_REGION_MASK 0x0000000FL
+#define MC_VM_XGMI_LFB_CNTL_ALDE__PF_MAX_REGION_MASK 0x000000F0L
+//MC_VM_XGMI_LFB_SIZE
+#define MC_VM_XGMI_LFB_SIZE_ALDE__PF_LFB_SIZE__SHIFT 0x0
+#define MC_VM_XGMI_LFB_SIZE_ALDE__PF_LFB_SIZE_MASK 0x0001FFFFL
+
int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
{
- u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
- u32 max_region =
+ u32 max_num_physical_nodes;
+ u32 max_physical_node_id;
+ u32 xgmi_lfb_cntl;
+ u32 max_region;
+ u64 seg_size;
+
+ if (adev->asic_type == CHIP_ALDEBARAN) {
+ xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL_ALDE);
+ seg_size = REG_GET_FIELD(
+ RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE_ALDE),
+ MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+ } else {
+ xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
+ seg_size = REG_GET_FIELD(
+ RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE),
+ MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+ }
+
+ max_region =
REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);
- u32 max_num_physical_nodes = 0;
- u32 max_physical_node_id = 0;
+
switch (adev->asic_type) {
case CHIP_VEGA20:
@@ -45,23 +74,30 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
max_num_physical_nodes = 8;
max_physical_node_id = 7;
break;
+ case CHIP_ALDEBARAN:
+ /* just using duplicates for Aldebaran support, revisit later */
+ max_num_physical_nodes = 8;
+ max_physical_node_id = 7;
+ break;
default:
return -EINVAL;
}
/* PF_MAX_REGION=0 means xgmi is disabled */
- if (max_region) {
+ if (max_region || adev->gmc.xgmi.connected_to_cpu) {
adev->gmc.xgmi.num_physical_nodes = max_region + 1;
+
if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
return -EINVAL;
adev->gmc.xgmi.physical_node_id =
- REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_LFB_REGION);
+ REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
+ PF_LFB_REGION);
+
if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
return -EINVAL;
- adev->gmc.xgmi.node_segment_size = REG_GET_FIELD(
- RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_SIZE),
- MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
+
+ adev->gmc.xgmi.node_segment_size = seg_size;
}
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 3b7c6c31fce1..33e54eed2eec 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -113,7 +113,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
/* Delegate it to a different ring if the hardware hasn't
* already done it.
*/
- if (in_interrupt()) {
+ if (entry->ih == &adev->irq.ih) {
amdgpu_irq_delegate(adev, entry, 8);
return 1;
}
@@ -152,8 +152,9 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid,
entry->pasid, task_info.process_name, task_info.tgid,
task_info.task_name, task_info.pid);
- dev_err(adev->dev, " in page starting at address 0x%012llx from client %d\n",
- addr, entry->client_id);
+ dev_err(adev->dev, " in page starting at address 0x%016llx from client 0x%x (%s)\n",
+ addr, entry->client_id,
+ soc15_ih_clientid_name[entry->client_id]);
if (!amdgpu_sriov_vf(adev))
hub->vmhub_funcs->print_l2_protection_fault_status(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index f5b69484c45a..405d6ad09022 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -346,6 +346,7 @@ static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
+ adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
gmc_v6_0_vram_gtt_location(adev, &adev->gmc);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index dee2b34effb6..210ada2289ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -414,6 +414,7 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
+ adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
gmc_v7_0_vram_gtt_location(adev, &adev->gmc);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 2d832fc23119..c1bd190841f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -599,6 +599,7 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
+ adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
gmc_v8_0_vram_gtt_location(adev, &adev->gmc);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 3686e777c76c..468acc06b681 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -50,6 +50,7 @@
#include "athub_v1_0.h"
#include "gfxhub_v1_1.h"
#include "mmhub_v9_4.h"
+#include "mmhub_v1_7.h"
#include "umc_v6_1.h"
#include "umc_v6_0.h"
@@ -279,6 +280,47 @@ static const char *mmhub_client_ids_arcturus[][2] = {
[384][1] = "OSS",
};
+static const char *mmhub_client_ids_aldebaran[][2] = {
+ [2][0] = "MP1",
+ [3][0] = "MP0",
+ [32+1][0] = "DBGU_IO0",
+ [32+2][0] = "DBGU_IO2",
+ [32+4][0] = "MPIO",
+ [96+11][0] = "JPEG0",
+ [96+12][0] = "VCN0",
+ [96+13][0] = "VCNU0",
+ [128+11][0] = "JPEG1",
+ [128+12][0] = "VCN1",
+ [128+13][0] = "VCNU1",
+ [160+1][0] = "XDP",
+ [160+14][0] = "HDP",
+ [256+0][0] = "SDMA0",
+ [256+1][0] = "SDMA1",
+ [256+2][0] = "SDMA2",
+ [256+3][0] = "SDMA3",
+ [256+4][0] = "SDMA4",
+ [384+0][0] = "OSS",
+ [2][1] = "MP1",
+ [3][1] = "MP0",
+ [32+1][1] = "DBGU_IO0",
+ [32+2][1] = "DBGU_IO2",
+ [32+4][1] = "MPIO",
+ [96+11][1] = "JPEG0",
+ [96+12][1] = "VCN0",
+ [96+13][1] = "VCNU0",
+ [128+11][1] = "JPEG1",
+ [128+12][1] = "VCN1",
+ [128+13][1] = "VCNU1",
+ [160+1][1] = "XDP",
+ [160+14][1] = "HDP",
+ [256+0][1] = "SDMA0",
+ [256+1][1] = "SDMA1",
+ [256+2][1] = "SDMA2",
+ [256+3][1] = "SDMA3",
+ [256+4][1] = "SDMA4",
+ [384+0][1] = "OSS",
+};
+
static const struct soc15_reg_golden golden_settings_mmhub_1_0_0[] =
{
SOC15_REG_GOLDEN_VALUE(MMHUB, 0, mmDAGB1_WRCLI2, 0x00000007, 0xfe5fe0fa),
@@ -484,7 +526,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
/* Delegate it to a different ring if the hardware hasn't
* already done it.
*/
- if (in_interrupt()) {
+ if (entry->ih == &adev->irq.ih) {
amdgpu_irq_delegate(adev, entry, 8);
return 1;
}
@@ -520,8 +562,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid,
entry->pasid, task_info.process_name, task_info.tgid,
task_info.task_name, task_info.pid);
- dev_err(adev->dev, " in page starting at address 0x%012llx from client %d\n",
- addr, entry->client_id);
+ dev_err(adev->dev, " in page starting at address 0x%016llx from IH client 0x%x (%s)\n",
+ addr, entry->client_id,
+ soc15_ih_clientid_name[entry->client_id]);
if (amdgpu_sriov_vf(adev))
return 0;
@@ -568,6 +611,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
case CHIP_RENOIR:
mmhub_cid = mmhub_client_ids_renoir[cid][rw];
break;
+ case CHIP_ALDEBARAN:
+ mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
+ break;
default:
mmhub_cid = NULL;
break;
@@ -642,6 +688,9 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ return false;
+
return ((vmhub == AMDGPU_MMHUB_0 ||
vmhub == AMDGPU_MMHUB_1) &&
(!amdgpu_sriov_vf(adev)) &&
@@ -1033,10 +1082,14 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if (adev->asic_type == CHIP_ARCTURUS &&
+ if ((adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN) &&
!(*flags & AMDGPU_PTE_SYSTEM) &&
mapping->bo_va->is_xgmi)
*flags |= AMDGPU_PTE_SNOOPED;
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ *flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
}
static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
@@ -1123,6 +1176,9 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
adev->mmhub.funcs = &mmhub_v9_4_funcs;
break;
+ case CHIP_ALDEBARAN:
+ adev->mmhub.funcs = &mmhub_v1_7_funcs;
+ break;
default:
adev->mmhub.funcs = &mmhub_v1_0_funcs;
break;
@@ -1144,6 +1200,16 @@ static int gmc_v9_0_early_init(void *handle)
gmc_v9_0_set_mmhub_funcs(adev);
gmc_v9_0_set_gfxhub_funcs(adev);
+ if (adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS)
+ adev->gmc.xgmi.supported = true;
+
+ if (adev->asic_type == CHIP_ALDEBARAN) {
+ adev->gmc.xgmi.supported = true;
+ adev->gmc.xgmi.connected_to_cpu =
+ adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
+ }
+
adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
adev->gmc.shared_aperture_end =
adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
@@ -1194,9 +1260,13 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- amdgpu_gmc_vram_location(adev, mc, base);
- amdgpu_gmc_gart_location(adev, mc);
- amdgpu_gmc_agp_location(adev, mc);
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ amdgpu_gmc_sysvm_location(adev, mc);
+ } else {
+ amdgpu_gmc_vram_location(adev, mc, base);
+ amdgpu_gmc_gart_location(adev, mc);
+ amdgpu_gmc_agp_location(adev, mc);
+ }
/* base offset of vram pages */
adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
@@ -1223,7 +1293,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
- if (!(adev->flags & AMD_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU) &&
+ !adev->gmc.xgmi.connected_to_cpu) {
r = amdgpu_device_resize_fb_bar(adev);
if (r)
return r;
@@ -1232,10 +1303,28 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
#ifdef CONFIG_X86_64
- if (adev->flags & AMD_IS_APU) {
- adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
+ /*
+ * An AMD Accelerated Processing Platform (APP) supporting a GPU-HOST xgmi
+ * interface can use VRAM through here, as it appears as system reserved
+ * memory in the host address space.
+ *
+ * For APUs, VRAM is just the stolen system memory and can be accessed
+ * directly.
+ *
+ * Otherwise, use the legacy Host Data Path (HDP) through PCIe BAR.
+ */
+
+ /* check whether both host-gpu and gpu-gpu xgmi links exist */
+ if ((adev->flags & AMD_IS_APU) ||
+ (adev->gmc.xgmi.supported &&
+ adev->gmc.xgmi.connected_to_cpu)) {
+ adev->gmc.aper_base =
+ adev->gfxhub.funcs->get_mc_fb_offset(adev) +
+ adev->gmc.xgmi.physical_node_id *
+ adev->gmc.xgmi.node_segment_size;
adev->gmc.aper_size = adev->gmc.real_vram_size;
}
+
#endif
/* In case the PCI BAR is larger than the actual amount of vram */
adev->gmc.visible_vram_size = adev->gmc.aper_size;
@@ -1249,6 +1338,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
case CHIP_VEGA12: /* all engines support GPUVM */
case CHIP_VEGA20:
case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
default:
adev->gmc.gart_size = 512ULL << 20;
break;
@@ -1261,6 +1351,8 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
}
+ adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
+
gmc_v9_0_vram_gtt_location(adev, &adev->gmc);
return 0;
@@ -1274,6 +1366,15 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
WARN(1, "VEGA10 PCIE GART already initialized\n");
return 0;
}
+
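+ /* with a CPU xgmi link, vmid0 uses a two-level table (PDB0 + PTB) so it can cover both vram and gart */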
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ adev->gmc.vmid0_page_table_depth = 1;
+ adev->gmc.vmid0_page_table_block_size = 12;
+ } else {
+ adev->gmc.vmid0_page_table_depth = 0;
+ adev->gmc.vmid0_page_table_block_size = 0;
+ }
+
/* Initialize common gart structure */
r = amdgpu_gart_init(adev);
if (r)
@@ -1281,7 +1382,16 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
adev->gart.table_size = adev->gart.num_gpu_pages * 8;
adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_VG10(MTYPE_UC) |
AMDGPU_PTE_EXECUTABLE;
- return amdgpu_gart_table_vram_alloc(adev);
+
+ r = amdgpu_gart_table_vram_alloc(adev);
+ if (r)
+ return r;
+
+ if (adev->gmc.xgmi.connected_to_cpu)
+ r = amdgpu_gmc_pdb0_alloc(adev);
+
+ return r;
}
/**
@@ -1352,6 +1462,7 @@ static int gmc_v9_0_sw_init(void *handle)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RENOIR:
+ case CHIP_ALDEBARAN:
adev->num_vmhubs = 2;
@@ -1448,7 +1559,8 @@ static int gmc_v9_0_sw_init(void *handle)
* for video processing.
*/
adev->vm_manager.first_kfd_vmid =
- adev->asic_type == CHIP_ARCTURUS ? 3 : 8;
+ (adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN) ? 3 : 8;
amdgpu_vm_manager_init(adev);
@@ -1465,6 +1577,7 @@ static int gmc_v9_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
amdgpu_gart_table_vram_free(adev);
+ amdgpu_bo_unref(&adev->gmc.pdb0_bo);
amdgpu_bo_fini(adev);
amdgpu_gart_fini(adev);
@@ -1525,10 +1638,14 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
{
int r;
+ if (adev->gmc.xgmi.connected_to_cpu)
+ amdgpu_gmc_init_pdb0(adev);
+
if (adev->gart.bo == NULL) {
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
+
r = amdgpu_gart_table_vram_pin(adev);
if (r)
return r;
@@ -1541,9 +1658,14 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (r)
return r;
- DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
- (unsigned)(adev->gmc.gart_size >> 20),
- (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
+ DRM_INFO("PCIE GART of %uM enabled.\n",
+ (unsigned)(adev->gmc.gart_size >> 20));
+ if (adev->gmc.pdb0_bo)
+ DRM_INFO("PDB0 located at 0x%016llX\n",
+ (unsigned long long)amdgpu_bo_gpu_offset(adev->gmc.pdb0_bo));
+ DRM_INFO("PTB located at 0x%016llX\n",
+ (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));
+
adev->gart.ready = true;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index e46621fed5b9..edbd35d293eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -49,6 +49,9 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ return;
+
if (!ring || !ring->funcs->emit_wreg)
WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
else
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index c6724a0e0c43..072774ae16bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -565,6 +565,26 @@ static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = {
.set_powergating_state = jpeg_v2_5_set_powergating_state,
};
+static const struct amd_ip_funcs jpeg_v2_6_ip_funcs = {
+ .name = "jpeg_v2_6",
+ .early_init = jpeg_v2_5_early_init,
+ .late_init = NULL,
+ .sw_init = jpeg_v2_5_sw_init,
+ .sw_fini = jpeg_v2_5_sw_fini,
+ .hw_init = jpeg_v2_5_hw_init,
+ .hw_fini = jpeg_v2_5_hw_fini,
+ .suspend = jpeg_v2_5_suspend,
+ .resume = jpeg_v2_5_resume,
+ .is_idle = jpeg_v2_5_is_idle,
+ .wait_for_idle = jpeg_v2_5_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = jpeg_v2_5_set_clockgating_state,
+ .set_powergating_state = jpeg_v2_5_set_powergating_state,
+};
+
static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.type = AMDGPU_RING_TYPE_VCN_JPEG,
.align_mask = 0xf,
@@ -595,6 +615,36 @@ static const struct amdgpu_ring_funcs jpeg_v2_5_dec_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_JPEG,
+ .align_mask = 0xf,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = jpeg_v2_5_dec_ring_get_rptr,
+ .get_wptr = jpeg_v2_5_dec_ring_get_wptr,
+ .set_wptr = jpeg_v2_5_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* jpeg_v2_5_dec_ring_emit_vm_flush */
+ 18 + 18 + /* jpeg_v2_5_dec_ring_emit_fence x2 vm fence */
+ 8 + 16,
+ .emit_ib_size = 22, /* jpeg_v2_5_dec_ring_emit_ib */
+ .emit_ib = jpeg_v2_0_dec_ring_emit_ib,
+ .emit_fence = jpeg_v2_0_dec_ring_emit_fence,
+ .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush,
+ .test_ring = amdgpu_jpeg_dec_ring_test_ring,
+ .test_ib = amdgpu_jpeg_dec_ring_test_ib,
+ .insert_nop = jpeg_v2_0_dec_ring_nop,
+ .insert_start = jpeg_v2_0_dec_ring_insert_start,
+ .insert_end = jpeg_v2_0_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_jpeg_ring_begin_use,
+ .end_use = amdgpu_jpeg_ring_end_use,
+ .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg,
+ .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
int i;
@@ -602,8 +652,10 @@ static void jpeg_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;
-
- adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
+ if (adev->asic_type == CHIP_ARCTURUS)
+ adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_5_dec_ring_vm_funcs;
+ else /* CHIP_ALDEBARAN */
+ adev->jpeg.inst[i].ring_dec.funcs = &jpeg_v2_6_dec_ring_vm_funcs;
adev->jpeg.inst[i].ring_dec.me = i;
DRM_INFO("JPEG(%d) JPEG decode is enabled in VM mode\n", i);
}
@@ -635,3 +687,12 @@ const struct amdgpu_ip_block_version jpeg_v2_5_ip_block =
.rev = 0,
.funcs = &jpeg_v2_5_ip_funcs,
};
+
+const struct amdgpu_ip_block_version jpeg_v2_6_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_JPEG,
+ .major = 2,
+ .minor = 6,
+ .rev = 0,
+ .funcs = &jpeg_v2_6_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
index 2b4087c02620..3b0aa29b9879 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.h
@@ -25,5 +25,6 @@
#define __JPEG_V2_5_H__
extern const struct amdgpu_ip_block_version jpeg_v2_5_ip_block;
+extern const struct amdgpu_ip_block_version jpeg_v2_6_ip_block;
#endif /* __JPEG_V2_5_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
new file mode 100644
index 000000000000..ae7d8a1738a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c
@@ -0,0 +1,1329 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "mmhub_v1_7.h"
+
+#include "mmhub/mmhub_1_7_offset.h"
+#include "mmhub/mmhub_1_7_sh_mask.h"
+#include "vega10_enum.h"
+
+#include "soc15_common.h"
+#include "soc15.h"
+
+#define regVM_L2_CNTL3_DEFAULT 0x80100007
+#define regVM_L2_CNTL4_DEFAULT 0x000000c1
+
+static u64 mmhub_v1_7_get_fb_location(struct amdgpu_device *adev)
+{
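+ /* FB base/top registers are programmed in 16MB units; shift up by 24 to get byte addresses */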
+ u64 base = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE);
+ u64 top = RREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP);
+
+ base &= MC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
+ base <<= 24;
+
+ top &= MC_VM_FB_LOCATION_TOP__FB_TOP_MASK;
+ top <<= 24;
+
+ adev->gmc.fb_start = base;
+ adev->gmc.fb_end = top;
+ adev->gmc.fb_start_original = base;
+ adev->gmc.fb_end_original = top;
+
+ return base;
+}
+
+static void mmhub_v1_7_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+ uint64_t page_table_base)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+ hub->ctx_addr_distance * vmid, lower_32_bits(page_table_base));
+
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+ hub->ctx_addr_distance * vmid, upper_32_bits(page_table_base));
+}
+
+static void mmhub_v1_7_init_gart_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t pt_base;
+
+ if (adev->gmc.pdb0_bo)
+ pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
+ else
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ mmhub_v1_7_setup_vm_pt_regs(adev, 0, pt_base);
+
+ /* If the GART is used for FB translation, the vmid0 page table covers
+ * both vram and system memory (gart)
+ */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.fb_start >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.fb_start >> 44));
+
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+
+ } else {
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+ (u32)(adev->gmc.gart_start >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+ (u32)(adev->gmc.gart_start >> 44));
+
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+ (u32)(adev->gmc.gart_end >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+ (u32)(adev->gmc.gart_end >> 44));
+ }
+}
+
+static void mmhub_v1_7_init_system_aperture_regs(struct amdgpu_device *adev)
+{
+ uint64_t value;
+ uint32_t tmp;
+
+ /* Program the AGP BAR */
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
+
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
+
+ /* When vram is squeezed into the GART aperture, the FB aperture and
+ * AGP aperture are not used. Disable them.
+ */
+ if (adev->gmc.pdb0_bo) {
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_BOT, 0xFFFFFF);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP, adev->gmc.fb_end_original >> 24);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE, adev->gmc.fb_start_original >> 24);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_LOW_ADDR, adev->gmc.fb_start_original >> 18);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, adev->gmc.fb_end_original >> 18);
+ }
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
+ adev->vm_manager.vram_base_offset;
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(MMHUB, 0, regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+
+ /* Program "protection fault". */
+ WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
+ (u32)(adev->dummy_page_addr >> 12));
+ WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
+ (u32)((u64)adev->dummy_page_addr >> 44));
+
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
+ ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
+}
+
+static void mmhub_v1_7_init_tlb_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL, 1);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
+ MTYPE, MTYPE_UC); /* XXX for emulation. */
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);
+
+ WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+}
+
+static void mmhub_v1_7_init_cache_regs(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
+ /* XXX for emulation, refer to closed source code. */
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
+ 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp);
+
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL2, tmp);
+
+ tmp = regVM_L2_CNTL3_DEFAULT;
+ if (adev->gmc.translate_further) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
+ L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+ }
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, tmp);
+
+ tmp = regVM_L2_CNTL4_DEFAULT;
+ if (adev->gmc.xgmi.connected_to_cpu) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
+ } else {
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4,
+ VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
+ }
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL4, tmp);
+}
+
+static void mmhub_v1_7_enable_system_domain(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
+ adev->gmc.vmid0_page_table_depth);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
+ adev->gmc.vmid0_page_table_block_size);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ WREG32_SOC15(MMHUB, 0, regVM_CONTEXT0_CNTL, tmp);
+}
+
+static void mmhub_v1_7_disable_identity_aperture(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
+ 0XFFFFFFFF);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
+ 0x0000000F);
+
+ WREG32_SOC15(MMHUB, 0,
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
+ WREG32_SOC15(MMHUB, 0,
+ regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);
+
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
+ 0);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
+ 0);
+}
+
+static void mmhub_v1_7_setup_vmid_config(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ unsigned num_level, block_size;
+ uint32_t tmp;
+ int i;
+
+ num_level = adev->vm_manager.num_level;
+ block_size = adev->vm_manager.block_size;
+ if (adev->gmc.translate_further)
+ num_level -= 1;
+ else
+ block_size -= 9;
+
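+ /* Program contexts 1..15; context 0 belongs to the kernel and is
+ * set up in mmhub_v1_7_enable_system_domain().
+ */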
+ for (i = 0; i <= 14; i++) {
+ tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL, i);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
+ num_level);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
+ 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ PAGE_TABLE_BLOCK_SIZE,
+ block_size);
+ /* Send no-retry XNACK on fault to suppress VM fault storm. */
+ tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
+ !adev->gmc.noretry);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_CNTL,
+ i * hub->ctx_distance, tmp);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+ i * hub->ctx_addr_distance, 0);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+ i * hub->ctx_addr_distance,
+ lower_32_bits(adev->vm_manager.max_pfn - 1));
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+ i * hub->ctx_addr_distance,
+ upper_32_bits(adev->vm_manager.max_pfn - 1));
+ }
+}
+
+static void mmhub_v1_7_program_invalidation(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ unsigned i;
+
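+ /* Open the full address range on all 18 invalidation engines. */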
+ for (i = 0; i < 18; ++i) {
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+ i * hub->eng_addr_distance, 0xffffffff);
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+ i * hub->eng_addr_distance, 0x1f);
+ }
+}
+
+static int mmhub_v1_7_gart_enable(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev)) {
+ /*
+ * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, because they are
+ * VF copy registers so the vbios post doesn't program them; for
+ * SRIOV the driver needs to program them
+ */
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_BASE,
+ adev->gmc.vram_start >> 24);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_FB_LOCATION_TOP,
+ adev->gmc.vram_end >> 24);
+ }
+
+ /* GART Enable. */
+ mmhub_v1_7_init_gart_aperture_regs(adev);
+ mmhub_v1_7_init_system_aperture_regs(adev);
+ mmhub_v1_7_init_tlb_regs(adev);
+ mmhub_v1_7_init_cache_regs(adev);
+
+ mmhub_v1_7_enable_system_domain(adev);
+ mmhub_v1_7_disable_identity_aperture(adev);
+ mmhub_v1_7_setup_vmid_config(adev);
+ mmhub_v1_7_program_invalidation(adev);
+
+ return 0;
+}
+
+static void mmhub_v1_7_gart_disable(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+ u32 tmp;
+ u32 i;
+
+ /* Disable all tables */
+ for (i = 0; i < 16; i++)
+ WREG32_SOC15_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL,
+ i * hub->ctx_distance, 0);
+
+ /* Setup TLB control */
+ tmp = RREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL);
+ tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
+ tmp = REG_SET_FIELD(tmp,
+ MC_VM_MX_L1_TLB_CNTL,
+ ENABLE_ADVANCED_DRIVER_MODEL,
+ 0);
+ WREG32_SOC15(MMHUB, 0, regMC_VM_MX_L1_TLB_CNTL, tmp);
+
+ if (!amdgpu_sriov_vf(adev)) {
+ /* Setup L2 cache */
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL, tmp);
+ WREG32_SOC15(MMHUB, 0, regVM_L2_CNTL3, 0);
+ }
+}
+
+/**
+ * mmhub_v1_7_set_fault_enable_default - update GART/VM fault handling
+ *
+ * @adev: amdgpu_device pointer
+ * @value: true redirects VM faults to the default page
+ */
+static void mmhub_v1_7_set_fault_enable_default(struct amdgpu_device *adev, bool value)
+{
+ u32 tmp;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ tmp = RREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp,
+ VM_L2_PROTECTION_FAULT_CNTL,
+ TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
+ value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+ if (!value) {
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_NO_RETRY_FAULT, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+ CRASH_ON_RETRY_FAULT, 1);
+ }
+
+ WREG32_SOC15(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL, tmp);
+}
+
+static void mmhub_v1_7_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
+
+ hub->ctx0_ptb_addr_lo32 =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
+ hub->ctx0_ptb_addr_hi32 =
+ SOC15_REG_OFFSET(MMHUB, 0,
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
+ hub->vm_inv_eng0_req =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_REQ);
+ hub->vm_inv_eng0_ack =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_INVALIDATE_ENG0_ACK);
+ hub->vm_context0_cntl =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_CONTEXT0_CNTL);
+ hub->vm_l2_pro_fault_status =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_STATUS);
+ hub->vm_l2_pro_fault_cntl =
+ SOC15_REG_OFFSET(MMHUB, 0, regVM_L2_PROTECTION_FAULT_CNTL);
+
+ hub->ctx_distance = regVM_CONTEXT1_CNTL - regVM_CONTEXT0_CNTL;
+ hub->ctx_addr_distance = regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
+ regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
+ hub->eng_distance = regVM_INVALIDATE_ENG1_REQ - regVM_INVALIDATE_ENG0_REQ;
+ hub->eng_addr_distance = regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
+ regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
+}
+
+static void mmhub_v1_7_update_medium_grain_clock_gating(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data, def1, data1, def2 = 0, data2 = 0;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG);
+
+ def1 = data1 = RREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2);
+ def2 = data2 = RREG32_SOC15(MMHUB, 0, regDAGB1_CNTL_MISC2);
+
+ if (enable) {
+ data |= ATC_L2_MISC_CG__ENABLE_MASK;
+
+ data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+
+ data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+ } else {
+ data &= ~ATC_L2_MISC_CG__ENABLE_MASK;
+
+ data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+
+ data2 |= (DAGB1_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB1_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
+ }
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG, data);
+
+ if (def1 != data1)
+ WREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2, data1);
+
+ if (def2 != data2)
+ WREG32_SOC15(MMHUB, 0, regDAGB1_CNTL_MISC2, data2);
+}
+
+static void mmhub_v1_7_update_medium_grain_light_sleep(struct amdgpu_device *adev,
+ bool enable)
+{
+ uint32_t def, data;
+
+ def = data = RREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG);
+
+ if (enable)
+ data |= ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+ else
+ data &= ~ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
+
+ if (def != data)
+ WREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG, data);
+}
+
+static int mmhub_v1_7_set_clockgating(struct amdgpu_device *adev,
+ enum amd_clockgating_state state)
+{
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
+ /* Change state only if MGCG support is enabled through driver */
+ if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)
+ mmhub_v1_7_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+
+ /* Change state only if LS support is enabled through driver */
+ if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)
+ mmhub_v1_7_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
+
+ return 0;
+}
+
+static void mmhub_v1_7_get_clockgating(struct amdgpu_device *adev, u32 *flags)
+{
+ int data, data1;
+
+ if (amdgpu_sriov_vf(adev))
+ *flags = 0;
+
+ data = RREG32_SOC15(MMHUB, 0, regATC_L2_MISC_CG);
+
+ data1 = RREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2);
+
+ /* AMD_CG_SUPPORT_MC_MGCG */
+ if ((data & ATC_L2_MISC_CG__ENABLE_MASK) &&
+ !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
+ DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
+ *flags |= AMD_CG_SUPPORT_MC_MGCG;
+
+ /* AMD_CG_SUPPORT_MC_LS */
+ if (data & ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
+ *flags |= AMD_CG_SUPPORT_MC_LS;
+}
+
+static const struct soc15_ras_field_entry mmhub_v1_7_ras_fields[] = {
+ /* MMHUB Range 0 */
+ { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA0_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA0_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA0_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 1 */
+ { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA1_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA1_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 2 */
+ { "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 3 */
+ { "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 4 */
+ { "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+
+ /* MMHUB Range 5 */
+ { "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT),
+ },
+ { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT),
+ 0, 0,
+ },
+ { "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT),
+ },
+ { "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT),
+ SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT),
+ },
+ { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3),
+ 0, 0,
+ SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT),
+ },
+};
+
+static const struct soc15_reg_entry mmhub_v1_7_edc_cnt_regs[] = {
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_EDC_CNT3), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT2), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_EDC_CNT3), 0, 0, 0 },
+};
+
+static int mmhub_v1_7_get_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
+ uint32_t value,
+ uint32_t *sec_count,
+ uint32_t *ded_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt, ded_cnt;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_ras_fields); i++) {
+ if (mmhub_v1_7_ras_fields[i].reg_offset != reg->reg_offset)
+ continue;
+
+ sec_cnt = (value &
+ mmhub_v1_7_ras_fields[i].sec_count_mask) >>
+ mmhub_v1_7_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n",
+ mmhub_v1_7_ras_fields[i].name,
+ sec_cnt);
+ *sec_count += sec_cnt;
+ }
+
+ ded_cnt = (value &
+ mmhub_v1_7_ras_fields[i].ded_count_mask) >>
+ mmhub_v1_7_ras_fields[i].ded_count_shift;
+ if (ded_cnt) {
+ dev_info(adev->dev, "MMHUB SubBlock %s, DED %d\n",
+ mmhub_v1_7_ras_fields[i].name,
+ ded_cnt);
+ *ded_count += ded_cnt;
+ }
+ }
+
+ return 0;
+}
+
+static void mmhub_v1_7_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0, ded_count = 0;
+ uint32_t i;
+ uint32_t reg_value;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
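+ /* Walk every EDC count register; non-zero values are decoded into
+ * per-sub-block SEC/DED counts by mmhub_v1_7_get_ras_error_count().
+ */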
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_edc_cnt_regs); i++) {
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_edc_cnt_regs[i]));
+ if (reg_value)
+ mmhub_v1_7_get_ras_error_count(adev, &mmhub_v1_7_edc_cnt_regs[i],
+ reg_value, &sec_count, &ded_count);
+ }
+
+ err_data->ce_count += sec_count;
+ err_data->ue_count += ded_count;
+}
+
+static void mmhub_v1_7_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ /* write 0 to reset the edc counters */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_edc_cnt_regs); i++)
+ WREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_edc_cnt_regs[i]), 0);
+ }
+}
+
+static const struct soc15_reg_entry mmhub_v1_7_err_status_regs[] = {
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA0_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA1_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA2_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA3_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA4_ERR_STATUS), 0, 0, 0 },
+ { SOC15_REG_ENTRY(MMHUB, 0, regMMEA5_ERR_STATUS), 0, 0, 0 },
+};
+
+static void mmhub_v1_7_query_ras_error_status(struct amdgpu_device *adev)
+{
+ int i;
+ uint32_t reg_value;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_7_err_status_regs); i++) {
+ reg_value =
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_7_err_status_regs[i]));
+ if (reg_value)
+ dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
+ i, reg_value);
+ }
+}
+
+const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = {
+ .ras_late_init = amdgpu_mmhub_ras_late_init,
+ .query_ras_error_count = mmhub_v1_7_query_ras_error_count,
+ .reset_ras_error_count = mmhub_v1_7_reset_ras_error_count,
+ .get_fb_location = mmhub_v1_7_get_fb_location,
+ .init = mmhub_v1_7_init,
+ .gart_enable = mmhub_v1_7_gart_enable,
+ .set_fault_enable_default = mmhub_v1_7_set_fault_enable_default,
+ .gart_disable = mmhub_v1_7_gart_disable,
+ .set_clockgating = mmhub_v1_7_set_clockgating,
+ .get_clockgating = mmhub_v1_7_get_clockgating,
+ .setup_vm_pt_regs = mmhub_v1_7_setup_vm_pt_regs,
+ .query_ras_error_status = mmhub_v1_7_query_ras_error_status,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
new file mode 100644
index 000000000000..bf2fbeb172d1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __MMHUB_V1_7_H__
+#define __MMHUB_V1_7_H__
+
+extern const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 4bc1d1434065..c477f8972d5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -52,6 +52,20 @@
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
+#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK 0x00000FFCL
+#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK 0x001F0000L
+
+#define mmBIF_MMSCH1_DOORBELL_RANGE_ALDE 0x01d8
+#define mmBIF_MMSCH1_DOORBELL_RANGE_ALDE_BASE_IDX 2
+//BIF_MMSCH1_DOORBELL_RANGE_ALDE
+#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__OFFSET__SHIFT 0x2
+#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__SIZE__SHIFT 0x10
+#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__OFFSET_MASK 0x00000FFCL
+#define BIF_MMSCH1_DOORBELL_RANGE_ALDE__SIZE_MASK 0x001F0000L
+
+#define mmRCC_DEV0_EPF0_STRAP0_ALDE 0x0015
+#define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX 2
+
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
@@ -65,7 +79,12 @@ static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
- u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
+ u32 tmp;
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0_ALDE);
+ else
+ tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);
tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;
@@ -92,10 +111,10 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
{
u32 reg, doorbell_range;
- if (instance < 2)
+ if (instance < 2) {
reg = instance +
SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
- else
+ } else {
/*
* These registers address of SDMA2~7 is not consecutive
* from SDMA0~1. Need plus 4 dwords offset.
@@ -103,9 +122,19 @@ static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instan
* BIF_SDMA0_DOORBELL_RANGE: 0x3bc0
* BIF_SDMA1_DOORBELL_RANGE: 0x3bc4
* BIF_SDMA2_DOORBELL_RANGE: 0x3bd8
+ * BIF_SDMA4_DOORBELL_RANGE:
+ * ARCTURUS: 0x3be0
+ * ALDEBARAN: 0x3be4
*/
- reg = instance + 0x4 +
- SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
+ if (adev->asic_type == CHIP_ALDEBARAN && instance == 4)
+ reg = instance + 0x4 + 0x1 +
+ SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_SDMA0_DOORBELL_RANGE);
+ else
+ reg = instance + 0x4 +
+ SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_SDMA0_DOORBELL_RANGE);
+ }
doorbell_range = RREG32(reg);
@@ -124,9 +153,12 @@ static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_do
u32 reg;
u32 doorbell_range;
- if (instance)
- reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
- else
+ if (instance) {
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE_ALDE);
+ else
+ reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
+ } else
reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);
doorbell_range = RREG32(reg);
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index c625c5d8ed89..5846eac292c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -25,6 +25,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -65,6 +67,184 @@
static const struct amd_ip_funcs nv_common_ip_funcs;
+/* Navi */
+static const struct amdgpu_video_codec_info nv_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs nv_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(nv_video_codecs_encode_array),
+ .codec_array = nv_video_codecs_encode_array,
+};
+
+/* Navi1x */
+static const struct amdgpu_video_codec_info nv_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 8192 * 4352,
+ .max_level = 186,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 8192 * 4352,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs nv_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(nv_video_codecs_decode_array),
+ .codec_array = nv_video_codecs_decode_array,
+};
+
+/* Sienna Cichlid */
+static const struct amdgpu_video_codec_info sc_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 8192 * 4352,
+ .max_level = 186,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 8192 * 4352,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 8192 * 4352,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs sc_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(sc_video_codecs_decode_array),
+ .codec_array = sc_video_codecs_decode_array,
+};
+
+static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs)
+{
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ case CHIP_DIMGREY_CAVEFISH:
+ case CHIP_VANGOGH:
+ if (encode)
+ *codecs = &nv_video_codecs_encode;
+ else
+ *codecs = &sc_video_codecs_decode;
+ return 0;
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ if (encode)
+ *codecs = &nv_video_codecs_encode;
+ else
+ *codecs = &nv_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
/*
* Indirect registers accessor
*/
@@ -304,44 +484,6 @@ static int nv_read_register(struct amdgpu_device *adev, u32 se_num,
return -EINVAL;
}
-static int nv_asic_mode1_reset(struct amdgpu_device *adev)
-{
- u32 i;
- int ret = 0;
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- /* disable BM */
- pci_clear_master(adev->pdev);
-
- amdgpu_device_cache_pci_state(adev->pdev);
-
- if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
- dev_info(adev->dev, "GPU smu mode1 reset\n");
- ret = amdgpu_dpm_mode1_reset(adev);
- } else {
- dev_info(adev->dev, "GPU psp mode1 reset\n");
- ret = psp_gpu_reset(adev);
- }
-
- if (ret)
- dev_err(adev->dev, "GPU mode1 reset failed\n");
- amdgpu_device_load_pci_state(adev->pdev);
-
- /* wait for asic to come out of reset */
- for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio.funcs->get_memsize(adev);
-
- if (memsize != 0xffffffff)
- break;
- udelay(1);
- }
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, false);
-
- return ret;
-}
-
static int nv_asic_mode2_reset(struct amdgpu_device *adev)
{
u32 i;
@@ -444,7 +586,7 @@ static int nv_asic_reset(struct amdgpu_device *adev)
break;
default:
dev_info(adev->dev, "MODE1 reset\n");
- ret = nv_asic_mode1_reset(adev);
+ ret = amdgpu_device_mode1_reset(adev);
break;
}
@@ -847,6 +989,7 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.supports_baco = &nv_asic_supports_baco,
.pre_asic_init = &nv_pre_asic_init,
.update_umd_stable_pstate = &nv_update_umd_stable_pstate,
+ .query_video_codecs = &nv_query_video_codecs,
};
static int nv_common_early_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 3ba7bdfde65d..a41b054fe0ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -235,6 +235,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */
GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */
GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */
+ GFX_FW_TYPE_REG_LIST = 67, /* REG_LIST MI */
GFX_FW_TYPE_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
new file mode 100644
index 000000000000..fcdce46445d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -0,0 +1,378 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "amdgpu_psp.h"
+#include "amdgpu_ucode.h"
+#include "soc15_common.h"
+#include "psp_v13_0.h"
+
+#include "mp/mp_13_0_2_offset.h"
+#include "mp/mp_13_0_2_sh_mask.h"
+
+MODULE_FIRMWARE("amdgpu/aldebaran_sos.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_ta.bin");
+
+static int psp_v13_0_init_microcode(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ const char *chip_name;
+ int err = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_ALDEBARAN:
+ chip_name = "aldebaran";
+ break;
+ default:
+ BUG();
+ }
+
+ err = psp_init_sos_microcode(psp, chip_name);
+ if (err)
+ return err;
+
+ err = psp_init_ta_microcode(&adev->psp, chip_name);
+
+ return err;
+}
+
+static bool psp_v13_0_is_sos_alive(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81);
+
+ return sol_reg != 0x0;
+}
+
+static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ int ret;
+ int retry_loop;
+
+ for (retry_loop = 0; retry_loop < 10; retry_loop++) {
+ /* Wait for the bootloader to signal that it is
+ ready by setting bit 31 of C2PMSG_35 to 1 */
+ ret = psp_wait_for(psp,
+ SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+ 0x80000000,
+ 0x80000000,
+ false);
+
+ if (ret == 0)
+ return 0;
+ }
+
+ return ret;
+}
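The loaders that follow all rely on the same handshake: poll a C2PMSG register until a masked bit pattern shows up, which is what psp_wait_for() does; the retry loop above simply repeats that poll up to ten times. A rough stand-alone model of the underlying pattern, assuming a hypothetical read_reg() accessor in place of the driver's MMIO read:

#include <stdint.h>
#include <unistd.h>

/* Hypothetical register read; the driver uses an MMIO accessor instead. */
extern uint32_t read_reg(uint32_t offset);

/*
 * Poll until (reg & mask) == value or the timeout expires.
 * Returns 0 on success and -1 on timeout (the driver's helper
 * returns a negative errno instead).
 */
static int wait_for(uint32_t offset, uint32_t value, uint32_t mask,
                    unsigned int timeout_us)
{
    unsigned int i;

    for (i = 0; i < timeout_us; i++) {
        if ((read_reg(offset) & mask) == value)
            return 0;
        usleep(1);
    }
    return -1;
}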
+
+static int psp_v13_0_bootloader_load_kdb(struct psp_context *psp)
+{
+ int ret;
+ uint32_t psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Check tOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ if (psp_v13_0_is_sos_alive(psp))
+ return 0;
+
+ ret = psp_v13_0_wait_for_bootloader(psp);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy PSP KDB binary to memory */
+ memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size);
+
+ /* Provide the PSP KDB to bootloader */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ ret = psp_v13_0_wait_for_bootloader(psp);
+
+ return ret;
+}
+
+static int psp_v13_0_bootloader_load_sysdrv(struct psp_context *psp)
+{
+ int ret;
+ uint32_t psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ if (psp_v13_0_is_sos_alive(psp))
+ return 0;
+
+ ret = psp_v13_0_wait_for_bootloader(psp);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy PSP System Driver binary to memory */
+ memcpy(psp->fw_pri_buf, psp->sys_start_addr, psp->sys_bin_size);
+
+ /* Provide the sys driver to bootloader */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_SYSDRV;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+
+ ret = psp_v13_0_wait_for_bootloader(psp);
+
+ return ret;
+}
+
+static int psp_v13_0_bootloader_load_sos(struct psp_context *psp)
+{
+ int ret;
+ unsigned int psp_gfxdrv_command_reg = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* Check sOS sign of life register to confirm sys driver and sOS
+ * have already been loaded.
+ */
+ if (psp_v13_0_is_sos_alive(psp))
+ return 0;
+
+ ret = psp_v13_0_wait_for_bootloader(psp);
+ if (ret)
+ return ret;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+
+ /* Copy Secure OS binary to PSP memory */
+ memcpy(psp->fw_pri_buf, psp->sos_start_addr, psp->sos_bin_size);
+
+ /* Provide the PSP secure OS to bootloader */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36,
+ (uint32_t)(psp->fw_pri_mc_addr >> 20));
+ psp_gfxdrv_command_reg = PSP_BL__LOAD_SOSDRV;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35,
+ psp_gfxdrv_command_reg);
+
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81),
+ RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81),
+ 0, true);
+
+ return ret;
+}
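Each of the three loaders above hands the bootloader only bits [63:20] of the PSP private buffer address through C2PMSG_36, which is sufficient because the buffer is 1 MB aligned. A small sketch of that encoding, with the alignment assumption made explicit:

#include <assert.h>
#include <stdint.h>

#define ONE_MEG (1024 * 1024)

/* Encode a 1 MB-aligned MC address into the 32-bit value written to C2PMSG_36. */
static uint32_t encode_fw_pri_addr(uint64_t fw_pri_mc_addr)
{
    /* The low 20 bits must be zero or information would be lost. */
    assert((fw_pri_mc_addr & (ONE_MEG - 1)) == 0);

    return (uint32_t)(fw_pri_mc_addr >> 20);
}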
+
+static int psp_v13_0_ring_init(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ring = &psp->km_ring;
+
+ ring->ring_type = ring_type;
+
+ /* allocate 4k Page of Local Frame Buffer memory for ring */
+ ring->ring_size = 0x1000;
+ ret = amdgpu_bo_create_kernel(adev, ring->ring_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+ if (ret) {
+ ring->ring_size = 0;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int psp_v13_0_ring_stop(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ /* Write the ring destroy command*/
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+ /* Wait for response flag (bit 31) */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ } else {
+ /* Write the ring destroy command*/
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+ /* Wait for response flag (bit 31) */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ }
+
+ return ret;
+}
+
+static int psp_v13_0_ring_create(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ unsigned int psp_ring_reg = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ ret = psp_v13_0_ring_stop(psp, ring_type);
+ if (ret) {
+ DRM_ERROR("psp_v13_0_ring_stop_sriov failed!\n");
+ return ret;
+ }
+
+ /* Write low address of the ring to C2PMSG_102 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_103 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, psp_ring_reg);
+
+ /* Write the ring initialization command to C2PMSG_101 */
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_INIT_GPCOM_RING);
+
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_101 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x8000FFFF, false);
+
+ } else {
+ /* Wait for sOS ready for ring creation */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
+ if (ret) {
+ DRM_ERROR("Failed to wait for trust OS ready for ring creation\n");
+ return ret;
+ }
+
+ /* Write low address of the ring to C2PMSG_69 */
+ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_69, psp_ring_reg);
+ /* Write high address of the ring to C2PMSG_70 */
+ psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_70, psp_ring_reg);
+ /* Write size of ring to C2PMSG_71 */
+ psp_ring_reg = ring->ring_size;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_71, psp_ring_reg);
+ /* Write the ring initialization command to C2PMSG_64 */
+ psp_ring_reg = ring_type;
+ psp_ring_reg = psp_ring_reg << 16;
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64, psp_ring_reg);
+
+ /* there might be a handshake issue with hardware which needs a delay */
+ mdelay(20);
+
+ /* Wait for response flag (bit 31) in C2PMSG_64 */
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+ }
+
+ return ret;
+}
+
+static int psp_v13_0_ring_destroy(struct psp_context *psp,
+ enum psp_ring_type ring_type)
+{
+ int ret = 0;
+ struct psp_ring *ring = &psp->km_ring;
+ struct amdgpu_device *adev = psp->adev;
+
+ ret = psp_v13_0_ring_stop(psp, ring_type);
+ if (ret)
+ DRM_ERROR("Failed to stop psp ring\n");
+
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &ring->ring_mem_mc_addr,
+ (void **)&ring->ring_mem);
+
+ return ret;
+}
+
+static uint32_t psp_v13_0_ring_get_wptr(struct psp_context *psp)
+{
+ uint32_t data;
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev))
+ data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102);
+ else
+ data = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67);
+
+ return data;
+}
+
+static void psp_v13_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ if (amdgpu_sriov_vf(adev)) {
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, value);
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_CONSUME_CMD);
+ } else
+ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_67, value);
+}
+
+static const struct psp_funcs psp_v13_0_funcs = {
+ .init_microcode = psp_v13_0_init_microcode,
+ .bootloader_load_kdb = psp_v13_0_bootloader_load_kdb,
+ .bootloader_load_sysdrv = psp_v13_0_bootloader_load_sysdrv,
+ .bootloader_load_sos = psp_v13_0_bootloader_load_sos,
+ .ring_init = psp_v13_0_ring_init,
+ .ring_create = psp_v13_0_ring_create,
+ .ring_stop = psp_v13_0_ring_stop,
+ .ring_destroy = psp_v13_0_ring_destroy,
+ .ring_get_wptr = psp_v13_0_ring_get_wptr,
+ .ring_set_wptr = psp_v13_0_ring_set_wptr,
+};
+
+void psp_v13_0_set_psp_funcs(struct psp_context *psp)
+{
+ psp->funcs = &psp_v13_0_funcs;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h
new file mode 100644
index 000000000000..b2414a729ca1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __PSP_V13_0_H__
+#define __PSP_V13_0_H__
+
+#include "amdgpu_psp.h"
+
+void psp_v13_0_set_psp_funcs(struct psp_context *psp);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index c8c22c1d1e65..98b7db516438 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -56,6 +56,7 @@
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
#include "amdgpu_ras.h"
+#include "sdma_v4_4.h"
MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
@@ -69,6 +70,7 @@ MODULE_FIRMWARE("amdgpu/raven2_sdma.bin");
MODULE_FIRMWARE("amdgpu/arcturus_sdma.bin");
MODULE_FIRMWARE("amdgpu/renoir_sdma.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_sdma.bin");
+MODULE_FIRMWARE("amdgpu/aldebaran_sdma.bin");
#define SDMA0_POWER_CNTL__ON_OFF_CONDITION_HOLD_TIME_MASK 0x000000F8L
#define SDMA0_POWER_CNTL__ON_OFF_STATUS_DURATION_TIME_MASK 0xFC000000L
@@ -259,6 +261,24 @@ static const struct soc15_reg_golden golden_settings_sdma_arct[] =
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
};
+static const struct soc15_reg_golden golden_settings_sdma_aldebaran[] = {
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
+ SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
+};
+
static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100),
@@ -482,6 +502,11 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_arct,
ARRAY_SIZE(golden_settings_sdma_arct));
break;
+ case CHIP_ALDEBARAN:
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_aldebaran,
+ ARRAY_SIZE(golden_settings_sdma_aldebaran));
+ break;
case CHIP_RAVEN:
soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
@@ -564,7 +589,8 @@ static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev)
/* arcturus shares the same FW memory across
all SDMA instances */
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN)
break;
}
@@ -621,6 +647,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
else
chip_name = "green_sardine";
break;
+ case CHIP_ALDEBARAN:
+ chip_name = "aldebaran";
+ break;
default:
BUG();
}
@@ -636,8 +665,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
goto out;
for (i = 1; i < adev->sdma.num_instances; i++) {
- if (adev->asic_type == CHIP_ARCTURUS) {
- /* Acturus will leverage the same FW memory
+ if (adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN) {
+ /* Arcturus & Aldebaran will leverage the same FW memory
for every SDMA instance */
memcpy((void *)&adev->sdma.instance[i],
(void *)&adev->sdma.instance[0],
@@ -1825,6 +1855,8 @@ static int sdma_v4_0_early_init(void *handle)
adev->sdma.num_instances = 1;
else if (adev->asic_type == CHIP_ARCTURUS)
adev->sdma.num_instances = 8;
+ else if (adev->asic_type == CHIP_ALDEBARAN)
+ adev->sdma.num_instances = 5;
else
adev->sdma.num_instances = 2;
@@ -1895,6 +1927,33 @@ static int sdma_v4_0_sw_init(void *handle)
return r;
}
+ /* SDMA VM_HOLE/DOORBELL_INV/POLL_TIMEOUT/SRBM_WRITE_PROTECTION event*/
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_VM_HOLE,
+ &adev->sdma.vm_hole_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_DOORBELL_INVALID,
+ &adev->sdma.doorbell_invalid_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_POLL_TIMEOUT,
+ &adev->sdma.pool_timeout_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, sdma_v4_0_seq_to_irq_id(i),
+ SDMA0_4_0__SRCID__SDMA_SRBMWRITE,
+ &adev->sdma.srbm_write_irq);
+ if (r)
+ return r;
+ }
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -2149,6 +2208,72 @@ static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ int instance;
+ struct amdgpu_task_info task_info;
+ u64 addr;
+
+ instance = sdma_v4_0_irq_id_to_seq(entry->client_id);
+ if (instance < 0 || instance >= adev->sdma.num_instances) {
+ dev_err(adev->dev, "sdma instance invalid %d\n", instance);
+ return -EINVAL;
+ }
+
+ addr = (u64)entry->src_data[0] << 12;
+ addr |= ((u64)entry->src_data[1] & 0xf) << 44;
+
+ memset(&task_info, 0, sizeof(struct amdgpu_task_info));
+ amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
+
+ dev_info(adev->dev,
+ "[sdma%d] address:0x%016llx src_id:%u ring:%u vmid:%u "
+ "pasid:%u, for process %s pid %d thread %s pid %d\n",
+ instance, addr, entry->src_id, entry->ring_id, entry->vmid,
+ entry->pasid, task_info.process_name, task_info.tgid,
+ task_info.task_name, task_info.pid);
+ return 0;
+}
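The fault address printed above is split across the two source-data dwords of the IV entry: src_data[0] carries the 4 KB page frame (address bits 43:12) and the low nibble of src_data[1] carries bits 47:44. A stand-alone sketch of that reconstruction, mirroring the shifts used in the handler:

#include <stdint.h>

/* Rebuild the 48-bit fault address from the two IV source-data dwords. */
static uint64_t sdma_fault_addr(uint32_t src_data0, uint32_t src_data1)
{
    uint64_t addr;

    addr  = (uint64_t)src_data0 << 12;          /* bits 43:12, 4 KB page frame */
    addr |= ((uint64_t)src_data1 & 0xf) << 44;  /* bits 47:44 */

    return addr;
}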
+
+static int sdma_v4_0_process_vm_hole_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_err(adev->dev, "MC or SEM address in VM hole\n");
+ sdma_v4_0_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_0_process_doorbell_invalid_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_err(adev->dev, "SDMA received a doorbell from BIF with byte_enable !=0xff\n");
+ sdma_v4_0_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_0_process_pool_timeout_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_err(adev->dev,
+ "Polling register/memory timed out executing POLL_REG/MEM with a finite timer\n");
+ sdma_v4_0_print_iv_entry(adev, entry);
+ return 0;
+}
+
+static int sdma_v4_0_process_srbm_write_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ dev_err(adev->dev,
+ "SDMA received a Register Write SRBM_WRITE command in a non-privileged command buffer\n");
+ sdma_v4_0_print_iv_entry(adev, entry);
+ return 0;
+}
+
static void sdma_v4_0_update_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
@@ -2222,21 +2347,10 @@ static int sdma_v4_0_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- sdma_v4_0_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE);
- sdma_v4_0_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE);
- break;
- default:
- break;
- }
+ sdma_v4_0_update_medium_grain_clock_gating(adev,
+ state == AMD_CG_STATE_GATE);
+ sdma_v4_0_update_medium_grain_light_sleep(adev,
+ state == AMD_CG_STATE_GATE);
return 0;
}
@@ -2249,7 +2363,7 @@ static int sdma_v4_0_set_powergating_state(void *handle,
case CHIP_RAVEN:
case CHIP_RENOIR:
sdma_v4_1_update_power_gating(adev,
- state == AMD_PG_STATE_GATE ? true : false);
+ state == AMD_PG_STATE_GATE);
break;
default:
break;
@@ -2465,7 +2579,21 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
.process = amdgpu_sdma_process_ecc_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v4_0_vm_hole_irq_funcs = {
+ .process = sdma_v4_0_process_vm_hole_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_0_doorbell_invalid_irq_funcs = {
+ .process = sdma_v4_0_process_doorbell_invalid_irq,
+};
+
+static const struct amdgpu_irq_src_funcs sdma_v4_0_pool_timeout_irq_funcs = {
+ .process = sdma_v4_0_process_pool_timeout_irq,
+};
+static const struct amdgpu_irq_src_funcs sdma_v4_0_srbm_write_irq_funcs = {
+ .process = sdma_v4_0_process_srbm_write_irq,
+};
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
@@ -2474,9 +2602,17 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1;
adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE1;
break;
+ case 5:
+ adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE5;
+ adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE5;
+ break;
case 8:
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.vm_hole_irq.num_types = AMDGPU_SDMA_IRQ_INSTANCE5;
+ adev->sdma.doorbell_invalid_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.pool_timeout_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.srbm_write_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
break;
case 2:
default:
@@ -2487,6 +2623,10 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
+ adev->sdma.vm_hole_irq.funcs = &sdma_v4_0_vm_hole_irq_funcs;
+ adev->sdma.doorbell_invalid_irq.funcs = &sdma_v4_0_doorbell_invalid_irq_funcs;
+ adev->sdma.pool_timeout_irq.funcs = &sdma_v4_0_pool_timeout_irq_funcs;
+ adev->sdma.srbm_write_irq.funcs = &sdma_v4_0_srbm_write_irq_funcs;
}
/**
@@ -2655,6 +2795,9 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
adev->sdma.funcs = &sdma_v4_0_ras_funcs;
break;
+ case CHIP_ALDEBARAN:
+ adev->sdma.funcs = &sdma_v4_4_ras_funcs;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
new file mode 100644
index 000000000000..6fcb95c89999
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "sdma/sdma_4_4_0_offset.h"
+#include "sdma/sdma_4_4_0_sh_mask.h"
+#include "soc15.h"
+#include "amdgpu_ras.h"
+
+#define SDMA1_REG_OFFSET 0x600
+#define SDMA2_REG_OFFSET 0x1cda0
+#define SDMA3_REG_OFFSET 0x1d1a0
+#define SDMA4_REG_OFFSET 0x1d5a0
+
+/* helper function that allows using only the sdma0 register offset
+ * to calculate the register offset for all the sdma instances */
+static uint32_t sdma_v4_4_get_reg_offset(struct amdgpu_device *adev,
+ uint32_t instance,
+ uint32_t offset)
+{
+ uint32_t sdma_base = adev->reg_offset[SDMA0_HWIP][0][0];
+
+ switch (instance) {
+ case 0:
+ return (sdma_base + offset);
+ case 1:
+ return (sdma_base + SDMA1_REG_OFFSET + offset);
+ case 2:
+ return (sdma_base + SDMA2_REG_OFFSET + offset);
+ case 3:
+ return (sdma_base + SDMA3_REG_OFFSET + offset);
+ case 4:
+ return (sdma_base + SDMA4_REG_OFFSET + offset);
+ default:
+ break;
+ }
+ return 0;
+}
+
+static const struct soc15_ras_field_entry sdma_v4_4_ras_fields[] = {
+ { "SDMA_MBANK_DATA_BUF0_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF0_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF1_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF1_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF2_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF2_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF3_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF3_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF4_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF4_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF5_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF5_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF6_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF6_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF7_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF7_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF8_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF8_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF9_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF9_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF10_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF10_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF11_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF11_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF12_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF12_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF13_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF13_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF14_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF14_SED),
+ 0, 0,
+ },
+ { "SDMA_MBANK_DATA_BUF15_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER, SDMA_MBANK_DATA_BUF15_SED),
+ 0, 0,
+ },
+ { "SDMA_UCODE_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UCODE_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_RB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_RB_CMD_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_IB_CMD_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_IB_CMD_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_UTCL1_RD_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_RD_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_UTCL1_RDBST_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_UTCL1_RDBST_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_DATA_LUT_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_DATA_LUT_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_SPLIT_DATA_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_SPLIT_DATA_BUF_SED),
+ 0, 0,
+ },
+ { "SDMA_MC_WR_ADDR_FIFO_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_MC_WR_ADDR_FIFO_SED),
+ 0, 0,
+ },
+ { "SDMA_MC_RDRET_BUF_SED", SOC15_REG_ENTRY(SDMA0, 0, regSDMA0_EDC_COUNTER2),
+ SOC15_REG_FIELD(SDMA0_EDC_COUNTER2, SDMA_MC_WR_ADDR_FIFO_SED),
+ 0, 0,
+ },
+};
+
+static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev,
+ uint32_t value,
+ uint32_t instance,
+ uint32_t *sec_count)
+{
+ uint32_t i;
+ uint32_t sec_cnt;
+
+ /* double bit (multiple bit) error detection is not supported */
+ for (i = 0; i < ARRAY_SIZE(sdma_v4_4_ras_fields); i++) {
+ /* the SDMA_EDC_COUNTER register in each sdma instance
+ * shares the same sed shift_mask
+ */
+ sec_cnt = (value &
+ sdma_v4_4_ras_fields[i].sec_count_mask) >>
+ sdma_v4_4_ras_fields[i].sec_count_shift;
+ if (sec_cnt) {
+ dev_info(adev->dev, "Detected %s in SDMA%d, SED %d\n",
+ sdma_v4_4_ras_fields[i].name,
+ instance, sec_cnt);
+ *sec_count += sec_cnt;
+ }
+ }
+}
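Every entry in sdma_v4_4_ras_fields supplies a (mask, shift) pair locating its single-error-detected (SED) count inside the EDC counter value, so the loop above extracts each field from one register read and accumulates the counts. The extraction itself reduces to a masked shift, sketched stand-alone below:

#include <stdint.h>

/* Extract one SED counter field from an EDC counter register value. */
static uint32_t sed_count(uint32_t reg_value, uint32_t mask, uint32_t shift)
{
    return (reg_value & mask) >> shift;
}

/* Example: a two-bit field at bits 5:4 holding the value 3 -> sed_count(0x30, 0x30, 4) == 3. */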
+
+static int sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev,
+ uint32_t instance,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ uint32_t sec_count = 0;
+ uint32_t reg_value = 0;
+ uint32_t reg_offset = 0;
+
+ reg_offset = sdma_v4_4_get_reg_offset(adev, instance, regSDMA0_EDC_COUNTER);
+ reg_value = RREG32(reg_offset);
+ /* double bit error is not supported */
+ if (reg_value)
+ sdma_v4_4_get_ras_error_count(adev, reg_value, instance, &sec_count);
+ /* err_data->ce_count should be initialized to 0
+ * before calling into this function */
+ err_data->ce_count += sec_count;
+ /* double bit error is not supported
+ * set ue count to 0 */
+ err_data->ue_count = 0;
+
+ return 0;
+}
+
+static void sdma_v4_4_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ int i;
+ uint32_t reg_offset;
+
+ /* write 0 to EDC_COUNTER reg to clear sdma edc counters */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ reg_offset = sdma_v4_4_get_reg_offset(adev, i, regSDMA0_EDC_COUNTER);
+ WREG32(reg_offset, 0);
+ reg_offset = sdma_v4_4_get_reg_offset(adev, i, regSDMA0_EDC_COUNTER2);
+ WREG32(reg_offset, 0);
+ }
+ }
+}
+
+const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs = {
+ .ras_late_init = amdgpu_sdma_ras_late_init,
+ .ras_fini = amdgpu_sdma_ras_fini,
+ .query_ras_error_count = sdma_v4_4_query_ras_error_count,
+ .reset_ras_error_count = sdma_v4_4_reset_ras_error_count,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h
new file mode 100644
index 000000000000..74a6e5b5e949
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SDMA_V4_4_H__
+#define __SDMA_V4_4_H__
+
+extern const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs;
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 690a5090475a..b39e7db0e299 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -1595,9 +1595,9 @@ static int sdma_v5_2_set_clockgating_state(void *handle,
case CHIP_VANGOGH:
case CHIP_DIMGREY_CAVEFISH:
sdma_v5_2_update_medium_grain_clock_gating(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
sdma_v5_2_update_medium_grain_light_sleep(adev,
- state == AMD_CG_STATE_GATE ? true : false);
+ state == AMD_CG_STATE_GATE);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 6b5cf7882a12..7cbc2bb03bc6 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -26,6 +26,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -905,6 +907,114 @@ static const u32 hainan_mgcg_cgcg_init[] =
0x3630, 0xfffffff0, 0x00000100,
};
+/* XXX: update when we support VCE */
+#if 0
+/* tahiti, pitcairn, verde */
+static const struct amdgpu_video_codec_info tahiti_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(tahiti_video_codecs_encode_array),
+ .codec_array = tahiti_video_codecs_encode_array,
+};
+#else
+static const struct amdgpu_video_codecs tahiti_video_codecs_encode =
+{
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+#endif
+/* oland and hainan don't support encode */
+static const struct amdgpu_video_codecs hainan_video_codecs_encode =
+{
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
+/* tahiti, pitcairn, verde, oland */
+static const struct amdgpu_video_codec_info tahiti_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 41,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 2048,
+ .max_height = 1152,
+ .max_pixels_per_frame = 2048 * 1152,
+ .max_level = 4,
+ },
+};
+
+static const struct amdgpu_video_codecs tahiti_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(tahiti_video_codecs_decode_array),
+ .codec_array = tahiti_video_codecs_decode_array,
+};
+
+/* hainan doesn't support decode */
+static const struct amdgpu_video_codecs hainan_video_codecs_decode =
+{
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
+static int si_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs)
+{
+ switch (adev->asic_type) {
+ case CHIP_VERDE:
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ if (encode)
+ *codecs = &tahiti_video_codecs_encode;
+ else
+ *codecs = &tahiti_video_codecs_decode;
+ return 0;
+ case CHIP_OLAND:
+ if (encode)
+ *codecs = &hainan_video_codecs_encode;
+ else
+ *codecs = &tahiti_video_codecs_decode;
+ return 0;
+ case CHIP_HAINAN:
+ if (encode)
+ *codecs = &hainan_video_codecs_encode;
+ else
+ *codecs = &hainan_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static u32 si_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
unsigned long flags;
@@ -1903,6 +2013,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
.get_pcie_replay_count = &si_get_pcie_replay_count,
.supports_baco = &si_asic_supports_baco,
.pre_asic_init = &si_pre_asic_init,
+ .query_video_codecs = &si_query_video_codecs,
};
static uint32_t si_get_rev_id(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c
new file mode 100644
index 000000000000..079b094c48ad
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "smuio_v13_0.h"
+#include "smuio/smuio_13_0_2_offset.h"
+#include "smuio/smuio_13_0_2_sh_mask.h"
+
+#define SMUIO_MCM_CONFIG__HOST_GPU_XGMI_MASK 0x00000001L
+
+static u32 smuio_v13_0_get_rom_index_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(SMUIO, 0, regROM_INDEX);
+}
+
+static u32 smuio_v13_0_get_rom_data_offset(struct amdgpu_device *adev)
+{
+ return SOC15_REG_OFFSET(SMUIO, 0, regROM_DATA);
+}
+
+static void smuio_v13_0_update_rom_clock_gating(struct amdgpu_device *adev, bool enable)
+{
+ u32 def, data;
+
+ /* enable/disable ROM CG is not supported on APU */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ def = data = RREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0);
+
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
+ data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+ CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
+ else
+ data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
+ CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
+
+ if (def != data)
+ WREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0, data);
+}
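The helper above follows the usual read-modify-write idiom for clock-gating registers: read the register once, compute the new value, and write back only when something actually changed, so redundant MMIO writes are avoided. A minimal sketch of the idiom with hypothetical read_reg()/write_reg() accessors:

#include <stdbool.h>
#include <stdint.h>

extern uint32_t read_reg(uint32_t offset);               /* hypothetical MMIO read */
extern void write_reg(uint32_t offset, uint32_t value);  /* hypothetical MMIO write */

/* Set or clear override bits, writing back only when the value changed. */
static void update_override_bits(uint32_t offset, uint32_t bits, bool enable)
{
    uint32_t def, data;

    def = data = read_reg(offset);

    if (enable)
        data &= ~bits;  /* clearing the soft overrides lets the clock gate */
    else
        data |= bits;

    if (def != data)
        write_reg(offset, data);
}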
+
+static void smuio_v13_0_get_clock_gating_state(struct amdgpu_device *adev, u32 *flags)
+{
+ u32 data;
+
+ /* CGTT_ROM_CLK_CTRL0 is not available for APU */
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ data = RREG32_SOC15(SMUIO, 0, regCGTT_ROM_CLK_CTRL0);
+ if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
+ *flags |= AMD_CG_SUPPORT_ROM_MGCG;
+}
+
+/**
+ * smuio_v13_0_get_die_id - query die id from FCH.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns die id
+ */
+static u32 smuio_v13_0_get_die_id(struct amdgpu_device *adev)
+{
+ u32 data, die_id;
+
+ data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
+ die_id = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, DIE_ID);
+
+ return die_id;
+}
+
+/**
+ * smuio_v13_0_is_host_gpu_xgmi_supported - detect xgmi interface between cpu and gpu/s.
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Returns true if the host-GPU interface is XGMI, false otherwise.
+ */
+static bool smuio_v13_0_is_host_gpu_xgmi_supported(struct amdgpu_device *adev)
+{
+ u32 data;
+
+ data = RREG32_SOC15(SMUIO, 0, regSMUIO_MCM_CONFIG);
+ data = REG_GET_FIELD(data, SMUIO_MCM_CONFIG, TOPOLOGY_ID);
+ /* data[4:0]
+ * bit 0 == 0 host-gpu interface is PCIE
+ * bit 0 == 1 host-gpu interface is Alternate Protocol
+ * for AMD, this is XGMI
+ */
+ data &= SMUIO_MCM_CONFIG__HOST_GPU_XGMI_MASK;
+
+ return data ? true : false;
+}
+
+const struct amdgpu_smuio_funcs smuio_v13_0_funcs = {
+ .get_rom_index_offset = smuio_v13_0_get_rom_index_offset,
+ .get_rom_data_offset = smuio_v13_0_get_rom_data_offset,
+ .get_die_id = smuio_v13_0_get_die_id,
+ .is_host_gpu_xgmi_supported = smuio_v13_0_is_host_gpu_xgmi_supported,
+ .update_rom_clock_gating = smuio_v13_0_update_rom_clock_gating,
+ .get_clock_gating_state = smuio_v13_0_get_clock_gating_state,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.h b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.h
new file mode 100644
index 000000000000..a3bfe3e4fb46
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/smuio_v13_0.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMUIO_V13_0_H__
+#define __SMUIO_V13_0_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_smuio_funcs smuio_v13_0_funcs;
+
+#endif /* __SMUIO_V13_0_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 1221aa6b40a9..3808402cd964 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -25,6 +25,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -71,6 +73,7 @@
#include "jpeg_v2_5.h"
#include "smuio_v9_0.h"
#include "smuio_v11_0.h"
+#include "smuio_v13_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
@@ -83,6 +86,234 @@
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
+/* Vega, Raven, Arcturus */
+static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs vega_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(vega_video_codecs_encode_array),
+ .codec_array = vega_video_codecs_encode_array,
+};
+
+/* Vega */
+static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 186,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs vega_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(vega_video_codecs_decode_array),
+ .codec_array = vega_video_codecs_decode_array,
+};
+
+/* Raven */
+static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 186,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs rv_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(rv_video_codecs_decode_array),
+ .codec_array = rv_video_codecs_decode_array,
+};
+
+/* Renoir, Arcturus */
+static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 186,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9,
+ .max_width = 8192,
+ .max_height = 4352,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs rn_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(rn_video_codecs_decode_array),
+ .codec_array = rn_video_codecs_decode_array,
+};
+
+static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ if (encode)
+ *codecs = &vega_video_codecs_encode;
+ else
+ *codecs = &vega_video_codecs_decode;
+ return 0;
+ case CHIP_RAVEN:
+ if (encode)
+ *codecs = &vega_video_codecs_encode;
+ else
+ *codecs = &rv_video_codecs_decode;
+ return 0;
+ case CHIP_ARCTURUS:
+ case CHIP_RENOIR:
+ if (encode)
+ *codecs = &vega_video_codecs_encode;
+ else
+ *codecs = &rn_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
/*
* Indirect registers accessor
*/
@@ -419,40 +650,6 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
}
-static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
-{
- u32 i;
- int ret = 0;
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
- dev_info(adev->dev, "GPU mode1 reset\n");
-
- /* disable BM */
- pci_clear_master(adev->pdev);
-
- amdgpu_device_cache_pci_state(adev->pdev);
-
- ret = psp_gpu_reset(adev);
- if (ret)
- dev_err(adev->dev, "GPU mode1 reset failed\n");
-
- amdgpu_device_load_pci_state(adev->pdev);
-
- /* wait for asic to come out of reset */
- for (i = 0; i < adev->usec_timeout; i++) {
- u32 memsize = adev->nbio.funcs->get_memsize(adev);
-
- if (memsize != 0xffffffff)
- break;
- udelay(1);
- }
-
- amdgpu_atombios_scratch_regs_engine_hung(adev, false);
-
- return ret;
-}
-
static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
@@ -477,13 +674,21 @@ static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
bool baco_reset = false;
+ bool connected_to_cpu = false;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu)
+ connected_to_cpu = true;
+
if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
amdgpu_reset_method == AMD_RESET_METHOD_BACO ||
- amdgpu_reset_method == AMD_RESET_METHOD_PCI)
- return amdgpu_reset_method;
+ amdgpu_reset_method == AMD_RESET_METHOD_PCI) {
+ /* If connected to cpu, driver only supports mode2 */
+ if (connected_to_cpu)
+ return AMD_RESET_METHOD_MODE2;
+ return amdgpu_reset_method;
+ }
if (amdgpu_reset_method != -1)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
@@ -509,6 +714,14 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
baco_reset = false;
break;
+ case CHIP_ALDEBARAN:
+ /*
+ * 1. connected to cpu: driver issues mode2 reset
+ * 2. discrete gpu: driver issues mode1 reset
+ */
+ if (connected_to_cpu)
+ return AMD_RESET_METHOD_MODE2;
+ break;
default:
break;
}
@@ -538,7 +751,7 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
return amdgpu_dpm_mode2_reset(adev);
default:
dev_info(adev->dev, "MODE1 reset\n");
- return soc15_asic_mode1_reset(adev);
+ return amdgpu_device_mode1_reset(adev);
}
}
@@ -661,6 +874,9 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
case CHIP_ARCTURUS:
arct_reg_base_init(adev);
break;
+ case CHIP_ALDEBARAN:
+ aldebaran_reg_base_init(adev);
+ break;
default:
DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type);
break;
@@ -683,14 +899,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
if (!amdgpu_sriov_vf(adev))
soc15_reg_base_init(adev);
- if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
- adev->gmc.xgmi.supported = true;
-
if (adev->flags & AMD_IS_APU) {
adev->nbio.funcs = &nbio_v7_0_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
} else if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS) {
+ adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN) {
adev->nbio.funcs = &nbio_v7_4_funcs;
adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
} else {
@@ -699,7 +913,9 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
}
adev->hdp.funcs = &hdp_v4_0_funcs;
- if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
+ if (adev->asic_type == CHIP_VEGA20 ||
+ adev->asic_type == CHIP_ARCTURUS ||
+ adev->asic_type == CHIP_ALDEBARAN)
adev->df.funcs = &df_v3_6_funcs;
else
adev->df.funcs = &df_v1_7_funcs;
@@ -707,6 +923,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_ARCTURUS)
adev->smuio.funcs = &smuio_v11_0_funcs;
+ else if (adev->asic_type == CHIP_ALDEBARAN)
+ adev->smuio.funcs = &smuio_v13_0_funcs;
else
adev->smuio.funcs = &smuio_v9_0_funcs;
@@ -826,6 +1044,27 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
+ case CHIP_ALDEBARAN:
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
+ } else {
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
+ }
+
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+
+ amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
+ break;
default:
return -EINVAL;
}
@@ -994,6 +1233,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs =
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
.pre_asic_init = &soc15_pre_asic_init,
+ .query_video_codecs = &soc15_query_video_codecs,
};
static const struct amdgpu_asic_funcs vega20_asic_funcs =
@@ -1015,6 +1255,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.get_pcie_replay_count = &soc15_get_pcie_replay_count,
.supports_baco = &soc15_supports_baco,
.pre_asic_init = &soc15_pre_asic_init,
+ .query_video_codecs = &soc15_query_video_codecs,
};
static int soc15_common_early_init(void *handle)
@@ -1244,6 +1485,21 @@ static int soc15_common_early_init(void *handle)
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_VCN_DPG;
break;
+ case CHIP_ALDEBARAN:
+ adev->asic_funcs = &vega20_asic_funcs;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_IH_CG;
+ /*AMD_CG_SUPPORT_VCN_MGCG |AMD_CG_SUPPORT_JPEG_MGCG;*/
+ adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG;
+ adev->external_rev_id = adev->rev_id + 0x3c;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -1477,6 +1733,7 @@ static int soc15_common_set_clockgating_state(void *handle,
state == AMD_CG_STATE_GATE);
break;
case CHIP_ARCTURUS:
+ case CHIP_ALDEBARAN:
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -1498,15 +1755,18 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
adev->hdp.funcs->get_clock_gating_state(adev, flags);
- /* AMD_CG_SUPPORT_DRM_MGCG */
- data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
- if (!(data & 0x01000000))
- *flags |= AMD_CG_SUPPORT_DRM_MGCG;
+ if (adev->asic_type != CHIP_ALDEBARAN) {
- /* AMD_CG_SUPPORT_DRM_LS */
- data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
- if (data & 0x1)
- *flags |= AMD_CG_SUPPORT_DRM_LS;
+ /* AMD_CG_SUPPORT_DRM_MGCG */
+ data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
+ if (!(data & 0x01000000))
+ *flags |= AMD_CG_SUPPORT_DRM_MGCG;
+
+ /* AMD_CG_SUPPORT_DRM_LS */
+ data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
+ if (data & 0x1)
+ *flags |= AMD_CG_SUPPORT_DRM_LS;
+ }
/* AMD_CG_SUPPORT_ROM_MGCG */
adev->smuio.funcs->get_clock_gating_state(adev, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 8f38f047265b..034cfdfc4dbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -49,6 +49,13 @@ struct soc15_reg_rlcg {
u32 reg;
};
+struct soc15_reg {
+ uint32_t hwip;
+ uint32_t inst;
+ uint32_t seg;
+ uint32_t reg_offset;
+};
+
struct soc15_reg_entry {
uint32_t hwip;
uint32_t inst;
@@ -88,6 +95,10 @@ struct soc15_ras_field_entry {
#define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT
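+/* Extract a register field value given its mask and shift; the RAS variant
+ * pulls the <field>_count_mask/_count_shift members from a soc15_ras_field_entry.
+ */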
+#define SOC15_REG_FIELD_VAL(val, mask, shift) (((val) & mask) >> shift)
+
+#define SOC15_RAS_REG_FIELD_VAL(val, entry, field) SOC15_REG_FIELD_VAL((val), (entry).field##_count_mask, (entry).field##_count_shift)
+
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
void soc15_set_virt_ops(struct amdgpu_device *adev);
@@ -100,6 +111,7 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
int vega10_reg_base_init(struct amdgpu_device *adev);
int vega20_reg_base_init(struct amdgpu_device *adev);
int arct_reg_base_init(struct amdgpu_device *adev);
+int aldebaran_reg_base_init(struct amdgpu_device *adev);
void vega10_doorbell_index_init(struct amdgpu_device *adev);
void vega20_doorbell_index_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index a5c00ab8b021..8cdf5d1685cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -100,6 +100,30 @@
} \
} while (0)
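+/* RLCG-assisted register write for SR-IOV full access: stage the value in
+ * SCRATCH_REG0 and the target offset (bit 31 set as a "pending" flag) in
+ * SCRATCH_REG1, ring RLC_SPARE_INT, then poll SCRATCH_REG1 until bit 31 is
+ * cleared; otherwise fall back to a direct MMIO write.
+ */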
+#define WREG32_RLC_EX(prefix, reg, value) \
+ do { \
+ if (amdgpu_sriov_fullaccess(adev)) { \
+ uint32_t i = 0; \
+ uint32_t retries = 50000; \
+ uint32_t r0 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG0_BASE_IDX] + prefix##SCRATCH_REG0; \
+ uint32_t r1 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG1; \
+ uint32_t spare_int = adev->reg_offset[GC_HWIP][0][prefix##RLC_SPARE_INT_BASE_IDX] + prefix##RLC_SPARE_INT; \
+ WREG32(r0, value); \
+ WREG32(r1, (reg | 0x80000000)); \
+ WREG32(spare_int, 0x1); \
+ for (i = 0; i < retries; i++) { \
+ u32 tmp = RREG32(r1); \
+ if (!(tmp & 0x80000000)) \
+ break; \
+ udelay(10); \
+ } \
+ if (i >= retries) \
+ pr_err("timeout: rlcg program reg:0x%05x failed!\n", reg); \
+ } else { \
+ WREG32(reg, value); \
+ } \
+ } while (0)
+
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
@@ -118,12 +142,36 @@
} \
} while (0)
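+/* Shadowed RLC write: under SR-IOV full access, GRBM_GFX_CNTL/GRBM_GFX_INDEX
+ * writes are mirrored into SCRATCH_REG2/SCRATCH_REG3 before the MMIO write;
+ * otherwise the target register is written directly.
+ */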
+#define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \
+ do { \
+ uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
+ if (amdgpu_sriov_fullaccess(adev)) { \
+ uint32_t r2 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG2; \
+ uint32_t r3 = adev->reg_offset[GC_HWIP][0][prefix##SCRATCH_REG1_BASE_IDX] + prefix##SCRATCH_REG3; \
+ uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][prefix##GRBM_GFX_CNTL_BASE_IDX] + prefix##GRBM_GFX_CNTL; \
+ uint32_t grbm_idx = adev->reg_offset[GC_HWIP][0][prefix##GRBM_GFX_INDEX_BASE_IDX] + prefix##GRBM_GFX_INDEX; \
+ if (target_reg == grbm_cntl) \
+ WREG32(r2, value); \
+ else if (target_reg == grbm_idx) \
+ WREG32(r3, value); \
+ WREG32(target_reg, value); \
+ } else { \
+ WREG32(target_reg, value); \
+ } \
+ } while (0)
+
#define WREG32_SOC15_RLC(ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[GC_HWIP][0][reg##_BASE_IDX] + reg;\
WREG32_RLC(target_reg, value); \
} while (0)
+#define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \
+ do { \
+ uint32_t target_reg = adev->reg_offset[GC_HWIP][0][reg##_BASE_IDX] + reg;\
+ WREG32_RLC_EX(prefix, target_reg, value); \
+ } while (0)
+
#define WREG32_FIELD15_RLC(ip, idx, reg, field, val) \
WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
(RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
index 5039375bb1d4..cf8ff064dc72 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_secureDisplay_if.h
@@ -50,6 +50,7 @@ enum ta_securedisplay_status {
TA_SECUREDISPLAY_STATUS__I2C_WRITE_ERROR = 0x04, /* Fail to Write to I2C */
TA_SECUREDISPLAY_STATUS__READ_DIO_SCRATCH_ERROR = 0x05, /*Fail Read DIO Scratch Register*/
TA_SECUREDISPLAY_STATUS__READ_CRC_ERROR = 0x06, /* Fail to Read CRC*/
+ TA_SECUREDISPLAY_STATUS__I2C_INIT_ERROR = 0x07, /* Failed to initialize I2C */
TA_SECUREDISPLAY_STATUS__MAX = 0x7FFFFFFF,/* Maximum Value for status*/
};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index b6e0f4ba6272..87ec883f7e06 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -1545,6 +1545,36 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
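+/* VCN 2.6 (Aldebaran) decode ring functions, built from the v2.0/v2.5 ring helpers */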
+static const struct amdgpu_ring_funcs vcn_v2_6_dec_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_DEC,
+ .align_mask = 0xf,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = vcn_v2_5_dec_ring_get_rptr,
+ .get_wptr = vcn_v2_5_dec_ring_get_wptr,
+ .set_wptr = vcn_v2_5_dec_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+ 8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
+ 14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
+ 6,
+ .emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
+ .emit_ib = vcn_v2_0_dec_ring_emit_ib,
+ .emit_fence = vcn_v2_0_dec_ring_emit_fence,
+ .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
+ .test_ring = vcn_v2_0_dec_ring_test_ring,
+ .test_ib = amdgpu_vcn_dec_ring_test_ib,
+ .insert_nop = vcn_v2_0_dec_ring_insert_nop,
+ .insert_start = vcn_v2_0_dec_ring_insert_start,
+ .insert_end = vcn_v2_0_dec_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
+ .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
/**
* vcn_v2_5_enc_ring_get_rptr - get enc read pointer
*
@@ -1644,6 +1674,36 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};
+static const struct amdgpu_ring_funcs vcn_v2_6_enc_ring_vm_funcs = {
+ .type = AMDGPU_RING_TYPE_VCN_ENC,
+ .align_mask = 0x3f,
+ .nop = VCN_ENC_CMD_NO_OP,
+ .vmhub = AMDGPU_MMHUB_0,
+ .get_rptr = vcn_v2_5_enc_ring_get_rptr,
+ .get_wptr = vcn_v2_5_enc_ring_get_wptr,
+ .set_wptr = vcn_v2_5_enc_ring_set_wptr,
+ .emit_frame_size =
+ SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
+ SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
+ 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
+ 5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
+ 1, /* vcn_v2_0_enc_ring_insert_end */
+ .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
+ .emit_ib = vcn_v2_0_enc_ring_emit_ib,
+ .emit_fence = vcn_v2_0_enc_ring_emit_fence,
+ .emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
+ .test_ring = amdgpu_vcn_enc_ring_test_ring,
+ .test_ib = amdgpu_vcn_enc_ring_test_ib,
+ .insert_nop = amdgpu_ring_insert_nop,
+ .insert_end = vcn_v2_0_enc_ring_insert_end,
+ .pad_ib = amdgpu_ring_generic_pad_ib,
+ .begin_use = amdgpu_vcn_ring_begin_use,
+ .end_use = amdgpu_vcn_ring_end_use,
+ .emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
+ .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
+ .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
+};
+
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
int i;
@@ -1651,7 +1711,10 @@ static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
+ if (adev->asic_type == CHIP_ARCTURUS)
+ adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
+ else /* CHIP_ALDEBARAN */
+ adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;
adev->vcn.inst[i].ring_dec.me = i;
DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
}
@@ -1665,7 +1728,10 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << j))
continue;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
+ if (adev->asic_type == CHIP_ARCTURUS)
+ adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
+ else /* CHIP_ALDEBARAN */
+ adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs;
adev->vcn.inst[j].ring_enc[i].me = j;
}
DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
@@ -1830,6 +1896,26 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
.set_powergating_state = vcn_v2_5_set_powergating_state,
};
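+/* VCN 2.6 IP functions: same handlers as v2.5, registered under a separate block name */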
+static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
+ .name = "vcn_v2_6",
+ .early_init = vcn_v2_5_early_init,
+ .late_init = NULL,
+ .sw_init = vcn_v2_5_sw_init,
+ .sw_fini = vcn_v2_5_sw_fini,
+ .hw_init = vcn_v2_5_hw_init,
+ .hw_fini = vcn_v2_5_hw_fini,
+ .suspend = vcn_v2_5_suspend,
+ .resume = vcn_v2_5_resume,
+ .is_idle = vcn_v2_5_is_idle,
+ .wait_for_idle = vcn_v2_5_wait_for_idle,
+ .check_soft_reset = NULL,
+ .pre_soft_reset = NULL,
+ .soft_reset = NULL,
+ .post_soft_reset = NULL,
+ .set_clockgating_state = vcn_v2_5_set_clockgating_state,
+ .set_powergating_state = vcn_v2_5_set_powergating_state,
+};
+
const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_VCN,
@@ -1838,3 +1924,12 @@ const struct amdgpu_ip_block_version vcn_v2_5_ip_block =
.rev = 0,
.funcs = &vcn_v2_5_ip_funcs,
};
+
+const struct amdgpu_ip_block_version vcn_v2_6_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_VCN,
+ .major = 2,
+ .minor = 6,
+ .rev = 0,
+ .funcs = &vcn_v2_6_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h
index 8d9c0800b8e0..e72f799ed0fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.h
@@ -25,5 +25,6 @@
#define __VCN_V2_5_H__
extern const struct amdgpu_ip_block_version vcn_v2_5_ip_block;
+extern const struct amdgpu_ip_block_version vcn_v2_6_ip_block;
#endif /* __VCN_V2_5_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index def583916294..b61d1ba1aa9d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -238,7 +238,8 @@ static int vcn_v3_0_sw_init(void *handle)
fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
- cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
+ cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
+ cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
}
@@ -1074,7 +1075,13 @@ static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ /* Reset FW shared memory RBC WPTR/RPTR */
+ fw_shared->rb.rptr = 0;
+ fw_shared->rb.wptr = lower_32_bits(ring->wptr);
+
+ /* Resetting done, fw can check the RB ring */
fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
@@ -1239,9 +1246,11 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
/* Initialize the ring buffer's read and write pointers */
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, mmUVD_SCRATCH2, 0);
ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ fw_shared->rb.wptr = lower_32_bits(ring->wptr);
fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
@@ -1662,6 +1671,10 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
fw_shared->multi_queue.encode_lowlatency_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
+ /* Restore wptr/rptr with the pointers saved in FW shared memory */
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, fw_shared->rb.rptr);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR, fw_shared->rb.wptr);
+
/* Unstall DPG */
WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
@@ -1721,6 +1734,15 @@ static uint64_t vcn_v3_0_dec_ring_get_wptr(struct amdgpu_ring *ring)
static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
+ volatile struct amdgpu_fw_shared *fw_shared;
+
+ if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+ /* Whenever RBC_RB_WPTR is updated, save the wptr in fw_shared rb.wptr and SCRATCH2 */
+ fw_shared = adev->vcn.inst[ring->me].fw_shared_cpu_addr;
+ fw_shared->rb.wptr = lower_32_bits(ring->wptr);
+ WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
+ lower_32_bits(ring->wptr));
+ }
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 88626d83e07b..ca8efa5c6978 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -220,10 +220,8 @@ static int vega10_ih_enable_ring(struct amdgpu_device *adev,
tmp = vega10_ih_rb_cntl(ih, tmp);
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
- if (ih == &adev->irq.ih1) {
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ if (ih == &adev->irq.ih1)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
- }
if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
@@ -265,7 +263,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
u32 ih_chicken;
int ret;
int i;
- u32 tmp;
/* disable irqs */
ret = vega10_ih_toggle_interrupts(adev, false);
@@ -291,15 +288,6 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
}
}
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
- CLIENT18_IS_STORM_CLIENT, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
-
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
-
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -345,11 +333,17 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- wptr = le32_to_cpu(*ih->wptr_cpu);
- ih_regs = &ih->ih_regs;
+ if (ih == &adev->irq.ih) {
+ /* Only ring0 supports writeback. On other rings fall back
+ * to register-based code with overflow checking below.
+ */
+ wptr = le32_to_cpu(*ih->wptr_cpu);
- if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
- goto out;
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+ }
+
+ ih_regs = &ih->ih_regs;
/* Double check that the overflow wasn't already cleared. */
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
@@ -440,15 +434,11 @@ static int vega10_ih_self_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t wptr = cpu_to_le32(entry->src_data[0]);
-
switch (entry->ring_id) {
case 1:
- *adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work);
break;
case 2:
- *adev->irq.ih2.wptr_cpu = wptr;
schedule_work(&adev->irq.ih2_work);
break;
default: break;
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 5a3c867d5881..6c3cb3513b98 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -35,6 +35,9 @@
#define MAX_REARM_RETRY 10
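+/* IH_CHICKEN register offset used on Aldebaran, defined locally for this file */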
+#define mmIH_CHICKEN_ALDEBARAN 0x18d
+#define mmIH_CHICKEN_ALDEBARAN_BASE_IDX 0
+
static void vega20_ih_set_interrupt_funcs(struct amdgpu_device *adev);
/**
@@ -104,6 +107,8 @@ static int vega20_ih_toggle_ring_interrupts(struct amdgpu_device *adev,
tmp = RREG32(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
+
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
@@ -220,10 +225,8 @@ static int vega20_ih_enable_ring(struct amdgpu_device *adev,
tmp = vega20_ih_rb_cntl(ih, tmp);
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
- if (ih == &adev->irq.ih1) {
- tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
+ if (ih == &adev->irq.ih1)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
- }
if (amdgpu_sriov_vf(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
dev_err(adev->dev, "PSP program IH_RB_CNTL failed!\n");
@@ -297,7 +300,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
u32 ih_chicken;
int ret;
int i;
- u32 tmp;
/* disable irqs */
ret = vega20_ih_toggle_interrupts(adev, false);
@@ -316,6 +318,18 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken);
}
+ /* PSP firmware won't program IH_CHICKEN for aldebaran;
+ * the driver needs to program it properly according to
+ * the MC_SPACE type in IH_RB_CNTL */
+ if (adev->asic_type == CHIP_ALDEBARAN) {
+ ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN);
+ if (adev->irq.ih.use_bus_addr) {
+ ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN,
+ MC_SPACE_GPA_ENABLE, 1);
+ }
+ WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_ALDEBARAN, ih_chicken);
+ }
+
for (i = 0; i < ARRAY_SIZE(ih); i++) {
if (ih[i]->ring_size) {
if (i == 1)
@@ -326,15 +340,6 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
}
}
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
- CLIENT18_IS_STORM_CLIENT, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);
-
- tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
- tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);
-
pci_set_master(adev->pdev);
/* enable interrupts */
@@ -380,11 +385,17 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;
- wptr = le32_to_cpu(*ih->wptr_cpu);
- ih_regs = &ih->ih_regs;
+ if (ih == &adev->irq.ih) {
+ /* Only ring0 supports writeback. On other rings fall back
+ * to register-based code with overflow checking below.
+ */
+ wptr = le32_to_cpu(*ih->wptr_cpu);
- if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
- goto out;
+ if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
+ goto out;
+ }
+
+ ih_regs = &ih->ih_regs;
/* Double check that the overflow wasn't already cleared. */
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
@@ -476,15 +487,11 @@ static int vega20_ih_self_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- uint32_t wptr = cpu_to_le32(entry->src_data[0]);
-
switch (entry->ring_id) {
case 1:
- *adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work);
break;
case 2:
- *adev->irq.ih2.wptr_cpu = wptr;
schedule_work(&adev->irq.ih2_work);
break;
default: break;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index eafb76aebd00..ea338de5818a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -24,6 +24,8 @@
#include <linux/pci.h>
#include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
+
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
@@ -79,6 +81,193 @@
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"
+/* Topaz */
+static const struct amdgpu_video_codecs topaz_video_codecs_encode =
+{
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
+/* Tonga, CZ, ST, Fiji */
+static const struct amdgpu_video_codec_info tonga_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs tonga_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(tonga_video_codecs_encode_array),
+ .codec_array = tonga_video_codecs_encode_array,
+};
+
+/* Polaris */
+static const struct amdgpu_video_codec_info polaris_video_codecs_encode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 4096,
+ .max_height = 2304,
+ .max_pixels_per_frame = 4096 * 2304,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs polaris_video_codecs_encode =
+{
+ .codec_count = ARRAY_SIZE(polaris_video_codecs_encode_array),
+ .codec_array = polaris_video_codecs_encode_array,
+};
+
+/* Topaz */
+static const struct amdgpu_video_codecs topaz_video_codecs_decode =
+{
+ .codec_count = 0,
+ .codec_array = NULL,
+};
+
+/* Tonga */
+static const struct amdgpu_video_codec_info tonga_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+};
+
+static const struct amdgpu_video_codecs tonga_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(tonga_video_codecs_decode_array),
+ .codec_array = tonga_video_codecs_decode_array,
+};
+
+/* CZ, ST, Fiji, Polaris */
+static const struct amdgpu_video_codec_info cz_video_codecs_decode_array[] =
+{
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 3,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 5,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 52,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 4,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 186,
+ },
+ {
+ .codec_type = AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG,
+ .max_width = 4096,
+ .max_height = 4096,
+ .max_pixels_per_frame = 4096 * 4096,
+ .max_level = 0,
+ },
+};
+
+static const struct amdgpu_video_codecs cz_video_codecs_decode =
+{
+ .codec_count = ARRAY_SIZE(cz_video_codecs_decode_array),
+ .codec_array = cz_video_codecs_decode_array,
+};
+
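+/* Report the supported video decode/encode codec capabilities for the given VI ASIC */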
+static int vi_query_video_codecs(struct amdgpu_device *adev, bool encode,
+ const struct amdgpu_video_codecs **codecs)
+{
+ switch (adev->asic_type) {
+ case CHIP_TOPAZ:
+ if (encode)
+ *codecs = &topaz_video_codecs_encode;
+ else
+ *codecs = &topaz_video_codecs_decode;
+ return 0;
+ case CHIP_TONGA:
+ if (encode)
+ *codecs = &tonga_video_codecs_encode;
+ else
+ *codecs = &tonga_video_codecs_decode;
+ return 0;
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ if (encode)
+ *codecs = &polaris_video_codecs_encode;
+ else
+ *codecs = &cz_video_codecs_decode;
+ return 0;
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ if (encode)
+ *codecs = &tonga_video_codecs_encode;
+ else
+ *codecs = &cz_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
/*
* Indirect registers accessor
*/
@@ -1085,6 +1274,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
.get_pcie_replay_count = &vi_get_pcie_replay_count,
.supports_baco = &vi_asic_supports_baco,
.pre_asic_init = &vi_pre_asic_init,
+ .query_video_codecs = &vi_query_video_codecs,
};
#define CZ_REV_BRISTOL(rev) \