author		Lijo Lazar <lijo.lazar@amd.com>	2023-09-11 13:48:11 +0530
committer	Alex Deucher <alexander.deucher@amd.com>	2023-09-20 12:23:28 -0400
commit		4e8303cf2c4dd27374a16a8881ec1a1cd5baf86f (patch)
tree		1b95c903467e89eec1b8dacdc9f776a838bb3423 /drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
parent		drm/amdgpu: add remap_hdp_registers callback for nbio 7.11 (diff)
drm/amdgpu: Use function for IP version check
Use an inline function for the version check. This gives more flexibility
to handle any format changes.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
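For reference, the helper that replaces the open-coded ip_versions[][] lookup
could look roughly like the sketch below. This is inferred from the call sites
in this diff only; the exact definition and parameter types in amdgpu.h are an
assumption here, not quoted from the patch:

	/* Sketch only: the real struct amdgpu_device and HWIP indices live in
	 * amdgpu.h; this mirrors what the converted call sites appear to expect.
	 */
	static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev,
						 uint8_t hwip, uint8_t inst)
	{
		/* Same array lookup the call sites previously open-coded. */
		return adev->ip_versions[hwip][inst];
	}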
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c	48
1 file changed, 29 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index cd37f45e01a1..8562ac7f7ff0 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -469,7 +469,7 @@ static int sdma_v4_0_irq_id_to_seq(unsigned client_id)
static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(4, 0, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma_4,
@@ -539,7 +539,7 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
* The only chips with SDMAv4 and ULV are VG10 and VG20.
* Server SKUs take a different hysteresis setting from other SKUs.
*/
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(4, 0, 0):
if (adev->pdev->device == 0x6860)
break;
@@ -578,8 +578,10 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
int ret, i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
- adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 2, 2) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 4, 0)) {
/* Acturus & Aldebaran will leverage the same FW memory
for every SDMA instance */
ret = amdgpu_sdma_init_microcode(adev, 0, true);
@@ -978,7 +980,8 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
* Arcturus for the moment and firmware version 14
* and above.
*/
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) &&
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 2, 2) &&
adev->sdma.instance[i].fw_version >= 14)
WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);
/* Extend page fault timeout to avoid interrupt storm */
@@ -1255,7 +1258,7 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
return;
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
case IP_VERSION(4, 1, 1):
case IP_VERSION(4, 1, 2):
@@ -1698,7 +1701,7 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
{
uint fw_version = adev->sdma.instance[0].fw_version;
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(4, 0, 0):
return fw_version >= 430;
case IP_VERSION(4, 0, 1):
@@ -1723,7 +1726,7 @@ static int sdma_v4_0_early_init(void *handle)
}
/* TODO: Page queue breaks driver reload under SRIOV */
- if ((adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 0, 0)) &&
+ if ((amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 0, 0)) &&
amdgpu_sriov_vf((adev)))
adev->sdma.has_page_queue = false;
else if (sdma_v4_0_fw_support_paging_queue(adev))
@@ -1823,7 +1826,9 @@ static int sdma_v4_0_sw_init(void *handle)
* On Arcturus, SDMA instance 5~7 has a different vmhub
* type(AMDGPU_MMHUB1).
*/
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 2, 2) &&
+ i >= 5)
ring->vm_hub = AMDGPU_MMHUB1(0);
else
ring->vm_hub = AMDGPU_MMHUB0(0);
@@ -1843,8 +1848,10 @@ static int sdma_v4_0_sw_init(void *handle)
/* paging queue use same doorbell index/routing as gfx queue
* with 0x400 (4096 dwords) offset on second doorbell page
*/
- if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(4, 0, 0) &&
- adev->ip_versions[SDMA0_HWIP][0] < IP_VERSION(4, 2, 0)) {
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) >=
+ IP_VERSION(4, 0, 0) &&
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) <
+ IP_VERSION(4, 2, 0)) {
ring->doorbell_index =
adev->doorbell_index.sdma_engine[i] << 1;
ring->doorbell_index += 0x400;
@@ -1856,7 +1863,9 @@ static int sdma_v4_0_sw_init(void *handle)
(adev->doorbell_index.sdma_engine[i] + 1) << 1;
}
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 2, 2) &&
+ i >= 5)
ring->vm_hub = AMDGPU_MMHUB1(0);
else
ring->vm_hub = AMDGPU_MMHUB0(0);
@@ -1890,8 +1899,8 @@ static int sdma_v4_0_sw_fini(void *handle)
amdgpu_ring_fini(&adev->sdma.instance[i].page);
}
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
- adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 2, 2) ||
+ amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 0))
amdgpu_sdma_destroy_inst_ctx(adev, true);
else
amdgpu_sdma_destroy_inst_ctx(adev, false);
@@ -2036,14 +2045,16 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
break;
case 1:
- if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0))
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) ==
+ IP_VERSION(4, 2, 0))
amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
case 2:
/* XXX compute */
break;
case 3:
- if (adev->ip_versions[SDMA0_HWIP][0] != IP_VERSION(4, 2, 0))
+ if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) !=
+ IP_VERSION(4, 2, 0))
amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
}
@@ -2259,7 +2270,7 @@ static int sdma_v4_0_set_powergating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(4, 1, 0):
case IP_VERSION(4, 1, 1):
case IP_VERSION(4, 1, 2):
@@ -2622,7 +2633,7 @@ static struct amdgpu_sdma_ras sdma_v4_0_ras = {
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
- switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
case IP_VERSION(4, 2, 0):
case IP_VERSION(4, 2, 2):
adev->sdma.ras = &sdma_v4_0_ras;
@@ -2633,7 +2644,6 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
default:
break;
}
-
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {