amdgpu,radeon: Add workarounds for the Spacemit K1-X RISC-V platform
1. In various GMC versions (v6-v11), the DMA address mask is reduced to 34 bits. This is necessary because PCIe devices on the Spacemit K1-X platform support a maximum of 34-bit physical addressing. 2. In amdgpu_ttm.c, force the use of the `ttm_write_combined` cache mode. This is required because the K1-X platform lacks PCIe cache coherency. 3. In radeon_ttm.c and amdgpu_vram_mgr.c, switch the caching for IO memory to `ttm_uncached`. This is done to guarantee data correctness during VRAM access on this platform. Signed-off-by: liyeshan <yeshan.li@spacemit.com> Change-Id: I294b741d1cc7923bfe535299c304f9f443673d92
This commit is contained in:
@@ -1103,6 +1103,9 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
|
||||
else
|
||||
caching = ttm_cached;
|
||||
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
caching = ttm_write_combined;
|
||||
#endif
|
||||
/* allocate space for the uninitialized page entries */
|
||||
if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
|
||||
kfree(gtt);
|
||||
|
||||
@@ -588,7 +588,11 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
|
||||
if (adev->gmc.xgmi.connected_to_cpu)
|
||||
vres->base.bus.caching = ttm_cached;
|
||||
else
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
vres->base.bus.caching = ttm_uncached;
|
||||
#else
|
||||
vres->base.bus.caching = ttm_write_combined;
|
||||
#endif
|
||||
|
||||
atomic64_add(vis_usage, &mgr->vis_usage);
|
||||
*res = &vres->base;
|
||||
|
||||
@@ -965,13 +965,21 @@ static int gmc_v10_0_sw_init(void *handle)
|
||||
*/
|
||||
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
|
||||
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(34));
|
||||
#else
|
||||
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
|
||||
#endif
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
adev->need_swiotlb = drm_need_swiotlb(34);
|
||||
#else
|
||||
adev->need_swiotlb = drm_need_swiotlb(44);
|
||||
#endif
|
||||
|
||||
r = gmc_v10_0_mc_init(adev);
|
||||
if (r)
|
||||
|
||||
@@ -822,13 +822,21 @@ static int gmc_v11_0_sw_init(void *handle)
|
||||
*/
|
||||
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
|
||||
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(34));
|
||||
#else
|
||||
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
|
||||
#endif
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
|
||||
return r;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
adev->need_swiotlb = drm_need_swiotlb(34);
|
||||
#else
|
||||
adev->need_swiotlb = drm_need_swiotlb(44);
|
||||
#endif
|
||||
|
||||
r = gmc_v11_0_mc_init(adev);
|
||||
if (r)
|
||||
|
||||
@@ -831,12 +831,20 @@ static int gmc_v6_0_sw_init(void *handle)
|
||||
|
||||
adev->gmc.mc_mask = 0xffffffffffULL;
|
||||
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(34));
|
||||
#else
|
||||
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
|
||||
#endif
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "No suitable DMA available.\n");
|
||||
return r;
|
||||
}
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
adev->need_swiotlb = drm_need_swiotlb(34);
|
||||
#else
|
||||
adev->need_swiotlb = drm_need_swiotlb(40);
|
||||
#endif
|
||||
|
||||
r = gmc_v6_0_init_microcode(adev);
|
||||
if (r) {
|
||||
|
||||
@@ -1010,12 +1010,20 @@ static int gmc_v7_0_sw_init(void *handle)
|
||||
*/
|
||||
adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
|
||||
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(34));
|
||||
#else
|
||||
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
|
||||
#endif
|
||||
if (r) {
|
||||
pr_warn("No suitable DMA available\n");
|
||||
return r;
|
||||
}
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
adev->need_swiotlb = drm_need_swiotlb(34);
|
||||
#else
|
||||
adev->need_swiotlb = drm_need_swiotlb(40);
|
||||
#endif
|
||||
|
||||
r = gmc_v7_0_init_microcode(adev);
|
||||
if (r) {
|
||||
|
||||
@@ -1123,12 +1123,20 @@ static int gmc_v8_0_sw_init(void *handle)
|
||||
*/
|
||||
adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
|
||||
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(34));
|
||||
#else
|
||||
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
|
||||
#endif
|
||||
if (r) {
|
||||
pr_warn("No suitable DMA available\n");
|
||||
return r;
|
||||
}
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
adev->need_swiotlb = drm_need_swiotlb(34);
|
||||
#else
|
||||
adev->need_swiotlb = drm_need_swiotlb(40);
|
||||
#endif
|
||||
|
||||
r = gmc_v8_0_init_microcode(adev);
|
||||
if (r) {
|
||||
|
||||
@@ -2156,7 +2156,11 @@ static int gmc_v9_0_sw_init(void *handle)
|
||||
*/
|
||||
adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
|
||||
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
dma_addr_bits = 34;
|
||||
#else
|
||||
dma_addr_bits = adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) ? 48:44;
|
||||
#endif
|
||||
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(dma_addr_bits));
|
||||
if (r) {
|
||||
dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
|
||||
|
||||
@@ -288,7 +288,11 @@ static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resourc
|
||||
return -EINVAL;
|
||||
mem->bus.offset += rdev->mc.aper_base;
|
||||
mem->bus.is_iomem = true;
|
||||
#ifdef CONFIG_SOC_SPACEMIT_K1X
|
||||
mem->bus.caching = ttm_uncached;
|
||||
#else
|
||||
mem->bus.caching = ttm_write_combined;
|
||||
#endif
|
||||
#ifdef __alpha__
|
||||
/*
|
||||
* Alpha: use bus.addr to hold the ioremap() return,
|
||||
|
||||
Reference in New Issue
Block a user