drm/amdgpu: reserve backup pages for bad page retirement

To ensure the user has a constant amount of VRAM accessible at run time,
the driver reserves a limited number of backup pages at init and releases
them as bad pages are retired, so the amount of unused memory does not
change.
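
As an illustration only (the structure and helpers below are hypothetical,
not part of this patch), the invariant the backup pool maintains can be
modelled like this: every page retired at run time is paid for by handing
back one of the pages set aside at init, so the number of pages userspace
can allocate never shrinks.

/* Illustration only: a toy model of the backup-page bookkeeping.
 * All names here are hypothetical, not driver code. */
#include <assert.h>
#include <stdint.h>

struct vram_model {
	uint64_t total_pages;    /* all VRAM pages                      */
	uint64_t backup_pages;   /* set aside up front at init          */
	uint64_t retired_pages;  /* bad pages reserved during run time  */
};

/* Pages userspace can still allocate from. */
static uint64_t usable_pages(const struct vram_model *m)
{
	return m->total_pages - m->backup_pages - m->retired_pages;
}

static void retire_bad_page(struct vram_model *m)
{
	assert(m->backup_pages > 0);  /* mirrors the "No enough backup pages" check */
	m->retired_pages++;           /* bad page goes out of service  */
	m->backup_pages--;            /* one backup page handed back   */
}

int main(void)
{
	struct vram_model m = { .total_pages = 1024, .backup_pages = 8 };
	uint64_t before = usable_pages(&m);

	retire_bad_page(&m);
	assert(usable_pages(&m) == before);  /* usable VRAM is unchanged */
	return 0;
}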

v2: refine the code that calculates the bad page threshold

Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Dennis Li <Dennis.Li@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author:    Dennis Li
Date:      2021-02-22 18:22:57 +08:00
Committer: Alex Deucher
Parent:    6c65a582ee
Commit:    f89b881c81
4 changed files with 118 additions and 15 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c

@@ -28,6 +28,9 @@
#include "amdgpu_atomfirmware.h"
#include "atom.h"
static int amdgpu_vram_mgr_free_backup_pages(struct amdgpu_vram_mgr *mgr,
uint32_t num_pages);
static inline struct amdgpu_vram_mgr *to_vram_mgr(struct ttm_resource_manager *man)
{
return container_of(man, struct amdgpu_vram_mgr, manager);
@@ -186,6 +189,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
spin_lock_init(&mgr->lock);
INIT_LIST_HEAD(&mgr->reservations_pending);
INIT_LIST_HEAD(&mgr->reserved_pages);
INIT_LIST_HEAD(&mgr->backup_pages);
/* Add the two VRAM-related sysfs files */
ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
@@ -226,6 +230,11 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
drm_mm_remove_node(&rsv->mm_node);
kfree(rsv);
}
list_for_each_entry_safe(rsv, temp, &mgr->backup_pages, node) {
drm_mm_remove_node(&rsv->mm_node);
kfree(rsv);
}
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
@@ -297,12 +306,14 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
continue;
dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
- rsv->mm_node.start, rsv->mm_node.size);
+ rsv->mm_node.start << PAGE_SHIFT, rsv->mm_node.size);
vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
atomic64_add(vis_usage, &mgr->vis_usage);
atomic64_add(rsv->mm_node.size << PAGE_SHIFT, &mgr->usage);
list_move(&rsv->node, &mgr->reserved_pages);
amdgpu_vram_mgr_free_backup_pages(mgr, rsv->mm_node.size);
}
}
@@ -319,6 +330,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
uint64_t start, uint64_t size)
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
struct amdgpu_vram_reservation *rsv;
rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
@@ -329,14 +341,94 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
rsv->mm_node.start = start >> PAGE_SHIFT;
rsv->mm_node.size = size >> PAGE_SHIFT;
dev_dbg(adev->dev, "Pending Reservation: 0x%llx\n", start);
spin_lock(&mgr->lock);
- list_add_tail(&mgr->reservations_pending, &rsv->node);
+ list_add_tail(&rsv->node, &mgr->reservations_pending);
amdgpu_vram_mgr_do_reserve(man);
spin_unlock(&mgr->lock);
return 0;
}
static int amdgpu_vram_mgr_free_backup_pages(struct amdgpu_vram_mgr *mgr,
uint32_t num_pages)
{
struct amdgpu_device *adev = to_amdgpu_device(mgr);
struct amdgpu_vram_reservation *rsv;
uint32_t i;
uint64_t vis_usage = 0, total_usage = 0;
if (num_pages > mgr->num_backup_pages) {
dev_warn(adev->dev, "No enough backup pages\n");
return -EINVAL;
}
for (i = 0; i < num_pages; i++) {
rsv = list_first_entry(&mgr->backup_pages,
struct amdgpu_vram_reservation, node);
vis_usage += amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
total_usage += (rsv->mm_node.size << PAGE_SHIFT);
drm_mm_remove_node(&rsv->mm_node);
list_del(&rsv->node);
kfree(rsv);
mgr->num_backup_pages--;
}
atomic64_sub(total_usage, &mgr->usage);
atomic64_sub(vis_usage, &mgr->vis_usage);
return 0;
}
int amdgpu_vram_mgr_reserve_backup_pages(struct ttm_resource_manager *man,
uint32_t num_pages)
{
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
struct amdgpu_vram_reservation *rsv;
struct drm_mm *mm = &mgr->mm;
uint32_t i;
int ret = 0;
uint64_t vis_usage, total_usage;
for (i = 0; i < num_pages; i++) {
rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
if (!rsv) {
ret = -ENOMEM;
goto pro_end;
}
INIT_LIST_HEAD(&rsv->node);
ret = drm_mm_insert_node(mm, &rsv->mm_node, 1);
if (ret) {
dev_err(adev->dev, "failed to reserve backup page %d, ret 0x%x\n", i, ret);
kfree(rsv);
goto pro_end;
}
vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
total_usage = (rsv->mm_node.size << PAGE_SHIFT);
spin_lock(&mgr->lock);
atomic64_add(vis_usage, &mgr->vis_usage);
atomic64_add(total_usage, &mgr->usage);
list_add_tail(&rsv->node, &mgr->backup_pages);
mgr->num_backup_pages++;
spin_unlock(&mgr->lock);
}
pro_end:
if (ret) {
spin_lock(&mgr->lock);
amdgpu_vram_mgr_free_backup_pages(mgr, mgr->num_backup_pages);
spin_unlock(&mgr->lock);
}
return ret;
}
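
For context, a minimal sketch of how a caller might use the new export during
early init. The wrapper name and the bad_page_threshold parameter below are
assumptions for illustration; the real call site is in the RAS code among the
other changed files, which are not shown in this hunk.

/* Sketch only: amdgpu_reserve_backup_vram() is a hypothetical wrapper;
 * bad_page_threshold would come from the RAS bad-page accounting. */
static int amdgpu_reserve_backup_vram(struct amdgpu_device *adev,
				      uint32_t bad_page_threshold)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

	if (!man)
		return -ENODEV;

	/* Set aside one backup page per bad page that may be retired later. */
	return amdgpu_vram_mgr_reserve_backup_pages(man, bad_page_threshold);
}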
/**
* amdgpu_vram_mgr_query_page_status - query the reservation status
*