dect / linux-2.6

drm/ttm: split no_wait argument in 2 GPU or reserve wait

There are cases where we want to be able to wait only for the
GPU while not waiting for other buffers to be unreserved. This
patch splits the no_wait argument all the way down the whole
ttm path so that the upper levels can decide what to wait on.

[airlied: squashed these 4 for bisectability reasons.]
drm/radeon/kms: update to TTM no_wait splitted argument
drm/nouveau: update to TTM no_wait splitted argument
drm/vmwgfx: update to TTM no_wait splitted argument
[vmwgfx patch: Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>]

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Acked-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
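
For illustration only, a minimal sketch (not part of the patch) of what
the split means at a call site; bo, placement and interruptible are
hypothetical names, and the flag values are just one policy an upper
layer could now express:

    /* Before: a single flag, so callers could not tell the two waits apart. */
    ret = ttm_bo_validate(bo, &placement, interruptible, no_wait);

    /*
     * After: the two waits are independent. In this sketch the caller is
     * willing to sleep until the buffer is unreserved, but wants -EBUSY
     * back instead of sleeping on the GPU.
     */
    ret = ttm_bo_validate(bo, &placement, interruptible,
                          false,  /* no_wait_reserve */
                          true);  /* no_wait_gpu */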
Jerome Glisse 2010-04-07 10:21:19 +00:00 committed by Dave Airlie
parent 3a89b4a9ca
commit 9d87fa2138
11 changed files with 115 additions and 88 deletions

drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -219,7 +219,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
for (i = 0; i < nvbo->placement.num_placement; i++)
nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -256,7 +256,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
for (i = 0; i < nvbo->placement.num_placement; i++)
nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
+ ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -456,7 +456,8 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
- struct nouveau_bo *nvbo, bool evict, bool no_wait,
+ struct nouveau_bo *nvbo, bool evict,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct nouveau_fence *fence = NULL;
@@ -467,7 +468,7 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
- evict, no_wait, new_mem);
+ evict, no_wait_reserve, no_wait_gpu, new_mem);
if (nvbo->channel && nvbo->channel != chan)
ret = nouveau_fence_wait(fence, NULL, false, false);
nouveau_fence_unref((void *)&fence);
@@ -491,7 +492,8 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- int no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
@@ -569,12 +571,13 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
dst_offset += (PAGE_SIZE * line_count);
}
- return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
+ return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -587,7 +590,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
@@ -595,11 +598,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (tmp_mem.mm_node) {
spin_lock(&bo->bdev->glob->lru_lock);
@@ -612,7 +615,8 @@ out:
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -625,15 +629,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (ret)
goto out;
@@ -700,7 +704,8 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait, struct ttm_mem_reg *new_mem)
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -715,7 +720,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Software copy if the card isn't up and running yet. */
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
!dev_priv->channel) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@@ -729,17 +734,17 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Hardware assisted copy. */
if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
else
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
if (!ret)
goto out;
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
if (ret)

drivers/gpu/drm/nouveau/nouveau_gem.c

@@ -387,7 +387,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
nvbo->channel = chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
- false, false);
+ false, false, false);
nvbo->channel = NULL;
if (unlikely(ret)) {
NV_ERROR(dev, "fail ttm_validate\n");

drivers/gpu/drm/radeon/radeon_object.c

@@ -191,7 +191,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
}
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
@@ -215,7 +215,7 @@ int radeon_bo_unpin(struct radeon_bo *bo)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
if (unlikely(r != 0))
dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
return r;
@@ -330,7 +330,7 @@ int radeon_bo_list_validate(struct list_head *head)
lobj->rdomain);
}
r = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
+ true, false, false);
if (unlikely(r))
return r;
}

drivers/gpu/drm/radeon/radeon_ttm.c

@@ -243,9 +243,9 @@ static void radeon_move_null(struct ttm_buffer_object *bo,
}
static int radeon_move_blit(struct ttm_buffer_object *bo,
- bool evict, int no_wait,
- struct ttm_mem_reg *new_mem,
- struct ttm_mem_reg *old_mem)
+ bool evict, int no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem,
+ struct ttm_mem_reg *old_mem)
{
struct radeon_device *rdev;
uint64_t old_start, new_start;
@@ -289,13 +289,14 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
/* FIXME: handle copy error */
r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
- evict, no_wait, new_mem);
+ evict, no_wait_reserve, no_wait_gpu, new_mem);
radeon_fence_unref(&fence);
return r;
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -316,7 +317,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait);
+ interruptible, no_wait_reserve, no_wait_gpu);
if (unlikely(r)) {
return r;
}
@@ -330,11 +331,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
if (tmp_mem.mm_node) {
struct ttm_bo_global *glob = rdev->mman.bdev.glob;
@@ -348,7 +349,8 @@ out_cleanup:
}
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
@@ -368,15 +370,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placement.num_busy_placement = 1;
placement.busy_placement = &placements;
placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
+ r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -393,8 +395,9 @@ out_cleanup:
}
static int radeon_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible, bool no_wait,
- struct ttm_mem_reg *new_mem)
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -421,18 +424,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = radeon_move_vram_ram(bo, evict, interruptible,
- no_wait, new_mem);
+ no_wait_reserve, no_wait_gpu, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
r = radeon_move_ram_vram(bo, evict, interruptible,
- no_wait, new_mem);
+ no_wait_reserve, no_wait_gpu, new_mem);
} else {
- r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
+ r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}
return r;

drivers/gpu/drm/ttm/ttm_bo.c

@@ -357,7 +357,8 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem,
- bool evict, bool interruptible, bool no_wait)
+ bool evict, bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
@@ -402,12 +403,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
- ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+ ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
else if (bdev->driver->move)
ret = bdev->driver->move(bo, evict, interruptible,
- no_wait, mem);
+ no_wait_reserve, no_wait_gpu, mem);
else
- ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
if (ret)
goto out_err;
@@ -606,7 +607,7 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
EXPORT_SYMBOL(ttm_bo_unref);
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
- bool no_wait)
+ bool no_wait_reserve, bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bo->glob;
@@ -615,7 +616,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
int ret = 0;
spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bo->lock);
if (unlikely(ret != 0)) {
@@ -638,7 +639,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
placement.num_busy_placement = 0;
bdev->driver->evict_flags(bo, &placement);
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS) {
printk(KERN_ERR TTM_PFX
@@ -650,7 +651,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
}
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (ret) {
if (ret != -ERESTARTSYS)
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
@@ -670,7 +671,8 @@ out:
static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
uint32_t mem_type,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_global *glob = bdev->glob;
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -687,11 +689,11 @@ retry:
bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
kref_get(&bo->list_kref);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
if (unlikely(ret == -EBUSY)) {
spin_unlock(&glob->lru_lock);
- if (likely(!no_wait))
+ if (likely(!no_wait_gpu))
ret = ttm_bo_wait_unreserved(bo, interruptible);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -713,7 +715,7 @@ retry:
while (put_count--)
kref_put(&bo->list_kref, ttm_bo_ref_bug);
- ret = ttm_bo_evict(bo, interruptible, no_wait);
+ ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
@@ -764,7 +766,9 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
uint32_t mem_type,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ bool interruptible,
+ bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_global *glob = bdev->glob;
@@ -785,7 +789,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
}
spin_unlock(&glob->lru_lock);
ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
- no_wait);
+ no_wait_reserve, no_wait_gpu);
if (unlikely(ret != 0))
return ret;
} while (1);
@@ -855,7 +859,8 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
@@ -952,7 +957,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
- interruptible, no_wait);
+ interruptible, no_wait_reserve, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
mem->mm_node->private = bo;
@@ -978,7 +983,8 @@ EXPORT_SYMBOL(ttm_bo_wait_cpu);
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
struct ttm_bo_global *glob = bo->glob;
int ret = 0;
@@ -992,7 +998,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
* instead of doing it here.
*/
spin_lock(&bo->lock);
- ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
spin_unlock(&bo->lock);
if (ret)
return ret;
@@ -1002,10 +1008,10 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
/*
* Determine where to move the buffer.
*/
- ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
+ ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
goto out_unlock;
- ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
@@ -1039,7 +1045,8 @@ static int ttm_bo_mem_compat(struct ttm_placement *placement,
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait)
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu)
{
int ret;
@@ -1054,7 +1061,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
ret = ttm_bo_mem_compat(placement, &bo->mem);
if (ret < 0) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+ ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
if (ret)
return ret;
} else {
@@ -1175,7 +1182,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
goto out_err;
}
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, interruptible, false, false);
if (ret)
goto out_err;
@@ -1249,7 +1256,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
while (!list_empty(&man->lru)) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+ ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
if (ret) {
if (allow_errors) {
return ret;
@@ -1839,7 +1846,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
- false, false);
+ false, false, false);
if (unlikely(ret != 0))
goto out;
}

drivers/gpu/drm/ttm/ttm_bo_util.c

@@ -49,7 +49,8 @@ void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
}
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
struct ttm_tt *ttm = bo->ttm;
struct ttm_mem_reg *old_mem = &bo->mem;
@@ -207,7 +208,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
}
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+ bool evict, bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -525,7 +527,8 @@ int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
void *sync_obj_arg,
- bool evict, bool no_wait,
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem)
{
struct ttm_bo_device *bdev = bo->bdev;

drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

@@ -570,7 +570,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* Put BO in VRAM, only if there is space.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
if (unlikely(ret == -ERESTARTSYS))
return ret;
@@ -590,7 +590,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
* previous contents.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
return ret;
}

drivers/gpu/drm/vmwgfx/vmwgfx_fb.c

@@ -628,7 +628,7 @@ int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+ ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
ttm_bo_unreserve(bo);
return ret;
@@ -652,7 +652,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
if (unlikely(ret != 0))
goto err_unlock;
- ret = ttm_bo_validate(bo, &ne_placement, false, false);
+ ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
ttm_bo_unreserve(bo);
err_unlock:
ttm_write_unlock(&vmw_priv->active_master->lock);

drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c

@@ -118,7 +118,7 @@ static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
if (pin)
overlay_placement = &vmw_vram_ne_placement;
- ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
+ ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
ttm_bo_unreserve(bo);

include/drm/ttm/ttm_bo_api.h

@@ -313,7 +313,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
* @bo: The buffer object.
* @placement: Proposed placement for the buffer object.
* @interruptible: Sleep interruptible if sleeping.
- * @no_wait: Return immediately if the buffer is busy.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
*
* Changes placement and caching policy of the buffer object
* according proposed placement.
@@ -325,7 +326,8 @@ extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
*/
extern int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible, bool no_wait);
+ bool interruptible, bool no_wait_reserve,
+ bool no_wait_gpu);
/**
* ttm_bo_unref

include/drm/ttm/ttm_bo_driver.h

@@ -311,7 +311,8 @@ struct ttm_bo_driver {
*/
int (*move) (struct ttm_buffer_object *bo,
bool evict, bool interruptible,
- bool no_wait, struct ttm_mem_reg *new_mem);
+ bool no_wait_reserve, bool no_wait_gpu,
+ struct ttm_mem_reg *new_mem);
/**
* struct ttm_bo_driver_member verify_access
@@ -633,7 +634,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
* @proposed_placement: Proposed new placement for the buffer object.
* @mem: A struct ttm_mem_reg.
* @interruptible: Sleep interruptible when sliping.
- * @no_wait: Don't sleep waiting for space to become available.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
*
* Allocate memory space for the buffer object pointed to by @bo, using
* the placement flags in @mem, potentially evicting other idle buffer objects.
@@ -647,7 +649,8 @@ extern bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev,
extern int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_mem_reg *mem,
- bool interruptible, bool no_wait);
+ bool interruptible,
+ bool no_wait_reserve, bool no_wait_gpu);
/**
* ttm_bo_wait_for_cpu
*
@@ -826,7 +829,8 @@ extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Optimized move function for a buffer object with both old and
@@ -840,15 +844,16 @@ extern void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo);
*/
extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- bool evict, bool no_wait,
- struct ttm_mem_reg *new_mem);
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem);
/**
* ttm_bo_move_memcpy
*
* @bo: A pointer to a struct ttm_buffer_object.
* @evict: 1: This is an eviction. Don't try to pipeline.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Fallback move function for a mappable buffer object in mappable memory.
@@ -862,8 +867,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
*/
extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
- bool evict,
- bool no_wait, struct ttm_mem_reg *new_mem);
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem);
/**
* ttm_bo_free_old_node
@@ -882,7 +887,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
* @sync_obj_arg: An argument to pass to the sync object idle / wait
* functions.
* @evict: This is an evict move. Don't return until the buffer is idle.
- * @no_wait: Never sleep, but rather return with -EBUSY.
+ * @no_wait_reserve: Return immediately if other buffers are busy.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
* @new_mem: struct ttm_mem_reg indicating where to move.
*
* Accelerated move function to be called when an accelerated move
@@ -896,7 +902,8 @@ extern void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
extern int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
void *sync_obj,
void *sync_obj_arg,
- bool evict, bool no_wait,
+ bool evict, bool no_wait_reserve,
+ bool no_wait_gpu,
struct ttm_mem_reg *new_mem);
/**
* ttm_io_prot