evict: identifier matches in the Linux kernel tree
/* powerpc: hash PTE group slot selection */
bool evict = false;
evict = true;
if (!evict && (pteg[rr] & PTE_V)) {
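These powerpc hits show a two-pass slot scan: the first pass takes only an invalid hash-PTE slot, and when the whole group is valid the scan restarts with the evict flag set so a victim entry can be overwritten. A minimal sketch of that idiom, assuming illustrative names (NSLOTS, pteg[], and PTE_V stand in for the real MMU state):

bool evict = false;
int rr = 0;

for (;;) {
        if (rr == NSLOTS) {
                /* first pass found no free slot: restart, allowed to steal */
                rr = 0;
                evict = true;
        }
        if (!evict && (pteg[rr] & PTE_V)) {
                rr++;           /* slot valid and not evicting: skip it */
                continue;
        }
        break;                  /* use slot rr; a valid entry is evicted */
}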
/* amdgpu: TTM buffer moves and move notification */
bool evict,
amdgpu_vm_bo_move(abo, new_mem, evict);
bool evict,
bool evict,
r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
amdgpu_bo_move_notify(bo, evict, new_mem);
amdgpu_bo_move_notify(bo, evict, new_mem);
amdgpu_bo_move_notify(bo, evict, new_mem);
amdgpu_bo_move_notify(bo, evict, new_mem);
amdgpu_bo_move_notify(bo, evict, new_mem);
r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
if (evict)
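The amdgpu lines above trace the shape shared by every TTM driver in this list: the move callback receives evict, forwards it to the blit path, and finally hands it to ttm_bo_move_accel_cleanup() along with the blit fence. A condensed sketch of that shape, assuming a hypothetical my_copy_buffer() helper in place of amdgpu's real blit code:

static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
                      struct ttm_operation_ctx *ctx,
                      struct ttm_resource *new_mem,
                      struct ttm_place *hop)
{
        struct dma_fence *fence;
        int r;

        /* hypothetical helper: blit the old placement to new_mem,
         * returning the fence that signals completion */
        fence = my_copy_buffer(bo, bo->resource, new_mem);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        /* evict tells TTM this was a forced move, not a requested one */
        r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
        dma_fence_put(fence);
        return r;
}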
/* drm core: generic GEM eviction dispatch */
if (obj->funcs->evict)
return obj->funcs->evict(obj);
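These two drm core lines are the generic dispatch: if a driver provides an evict hook in its drm_gem_object_funcs (the vfunc declaration appears among the header hits at the end), core GEM calls it to drop the object's backing storage. A sketch of the driver side, with my_gem_evict() as a hypothetical implementation:

static int my_gem_evict(struct drm_gem_object *obj)
{
        /* called with obj->resv held; release backing pages here */
        return 0;
}

static const struct drm_gem_object_funcs my_gem_funcs = {
        .evict = my_gem_evict,
        /* .free, .vmap, ... */
};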
/* drm_gem_vram helper */
bool evict,
bool evict,
return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
/* drm_gpuvm: per-VM evict list */
INIT_LIST_HEAD(&gpuvm->evict.list);
spin_lock_init(&gpuvm->evict.lock);
drm_WARN(gpuvm->drm, !list_empty(&gpuvm->evict.list),
drm_gpuvm_bo_list_add(vm_bo, evict, false);
LIST_HEAD(evict);
for_each_vm_bo_in_list(gpuvm, evict, &evict, vm_bo) {
restore_vm_bo_list(gpuvm, evict);
list_for_each_entry_safe(vm_bo, next, &gpuvm->evict.list,
list.entry.evict) {
drm_gpuvm_bo_list_del_init(vm_bo, evict, false);
INIT_LIST_HEAD(&vm_bo->list.entry.evict);
drm_gpuvm_bo_list_del(vm_bo, evict, lock);
drm_gpuvm_bo_list_del(vm_bo, evict, true);
drm_gpuvm_bo_list_del(vm_bo, evict, false);
drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict)
vm_bo->evicted = evict;
if (evict)
drm_gpuvm_bo_list_add(vm_bo, evict, lock);
drm_gpuvm_bo_list_del_init(vm_bo, evict, lock);
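Taken together, the drm_gpuvm hits describe a per-VM evict list: drm_gpuvm_bo_evict() records vm_bo->evicted and links or unlinks the vm_bo on gpuvm->evict.list under evict.lock, and validation later drains that list. A sketch of how a driver feeds TTM move events into it, assuming hypothetical my_bo_move_notify() and gem_from_bo() helpers:

static void my_bo_move_notify(struct ttm_buffer_object *bo, bool evicted)
{
        struct drm_gem_object *obj = gem_from_bo(bo);   /* hypothetical */

        /* marks every vm_bo attached to obj and moves it on or off the
         * per-VM evict list via drm_gpuvm_bo_evict() */
        drm_gpuvm_bo_gem_evict(obj, evicted);
}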
/* i915: TTM moves and eviction selftests */
int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
if (I915_SELFTEST_ONLY(evict && fail_gpu_migration))
ret = ttm_bo_move_accel_cleanup(bo, migration_fence, evict,
int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
struct drm_mm_node evict = arg->vma->node;
err = i915_gem_evict_for_node(vm, NULL, &evict, 0);
selftest(evict, i915_gem_evict_live_selftests)
selftest(evict, i915_gem_evict_mock_selftests)
/* msm: shrinker eviction stages */
bool evict,
return evict(obj, ticket);
{ &priv->lru.willneed, evict, can_swap() },
/* nouveau: buffer moves and pinning */
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
drm_gpuvm_bo_gem_evict(obj, evict);
ret = nouveau_bo_move_m2mf(bo, evict, ctx,
drm_gpuvm_bo_gem_evict(obj, !evict);
bool force = false, evict = false;
evict = true;
bool error = evict;
if (evict) {
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false,
/* qxl */
static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
/* radeon */
bool evict,
r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, false, new_mem);
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
r = radeon_move_blit(bo, evict, new_mem, old_mem);
/* ttm: core move dispatch (mock_move is from TTM's tests) */
static int mock_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *mem, bool evict,
ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
/* another TTM-based driver's move path */
bool evict,
if (!evict)
bool evict,
/* xe */
static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict, true,
} while (!list_empty(&vm->gpuvm.evict.list));
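The xe loop condition is the telling detail: validation must repeat until the VM's evict list drains, because making room for one buffer can evict another. A sketch of that loop, assuming a hypothetical my_validate_vm() wrapper (for example around drm_gpuvm_validate(), which walks the evict list):

int err;

do {
        err = my_validate_vm(vm);       /* hypothetical: rebind evicted BOs */
        if (err)
                break;
} while (!list_empty(&vm->gpuvm.evict.list));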
/* IB/hfi1: mmu_rb eviction callback */
if (handler->ops->evict(handler->ops_arg, rbnode, evict_arg,
int (*evict)(void *ops_arg, struct mmu_rb_node *mnode,
.evict = sdma_rb_evict,
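The hfi1 hits define a different evict contract: the interval-tree walker calls ops->evict per node, a nonzero return lets the node be removed, and *stop ends the walk early. A sketch of a callback honoring that contract; struct my_evict_data and the page accounting are illustrative, not sdma_rb_evict()'s actual body:

struct my_evict_data {
        u32 cleared;                    /* pages reclaimed so far */
        u32 target;                     /* pages the caller wants back */
};

static int my_rb_evict(void *ops_arg, struct mmu_rb_node *mnode,
                       void *evict_arg, bool *stop)
{
        struct my_evict_data *data = evict_arg;

        data->cleared += mnode->len >> PAGE_SHIFT;
        if (data->cleared >= data->target)
                *stop = true;           /* reclaimed enough: end the walk */

        return 1;                       /* nonzero: node may be removed */
}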
/* VFS: final inode teardown */
evict(inode);
evict(inode);
/* header declarations: drm_gem, drm_gpuvm, ttm */
int (*evict)(struct drm_gem_object *obj);
} evict;
struct list_head evict;
void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict);
drm_gpuvm_bo_gem_evict(struct drm_gem_object *obj, bool evict)
drm_gpuvm_bo_evict(vm_bo, evict);
struct dma_fence *fence, bool evict,
int (*move)(struct ttm_buffer_object *bo, bool evict,
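The header hits close the loop: ttm_device_funcs.move is the vfunc that ttm_bo_handle_move_mem() dispatches to with the evict flag, as seen in the ttm lines above. A sketch of the registration, reusing the hypothetical my_bo_move() from the amdgpu-style sketch earlier:

static const struct ttm_device_funcs my_ttm_funcs = {
        .move = my_bo_move,
        /* .ttm_tt_create, .evict_flags, ... */
};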