#ifndef _XE_VM_H_
#define _XE_VM_H_

#include "xe_assert.h"
#include "xe_bo_types.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_vm_types.h"

struct drm_device;
struct drm_printer;
struct drm_file;
struct ttm_buffer_object;
struct dma_fence;
struct xe_exec_queue;
struct xe_file;
struct xe_sync_entry;
struct xe_svm_range;
struct drm_exec;

struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef);
struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);
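
/**
 * xe_vm_get() - Take a reference on a VM
 * @vm: The VM
 *
 * Takes a reference through the VM's embedded &drm_gpuvm.
 *
 * Return: @vm, for convenient call chaining.
 */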
static inline struct xe_vm *xe_vm_get(struct xe_vm *vm)
{
	drm_gpuvm_get(&vm->gpuvm);
	return vm;
}
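
/**
 * xe_vm_put() - Drop a reference on a VM
 * @vm: The VM
 *
 * Drops the reference held on the VM's embedded &drm_gpuvm; the VM may be
 * freed once the last reference is gone.
 */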
static inline void xe_vm_put(struct xe_vm *vm)
{
	drm_gpuvm_put(&vm->gpuvm);
}
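
/*
 * xe_vm_lock() / xe_vm_unlock() take and drop the VM's reservation
 * (dma-resv) lock, interruptibly if @intr is true. Illustrative usage
 * sketch only, not lifted from a real call site:
 *
 *	err = xe_vm_lock(vm, true);
 *	if (err)
 *		return err;
 *	... access state protected by xe_vm_resv(vm) ...
 *	xe_vm_unlock(vm);
 */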
int xe_vm_lock(struct xe_vm *vm, bool intr);
void xe_vm_unlock(struct xe_vm *vm);
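
/**
 * xe_vm_is_closed() - Whether the VM has been closed
 * @vm: The VM
 *
 * A zero @vm->size is used as the closed marker, so the result is only
 * guaranteed not to change while @vm->lock is held.
 *
 * Return: true if the VM has been closed, false otherwise.
 */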
static inline bool xe_vm_is_closed(struct xe_vm *vm)
{
	return !vm->size;
}

static inline bool xe_vm_is_banned(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_BANNED;
}

static inline bool xe_vm_is_closed_or_banned(struct xe_vm *vm)
{
	lockdep_assert_held(&vm->lock);
	return xe_vm_is_closed(vm) || xe_vm_is_banned(vm);
}

struct xe_vma *
xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range);
bool xe_vma_has_default_mem_attrs(struct xe_vma *vma);
void xe_vm_find_cpu_addr_mirror_vma_range(struct xe_vm *vm,
					  u64 *start,
					  u64 *end);
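
/**
 * xe_vm_has_scratch() - Whether the VM uses scratch pages
 * @vm: The VM
 *
 * Return: true if the VM was created with XE_VM_FLAG_SCRATCH_PAGE, i.e.
 * unmapped ranges are backed by scratch PTEs, false otherwise.
 */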
static inline bool xe_vm_has_scratch(const struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_SCRATCH_PAGE;
}
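
/*
 * Conversion helpers between the generic drm_gpuvm / drm_gpuva objects and
 * the xe structures embedding them.
 */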
static inline struct xe_vm *gpuvm_to_vm(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct xe_vm, gpuvm);
}

static inline struct xe_vm *gpuva_to_vm(struct drm_gpuva *gpuva)
{
	return gpuvm_to_vm(gpuva->vm);
}

static inline struct xe_vma *gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline struct xe_vma_op *gpuva_op_to_vma_op(struct drm_gpuva_op *op)
{
	return container_of(op, struct xe_vma_op, base);
}
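
/*
 * VMA address-range accessors: xe_vma_start() is the first GPU VA of the
 * mapping, xe_vma_size() its size in bytes and xe_vma_end() the exclusive
 * end address (start + size).
 */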
static inline u64 xe_vma_start(struct xe_vma *vma)
{
	return vma->gpuva.va.addr;
}

static inline u64 xe_vma_size(struct xe_vma *vma)
{
	return vma->gpuva.va.range;
}

static inline u64 xe_vma_end(struct xe_vma *vma)
{
	return xe_vma_start(vma) + xe_vma_size(vma);
}

static inline u64 xe_vma_bo_offset(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}

static inline struct xe_bo *xe_vma_bo(struct xe_vma *vma)
{
	return !vma->gpuva.gem.obj ? NULL :
		container_of(vma->gpuva.gem.obj, struct xe_bo, ttm.base);
}

static inline struct xe_vm *xe_vma_vm(struct xe_vma *vma)
{
	return container_of(vma->gpuva.vm, struct xe_vm, gpuvm);
}

static inline bool xe_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}
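
/**
 * xe_vma_userptr() - Userspace address of a userptr VMA
 * @vma: The VMA
 *
 * The GEM offset field doubles as the userspace address for VMAs without a
 * backing BO; the value is only meaningful when xe_vma_is_userptr() is true.
 *
 * Return: the CPU virtual address backing @vma.
 */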
static inline u64 xe_vma_userptr(struct xe_vma *vma)
{
	return vma->gpuva.gem.offset;
}
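
/*
 * VMA classification helpers: a VMA is either backed by a BO, a NULL /
 * sparse binding (DRM_GPUVA_SPARSE), a CPU-address-mirror range managed by
 * the system allocator (XE_VMA_SYSTEM_ALLOCATOR), or, failing all of the
 * above, a userptr mapping.
 */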
static inline bool xe_vma_is_null(struct xe_vma *vma)
{
	return vma->gpuva.flags & DRM_GPUVA_SPARSE;
}

static inline bool xe_vma_is_cpu_addr_mirror(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_SYSTEM_ALLOCATOR;
}

static inline bool xe_vma_has_no_bo(struct xe_vma *vma)
{
	return !xe_vma_bo(vma);
}

static inline bool xe_vma_is_userptr(struct xe_vma *vma)
{
	return xe_vma_has_no_bo(vma) && !xe_vma_is_null(vma) &&
		!xe_vma_is_cpu_addr_mirror(vma);
}

struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
int xe_vma_need_vram_for_atomic(struct xe_device *xe, struct xe_vma *vma, bool is_atomic);
int xe_vm_alloc_madvise_vma(struct xe_vm *vm, u64 addr, u64 size);
int xe_vm_alloc_cpu_addr_mirror_vma(struct xe_vm *vm, u64 addr, u64 size);
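
/**
 * to_userptr_vma() - Downcast a VMA to the userptr VMA embedding it
 * @vma: The VMA; asserted to satisfy xe_vma_is_userptr()
 *
 * Return: the containing &struct xe_userptr_vma.
 */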
static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
{
	xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));
	return container_of(vma, struct xe_userptr_vma, vma);
}

u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile);

int xe_vm_create_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file);
int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int xe_vm_bind_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file);
int xe_vm_query_vmas_attrs_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file);

void xe_vm_close_and_put(struct xe_vm *vm);
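
/*
 * VM mode helpers: fault mode means bindings are resolved via GPU page
 * faults, LR (long-running) mode means job completion is not tracked with
 * fences on the VM's dma-resv, and preempt-fence mode is the
 * LR-but-not-faulting case, where preempt fences are used to manage
 * residency.
 */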
static inline bool xe_vm_in_fault_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline bool xe_vm_in_lr_mode(struct xe_vm *vm)
{
	return vm->flags & XE_VM_FLAG_LR_MODE;
}

static inline bool xe_vm_in_preempt_fence_mode(struct xe_vm *vm)
{
	return xe_vm_in_lr_mode(vm) && !xe_vm_in_fault_mode(vm);
}

int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q);
int xe_vm_rebind(struct xe_vm *vm, bool rebind_worker);
struct dma_fence *xe_vma_rebind(struct xe_vm *vm, struct xe_vma *vma,
				u8 tile_mask);
struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
				     struct xe_vma *vma,
				     struct xe_svm_range *range,
				     u8 tile_mask);
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
				     struct xe_svm_range *range);
int xe_vm_range_tilemask_tlb_inval(struct xe_vm *vm, u64 start,
				   u64 end, u8 tile_mask);
int xe_vm_invalidate_vma(struct xe_vma *vma);
int xe_vm_validate_protected(struct xe_vm *vm);
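
/*
 * Preempt-fence rebind worker helpers: xe_vm_queue_rebind_worker() queues
 * the VM's rebind work on the device's ordered workqueue (preempt-fence
 * mode only), and xe_vm_reactivate_rebind() re-queues it if it had been
 * deactivated.
 */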
static inline void xe_vm_queue_rebind_worker(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	queue_work(vm->xe->ordered_wq, &vm->preempt.rebind_work);
}

static inline void xe_vm_reactivate_rebind(struct xe_vm *vm)
{
	if (xe_vm_in_preempt_fence_mode(vm) && vm->preempt.rebind_deactivated) {
		vm->preempt.rebind_deactivated = false;
		xe_vm_queue_rebind_worker(vm);
	}
}

int xe_vm_lock_vma(struct drm_exec *exec, struct xe_vma *vma);
int xe_vm_validate_rebind(struct xe_vm *vm, struct drm_exec *exec,
			  unsigned int num_fences);

struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
				       struct xe_exec_queue *q, u64 addr,
				       enum xe_cache_level cache_lvl);

void xe_vm_resume_rebind_worker(struct xe_vm *vm);
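
/**
 * xe_vm_resv() - The VM's reservation object
 * @vm: The VM
 *
 * Return: a pointer to the dma-resv object embedded in the VM's &drm_gpuvm.
 */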
static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
{
	return drm_gpuvm_resv(&vm->gpuvm);
}

void xe_vm_kill(struct xe_vm *vm, bool unlocked);
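
/**
 * xe_vm_assert_held() - Assert that the VM's reservation object is held
 * @vm: The VM
 */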
#define xe_vm_assert_held(vm) dma_resv_assert_held(xe_vm_resv(vm))
int xe_vm_drm_exec_lock(struct xe_vm *vm, struct drm_exec *exec);
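
/*
 * vm_dbg() - VM debug logging wrapper; an alias of drm_dbg() when
 * CONFIG_DRM_XE_DEBUG_VM is enabled and a no-op otherwise.
 */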
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
#define vm_dbg drm_dbg
#else
__printf(2, 3)
static inline void vm_dbg(const struct drm_device *dev,
			  const char *format, ...)
{ }
#endif
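
/* Snapshot helpers for capturing, printing and freeing a VM state dump. */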
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
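
/**
 * xe_vm_set_validating() - Register the current task as validating for the VM
 * @vm: The VM, or NULL
 * @allow_res_evict: Whether eviction of the VM's own resources is allowed
 *
 * Records @current in @vm->validation.validating so that
 * xe_vm_is_validating() can later recognize validation performed by the same
 * task. A no-op when @vm is NULL or @allow_res_evict is true. Call with the
 * VM's reservation object held.
 */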
static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict)
{
	if (vm && !allow_res_evict) {
		xe_vm_assert_held(vm);
		WRITE_ONCE(vm->validation.validating, current);
	}
}
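
/**
 * xe_vm_clear_validating() - Unregister the current task as validating
 * @vm: The VM, or NULL
 * @allow_res_evict: Must match the value passed to xe_vm_set_validating()
 *
 * Clears the task recorded by xe_vm_set_validating(). A no-op when @vm is
 * NULL or @allow_res_evict is true.
 */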
static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict)
{
	if (vm && !allow_res_evict)
		WRITE_ONCE(vm->validation.validating, NULL);
}
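
/**
 * xe_vm_is_validating() - Whether the current task is registered as validating
 * @vm: The VM
 *
 * Return: true if @current was recorded via xe_vm_set_validating() and not
 * yet cleared, false otherwise.
 */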
static inline bool xe_vm_is_validating(struct xe_vm *vm)
{
	if (READ_ONCE(vm->validation.validating) == current) {
		xe_vm_assert_held(vm);
		return true;
	}
	return false;
}
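
/*
 * xe_vm_set_validation_exec() attaches (or, with a NULL @exec, detaches) the
 * drm_exec context used while validating, and xe_vm_validation_exec() returns
 * the currently attached context. Both require the VM's reservation object to
 * be held.
 */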
static inline void xe_vm_set_validation_exec(struct xe_vm *vm, struct drm_exec *exec)
{
	xe_vm_assert_held(vm);
	xe_assert(vm->xe, !!exec ^ !!vm->validation._exec);
	vm->validation._exec = exec;
}

static inline struct drm_exec *xe_vm_validation_exec(struct xe_vm *vm)
{
	xe_vm_assert_held(vm);
	return vm->validation._exec;
}
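
/**
 * xe_vm_has_valid_gpu_mapping() - Whether a mapping is valid on a tile
 * @tile: The tile to check
 * @tile_present: Per-tile bitmask of where the mapping is present
 * @tile_invalidated: Per-tile bitmask of where the mapping has been invalidated
 *
 * Return: non-zero if the mapping is present and not invalidated on @tile,
 * zero otherwise.
 */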
#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated)	\
	((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))

void xe_vma_mem_attr_copy(struct xe_vma_mem_attr *to, struct xe_vma_mem_attr *from);
#endif