lib/libkvm/kvm_alpha.c
111
vm = kd->vmst;
lib/libkvm/kvm_alpha.c
115
#define PAGE_SHIFT vm->page_shift
lib/libkvm/kvm_alpha.c
70
struct vmstate *vm;
lib/libkvm/kvm_alpha.c
72
vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
lib/libkvm/kvm_alpha.c
73
if (vm == NULL)
lib/libkvm/kvm_alpha.c
79
for (vm->page_shift = 0; (1L << vm->page_shift) < cpu_kh->page_size;
lib/libkvm/kvm_alpha.c
80
vm->page_shift++)
lib/libkvm/kvm_alpha.c
82
if ((1L << vm->page_shift) != cpu_kh->page_size) {
lib/libkvm/kvm_alpha.c
83
free(vm);
lib/libkvm/kvm_alpha.c
87
kd->vmst = vm;
lib/libkvm/kvm_alpha.c
95
struct vmstate *vm;
lib/libkvm/kvm_i386.c
100
#define ptei(vm,VA) (((VA) & (vm)->PT_mask) >> PAGE_SHIFT)
lib/libkvm/kvm_i386.c
117
struct vmstate *vm;
lib/libkvm/kvm_i386.c
120
vm = _kvm_malloc(kd, sizeof(*vm));
lib/libkvm/kvm_i386.c
121
if (vm == NULL)
lib/libkvm/kvm_i386.c
123
kd->vmst = vm;
lib/libkvm/kvm_i386.c
125
vm->PTD = NULL;
lib/libkvm/kvm_i386.c
149
vm->PTD = _kvm_malloc(kd, PTDsize);
lib/libkvm/kvm_i386.c
151
if (_kvm_pread(kd, kd->pmfd, vm->PTD, PTDsize,
lib/libkvm/kvm_i386.c
156
vm->PD_mask = PAE_PD_MASK;
lib/libkvm/kvm_i386.c
157
vm->PT_mask = PAE_PT_MASK;
lib/libkvm/kvm_i386.c
159
vm->PD_shift = PAE_PDSHIFT - 1;
lib/libkvm/kvm_i386.c
160
vm->PG_shift = PAGE_SHIFT - 1;
lib/libkvm/kvm_i386.c
162
vm->PD_mask = PD_MASK;
lib/libkvm/kvm_i386.c
163
vm->PT_mask = PT_MASK;
lib/libkvm/kvm_i386.c
164
vm->PD_shift = PDSHIFT;
lib/libkvm/kvm_i386.c
165
vm->PG_shift = PAGE_SHIFT;
lib/libkvm/kvm_i386.c
171
free(vm->PTD);
lib/libkvm/kvm_i386.c
172
vm->PTD = NULL;
lib/libkvm/kvm_i386.c
183
struct vmstate *vm;
lib/libkvm/kvm_i386.c
196
vm = kd->vmst;
lib/libkvm/kvm_i386.c
203
if (vm->PTD == NULL) {
lib/libkvm/kvm_i386.c
207
if ((vm->PTD[pdei(vm,va)] & PG_V) == 0)
lib/libkvm/kvm_i386.c
210
pte_pa = (vm->PTD[pdei(vm,va)] & PG_FRAME) +
lib/libkvm/kvm_i386.c
211
(ptei(vm,va) * sizeof(ptd_entry_t));
lib/libkvm/kvm_i386.c
99
#define pdei(vm,VA) (((VA) & (vm)->PD_mask) >> (vm)->PD_shift)
lib/libkvm/kvm_mips64.c
100
if (KREAD(kd, (u_long)nl[0].n_value, &vm->Sysmap)) {
lib/libkvm/kvm_mips64.c
104
if (KREAD(kd, (u_long)nl[1].n_value, &vm->Sysmapsize)) {
lib/libkvm/kvm_mips64.c
118
vm->pagesize = uvmexp.pagesize;
lib/libkvm/kvm_mips64.c
119
vm->pagemask = uvmexp.pagemask;
lib/libkvm/kvm_mips64.c
120
vm->pageshift = uvmexp.pageshift;
lib/libkvm/kvm_mips64.c
130
KREAD(kd, (u_long)nl[0].n_value, &vm->Sysmapbase))
lib/libkvm/kvm_mips64.c
131
vm->Sysmapbase = (vaddr_t)CKSSEG_BASE;
lib/libkvm/kvm_mips64.c
142
struct vmstate *vm;
lib/libkvm/kvm_mips64.c
151
vm = kd->vmst;
lib/libkvm/kvm_mips64.c
152
offset = (int)va & vm->pagemask;
lib/libkvm/kvm_mips64.c
157
if (vm->Sysmap == 0) {
lib/libkvm/kvm_mips64.c
159
return vm->pagesize - offset;
lib/libkvm/kvm_mips64.c
166
return vm->pagesize - offset;
lib/libkvm/kvm_mips64.c
170
return vm->pagesize - offset;
lib/libkvm/kvm_mips64.c
172
if (va < vm->Sysmapbase)
lib/libkvm/kvm_mips64.c
174
idx = (va - vm->Sysmapbase) >> vm->pageshift;
lib/libkvm/kvm_mips64.c
175
if (idx >= vm->Sysmapsize)
lib/libkvm/kvm_mips64.c
177
addr = (u_long)vm->Sysmap + idx;
lib/libkvm/kvm_mips64.c
188
return vm->pagesize - offset;
lib/libkvm/kvm_mips64.c
82
struct vmstate *vm;
lib/libkvm/kvm_mips64.c
86
vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
lib/libkvm/kvm_mips64.c
87
if (vm == 0)
lib/libkvm/kvm_mips64.c
89
kd->vmst = vm;
lib/libkvm/kvm_proc.c
170
struct vmspace vm;
lib/libkvm/kvm_proc.c
188
if (KREAD(kd, (u_long)p->p_vmspace, &vm))
lib/libkvm/kvm_proc.c
190
addr = (u_long)vm.vm_map.addr.rbh_root.rbt_root;
lib/libkvm/kvm_proc2.c
126
struct vmspace vm, *vmp;
lib/libkvm/kvm_proc2.c
292
!KREAD(kd, (u_long)process.ps_vmspace, &vm))
lib/libkvm/kvm_proc2.c
293
vmp = &vm;
sys/arch/alpha/alpha/trap.c
364
struct vmspace *vm = NULL;
sys/arch/alpha/alpha/trap.c
391
vm = NULL;
sys/arch/alpha/alpha/trap.c
394
vm = p->p_vmspace;
sys/arch/alpha/alpha/trap.c
395
map = &vm->vm_map;
sys/arch/alpha/include/pmap.h
163
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)
sys/arch/amd64/amd64/vmm_machdep.c
124
int vmm_get_guest_memtype(struct vm *, paddr_t);
sys/arch/amd64/amd64/vmm_machdep.c
125
vaddr_t vmm_translate_gpa(struct vm *, paddr_t);
sys/arch/amd64/amd64/vmm_machdep.c
3345
struct vm *vm;
sys/arch/amd64/amd64/vmm_machdep.c
3353
ret = vm_find(vrp->vrp_vm_id, &vm);
sys/arch/amd64/amd64/vmm_machdep.c
3357
vcpu = vm_find_vcpu(vm, vrp->vrp_vcpu_id);
sys/arch/amd64/amd64/vmm_machdep.c
3414
refcnt_rele_wake(&vm->vm_refcnt);
sys/arch/amd64/amd64/vmm_machdep.c
347
struct vm *vm;
sys/arch/amd64/amd64/vmm_machdep.c
360
SLIST_FOREACH(vm, &vmm_softc->vm_list, vm_link) {
sys/arch/amd64/amd64/vmm_machdep.c
362
SLIST_FOREACH(vcpu, &vm->vm_vcpu_list, vc_vcpu_link) {
sys/arch/amd64/amd64/vmm_machdep.c
371
__func__, vcpu->vc_id, vm->vm_id);
sys/arch/amd64/amd64/vmm_machdep.c
384
vcpu->vc_id, vm->vm_id);
sys/arch/amd64/amd64/vmm_machdep.c
392
vcpu->vc_id, vm->vm_id);
sys/arch/amd64/amd64/vmm_machdep.c
401
vcpu->vc_id, vm->vm_id);
sys/arch/amd64/amd64/vmm_machdep.c
4497
struct vm *vm = vcpu->vc_parent;
sys/arch/amd64/amd64/vmm_machdep.c
4507
if (!pmap_extract(vm->vm_pmap, ghcb_gpa, &ghcb_hpa))
sys/arch/amd64/amd64/vmm_machdep.c
485
struct vm *vm;
sys/arch/amd64/amd64/vmm_machdep.c
4866
vmm_get_guest_memtype(struct vm *vm, paddr_t gpa)
sys/arch/amd64/amd64/vmm_machdep.c
4872
for (i = 0; i < vm->vm_nmemranges; i++) {
sys/arch/amd64/amd64/vmm_machdep.c
4873
vmr = &vm->vm_memranges[i];
sys/arch/amd64/amd64/vmm_machdep.c
4894
vmm_translate_gpa(struct vm *vm, paddr_t gpa)
sys/arch/amd64/amd64/vmm_machdep.c
4904
for (i = 0; i < vm->vm_nmemranges; i++) {
sys/arch/amd64/amd64/vmm_machdep.c
4905
vmr = &vm->vm_memranges[i];
sys/arch/amd64/amd64/vmm_machdep.c
493
error = vm_find(vip->vip_vm_id, &vm);
sys/arch/amd64/amd64/vmm_machdep.c
499
vcpu = vm_find_vcpu(vm, vip->vip_vcpu_id);
sys/arch/amd64/amd64/vmm_machdep.c
514
refcnt_rele_wake(&vm->vm_refcnt);
sys/arch/amd64/amd64/vmm_machdep.c
536
struct vm *vm;
sys/arch/amd64/amd64/vmm_machdep.c
541
error = vm_find(vpp->vpp_vm_id, &vm);
sys/arch/amd64/amd64/vmm_machdep.c
547
vcpu = vm_find_vcpu(vm, vpp->vpp_vcpu_id);
sys/arch/amd64/amd64/vmm_machdep.c
568
refcnt_rele_wake(&vm->vm_refcnt);
sys/arch/amd64/amd64/vmm_machdep.c
592
struct vm *vm;
sys/arch/amd64/amd64/vmm_machdep.c
598
error = vm_find(vrwp->vrwp_vm_id, &vm);
sys/arch/amd64/amd64/vmm_machdep.c
604
vcpu = vm_find_vcpu(vm, vrwp->vrwp_vcpu_id);
sys/arch/amd64/amd64/vmm_machdep.c
626
refcnt_rele_wake(&vm->vm_refcnt);
sys/arch/amd64/amd64/vmm_machdep.c
6999
struct vm *vm = vcpu->vc_parent;
sys/arch/amd64/amd64/vmm_machdep.c
7003
for (i = 0; i < vm->vm_nmemranges; ++i) {
sys/arch/amd64/amd64/vmm_machdep.c
7004
vmr = &vm->vm_memranges[i];
sys/arch/amd64/amd64/vmm_machdep.c
7046
struct vm *vm = vcpu->vc_parent;
sys/arch/amd64/amd64/vmm_machdep.c
7051
if (!pmap_extract(vm->vm_pmap, pvclock_gpa, &pvclock_hpa))
sys/arch/amd64/amd64/vmm_machdep.c
7079
struct vm *vm = vcpu->vc_parent;
sys/arch/amd64/amd64/vmm_machdep.c
7089
if (!pmap_extract(vm->vm_pmap, gpa, &hpa))
sys/arch/amd64/amd64/vmm_machdep.c
7409
struct vm *vm;
sys/arch/amd64/amd64/vmm_machdep.c
7413
error = vm_find(vmid, &vm);
sys/arch/amd64/amd64/vmm_machdep.c
7417
vcpu = vm_find_vcpu(vm, vcpuid);
sys/arch/amd64/amd64/vmm_machdep.c
7427
refcnt_rele_wake(&vm->vm_refcnt);
sys/arch/amd64/amd64/vmm_machdep.c
916
vm_impl_init(struct vm *vm, struct proc *p)
sys/arch/amd64/amd64/vmm_machdep.c
921
pmap_convert(vm->vm_pmap, PMAP_TYPE_EPT);
sys/arch/amd64/amd64/vmm_machdep.c
924
pmap_convert(vm->vm_pmap, PMAP_TYPE_RVI);
sys/arch/amd64/amd64/vmm_machdep.c
935
vm_impl_deinit(struct vm *vm)
sys/arch/amd64/include/pmap.h
380
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)
sys/arch/amd64/include/vmmvar.h
1063
int vm_impl_init(struct vm *, struct proc *);
sys/arch/amd64/include/vmmvar.h
1064
void vm_impl_deinit(struct vm *);
sys/arch/amd64/include/vmmvar.h
624
struct vm;
sys/arch/amd64/include/vmmvar.h
964
struct vm *vc_parent; /* [I] */
sys/arch/arm/include/pmap.h
234
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)
sys/arch/arm64/arm64/pmap.c
2079
pmap_remove_holes(struct vmspace *vm)
sys/arch/hppa/hppa/trap.c
154
struct vmspace *vm;
sys/arch/hppa/hppa/trap.c
412
vm = p->p_vmspace;
sys/arch/hppa/hppa/trap.c
413
map = &vm->vm_map;
sys/arch/hppa/hppa/trap.c
474
vm = p->p_vmspace;
sys/arch/hppa/hppa/trap.c
475
map = &vm->vm_map;
sys/arch/hppa/include/pmap.h
116
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)
sys/arch/i386/include/pmap.h
215
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)
sys/arch/m88k/include/pmap.h
69
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)
sys/arch/m88k/m88k/trap.c
225
struct vmspace *vm;
sys/arch/m88k/m88k/trap.c
304
vm = p->p_vmspace;
sys/arch/m88k/m88k/trap.c
413
vm = p->p_vmspace;
sys/arch/m88k/m88k/trap.c
414
map = &vm->vm_map;
sys/arch/m88k/m88k/trap.c
625
struct vmspace *vm;
sys/arch/m88k/m88k/trap.c
834
vm = p->p_vmspace;
sys/arch/m88k/m88k/trap.c
906
vm = p->p_vmspace;
sys/arch/m88k/m88k/trap.c
907
map = &vm->vm_map;
sys/arch/mips64/include/pmap.h
170
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)
sys/arch/mips64/mips64/trap.c
337
struct vmspace *vm;
sys/arch/mips64/mips64/trap.c
341
vm = p->p_vmspace;
sys/arch/mips64/mips64/trap.c
342
map = &vm->vm_map;
sys/arch/powerpc/include/pmap.h
154
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)
sys/arch/powerpc64/include/pmap.h
69
#define pmap_remove_holes(vm)
sys/arch/riscv64/riscv64/pmap.c
1896
pmap_remove_holes(struct vmspace *vm)
sys/arch/sh/include/pmap.h
65
#define pmap_remove_holes(vm) do { /* nothing */ } while (0)
sys/arch/sparc64/dev/vdsk.c
472
struct vio_msg *vm = (struct vio_msg *)lp;
sys/arch/sparc64/dev/vdsk.c
474
switch (vm->type) {
sys/arch/sparc64/dev/vdsk.c
479
vdsk_rx_vio_ctrl(lc->lc_sc, vm);
sys/arch/sparc64/dev/vdsk.c
485
vdsk_rx_vio_data(lc->lc_sc, vm);
sys/arch/sparc64/dev/vdsk.c
489
DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
sys/arch/sparc64/dev/vdsk.c
496
vdsk_rx_vio_ctrl(struct vdsk_softc *sc, struct vio_msg *vm)
sys/arch/sparc64/dev/vdsk.c
498
struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;
sys/arch/sparc64/dev/vdsk.c
665
vdsk_rx_vio_data(struct vdsk_softc *sc, struct vio_msg *vm)
sys/arch/sparc64/dev/vdsk.c
667
struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;
sys/arch/sparc64/dev/vdsp.c
502
struct vio_msg *vm = (struct vio_msg *)lp;
sys/arch/sparc64/dev/vdsp.c
504
switch (vm->type) {
sys/arch/sparc64/dev/vdsp.c
509
vdsp_rx_vio_ctrl(lc->lc_sc, vm);
sys/arch/sparc64/dev/vdsp.c
515
vdsp_rx_vio_data(lc->lc_sc, vm);
sys/arch/sparc64/dev/vdsp.c
519
DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
sys/arch/sparc64/dev/vdsp.c
526
vdsp_rx_vio_ctrl(struct vdsp_softc *sc, struct vio_msg *vm)
sys/arch/sparc64/dev/vdsp.c
528
struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;
sys/arch/sparc64/dev/vdsp.c
690
vdsp_rx_vio_data(struct vdsp_softc *sc, struct vio_msg *vm)
sys/arch/sparc64/dev/vdsp.c
692
struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;
sys/arch/sparc64/dev/vnet.c
462
struct vio_msg *vm = (struct vio_msg *)lp;
sys/arch/sparc64/dev/vnet.c
464
switch (vm->type) {
sys/arch/sparc64/dev/vnet.c
469
vnet_rx_vio_ctrl(lc->lc_sc, vm);
sys/arch/sparc64/dev/vnet.c
475
vnet_rx_vio_data(lc->lc_sc, vm);
sys/arch/sparc64/dev/vnet.c
479
DPRINTF(("Unhandled packet type 0x%02x\n", vm->type));
sys/arch/sparc64/dev/vnet.c
486
vnet_rx_vio_ctrl(struct vnet_softc *sc, struct vio_msg *vm)
sys/arch/sparc64/dev/vnet.c
488
struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;
sys/arch/sparc64/dev/vnet.c
688
vnet_rx_vio_data(struct vnet_softc *sc, struct vio_msg *vm)
sys/arch/sparc64/dev/vnet.c
690
struct vio_msg_tag *tag = (struct vio_msg_tag *)&vm->type;
sys/arch/sparc64/sparc64/pmap.c
2837
pmap_remove_holes(struct vmspace *vm)
sys/arch/sparc64/sparc64/pmap.c
2840
struct vm_map *map = &vm->vm_map;
sys/dev/pci/drm/amd/amdgpu/amdgpu.h
503
struct amdgpu_vm vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
299
((struct drm_file *)(drm_priv))->driver_priv)->vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
369
struct amdgpu_vm *vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
386
struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1152
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1158
WARN_ON(!vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1164
ret = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1193
struct amdgpu_vm *vm, enum bo_vm_match map_type,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1206
if ((vm && vm != entry->bo_va->base.vm) ||
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1211
ret = amdgpu_vm_lock_pd(entry->bo_va->base.vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1261
struct amdgpu_vm *vm = bo_va->base.vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1271
if (!amdgpu_vm_ready(vm))
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1274
(void)amdgpu_vm_clear_freed(adev, vm, &bo_va->last_pt_update);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1389
static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1424
if (cmpxchg(&vm->process_info, NULL, *process_info) != NULL) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1430
ret = amdgpu_bo_reserve(vm->root.bo, true);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1433
ret = vm_validate_pt_pd_bos(vm, NULL);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1438
ret = amdgpu_bo_sync_wait(vm->root.bo,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1442
ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1445
dma_resv_add_fence(vm->root.bo->tbo.base.resv,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1446
&vm->process_info->eviction_fence->base,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1448
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1451
mutex_lock(&vm->process_info->lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1452
list_add_tail(&vm->vm_list_node,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1453
&(vm->process_info->vm_list_head));
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1454
vm->process_info->n_vms++;
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1456
*ef = dma_fence_get(&vm->process_info->eviction_fence->base);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1457
mutex_unlock(&vm->process_info->lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1464
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1466
vm->process_info = NULL;
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1573
struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1575
struct amdkfd_process_info *process_info = vm->process_info;
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1583
list_del(&vm->vm_list_node);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1586
vm->process_info = NULL;
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1697
struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2067
if (entry->bo_va->base.vm != avm || entry->is_mapped)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2107
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2110
vm = drm_priv_to_vm(drm_priv);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2119
if (entry->bo_va->base.vm != vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2165
if (entry->bo_va->base.vm != avm || !entry->is_mapped)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2237
bo, bo->vm_bo->vm->process_info->eviction_fence);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
3010
container_of(peer_vm, struct amdgpu_fpriv, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
3219
struct amdgpu_vm *vm = drm_priv_to_vm(drm_priv);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
3223
if (entry->is_mapped && entry->bo_va->base.vm == vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
481
static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
484
struct amdgpu_bo *pd = vm->root.bo;
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
488
ret = amdgpu_vm_validate(adev, vm, ticket,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
495
vm->pd_phys_addr = amdgpu_gmc_pd_addr(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
500
static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
502
struct amdgpu_bo *pd = vm->root.bo;
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
506
ret = amdgpu_vm_update_pdes(adev, vm, false);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
510
return amdgpu_sync_fence(sync, vm->last_update, GFP_KERNEL);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
513
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
83
if (entry->bo_va->base.vm == avm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
872
struct amdgpu_vm *vm, bool is_aql)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
914
va + bo_size, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
967
bo_va = amdgpu_vm_bo_find(vm, bo[i]);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
969
bo_va = amdgpu_vm_bo_add(adev, vm, bo[i]);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
981
attachment[i]->pte_flags = get_pte_flags(adev, vm, mem);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1097
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1113
if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1118
if (!amdgpu_vm_ready(vm))
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1121
r = amdgpu_vm_clear_freed(adev, vm, NULL);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1167
r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1171
r = amdgpu_vm_update_pdes(adev, vm, false);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1175
r = amdgpu_sync_fence(&p->sync, vm->last_update, GFP_KERNEL);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1182
if (!job->vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1185
job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1230
&fpriv->vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1370
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->exec.ticket);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1378
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1784
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
1790
mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
181
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
283
ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
320
amdgpu_vm_set_task_info(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
345
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
389
r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
860
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
908
r = amdgpu_vm_lock_pd(&fpriv->vm, &p->exec, 1 + p->gang_size);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
921
e->bo_va = amdgpu_vm_bo_find(vm, e->bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
963
r = amdgpu_vm_validate(p->adev, &fpriv->vm, NULL,
sys/dev/pci/drm/amd/amdgpu/amdgpu_csa.c
105
int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_csa.c
114
r = amdgpu_vm_lock_pd(vm, &exec, 0);
sys/dev/pci/drm/amd/amdgpu/amdgpu_csa.c
65
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_csa.c
74
r = amdgpu_vm_lock_pd(vm, &exec, 0);
sys/dev/pci/drm/amd/amdgpu/amdgpu_csa.c
84
*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_csa.h
34
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_csa.h
37
int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_ctx.c
339
ctx->generation = amdgpu_vm_generation(mgr->adev, &fpriv->vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ctx.c
600
if (ctx->generation != amdgpu_vm_generation(adev, &fpriv->vm))
sys/dev/pci/drm/amd/amdgpu/amdgpu_debugfs.c
1784
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_debugfs.c
1787
ti = amdgpu_vm_get_task_info_vm(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_debugfs.c
1793
r = amdgpu_bo_reserve(vm->root.bo, true);
sys/dev/pci/drm/amd/amdgpu/amdgpu_debugfs.c
1796
amdgpu_debugfs_vm_bo_info(vm, m);
sys/dev/pci/drm/amd/amdgpu/amdgpu_debugfs.c
1797
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_debugfs.c
2148
if (!fpriv || !fpriv->vm.root.bo)
sys/dev/pci/drm/amd/amdgpu/amdgpu_debugfs.c
2151
root_bo = amdgpu_bo_ref(fpriv->vm.root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_debugfs.c
2158
seq_printf(m, "pd_address: 0x%llx\n", amdgpu_gmc_pd_addr(fpriv->vm.root.bo));
sys/dev/pci/drm/amd/amdgpu/amdgpu_device.c
6000
if (job && job->vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_dma_buf.c
480
struct amdgpu_vm *vm = bo_base->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_dma_buf.c
481
struct dma_resv *resv = vm->root.bo->tbo.base.resv;
sys/dev/pci/drm/amd/amdgpu/amdgpu_dma_buf.c
504
r = amdgpu_vm_clear_freed(adev, vm, NULL);
sys/dev/pci/drm/amd/amdgpu/amdgpu_dma_buf.c
513
r = amdgpu_vm_handle_moved(adev, vm, NULL);
sys/dev/pci/drm/amd/amdgpu/amdgpu_drv.c
3032
timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
sys/dev/pci/drm/amd/amdgpu/amdgpu_fdinfo.c
63
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_fdinfo.c
79
amdgpu_vm_get_memory(vm, stats);
sys/dev/pci/drm/amd/amdgpu/amdgpu_fdinfo.c
88
drm_printf(p, "pasid:\t%u\n", fpriv->vm.pasid);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
1017
r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 2);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
1025
bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
1060
r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
1079
fence = amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va,
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
1135
r = amdgpu_vm_lock_pd(&fpriv->vm, &exec, 0);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
1168
amdgpu_ttm_adev(base->vm->root.bo->tbo.bdev))) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
1186
struct amdgpu_bo_va *bo_va = amdgpu_vm_bo_find(&fpriv->vm, robj);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
319
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
333
!amdgpu_vm_is_bo_always_valid(vm, abo))
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
341
bo_va = amdgpu_vm_bo_find(vm, abo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
343
bo_va = amdgpu_vm_bo_add(adev, vm, abo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
368
if (!vm->is_compute_context || !vm->process_info)
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
373
mutex_lock_nested(&vm->process_info->lock, 1);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
374
if (!WARN_ON(!vm->process_info->eviction_fence)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
376
&vm->process_info->eviction_fence->base);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
378
struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
387
mutex_unlock(&vm->process_info->lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
398
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
412
r = amdgpu_vm_lock_pd(vm, &exec, 0);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
418
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
421
bo_va = amdgpu_vm_bo_find(vm, bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
427
if (!amdgpu_vm_ready(vm))
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
430
r = amdgpu_vm_clear_freed(adev, vm, &fence);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
512
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
554
r = amdgpu_bo_reserve(vm->root.bo, false);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
558
resv = vm->root.bo->tbo.base.resv;
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
584
abo->parent = amdgpu_bo_ref(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
586
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
839
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
847
fence = dma_fence_get(vm->last_update);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
849
if (!amdgpu_vm_ready(vm))
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
859
r = amdgpu_vm_clear_freed(adev, vm, &fence);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
872
r = amdgpu_vm_update_pdes(adev, vm, false);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
900
if (amdgpu_vm_is_bo_always_valid(vm, bo_va->base.bo))
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
901
fence = dma_fence_get(vm->last_update);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gmc.h
162
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_gmc.h
168
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_gmc.h
360
#define amdgpu_gmc_get_vm_pte(adev, vm, bo, vm_flags, pte_flags) \
sys/dev/pci/drm/amd/amdgpu/amdgpu_gmc.h
361
((adev)->gmc.gmc_funcs->get_vm_pte((adev), (vm), (bo), (vm_flags), \
sys/dev/pci/drm/amd/amdgpu/amdgpu_gmc.h
363
#define amdgpu_gmc_override_vm_pte_flags(adev, vm, addr, pte_flags) \
sys/dev/pci/drm/amd/amdgpu/amdgpu_gmc.h
365
((adev), (vm), (addr), (pte_flags))
sys/dev/pci/drm/amd/amdgpu/amdgpu_ib.c
133
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ib.c
150
vm = job->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ib.c
164
vm = NULL;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ib.c
178
if (vm && !job->vmid) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_ib.c
64
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_ib.c
82
if (!vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
280
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
289
bool needs_flush = vm->use_cpu_for_update;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
290
uint64_t updates = amdgpu_vm_tlb_seq(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
293
*id = vm->reserved_vmid[vmhub];
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
294
if ((*id)->owner != vm->immediate.fence_context ||
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
302
if ((*id)->owner != vm->immediate.fence_context ||
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
345
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
354
uint64_t updates = amdgpu_vm_tlb_seq(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
357
job->vm_needs_flush = vm->use_cpu_for_update;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
361
bool needs_flush = vm->use_cpu_for_update;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
364
if ((*id)->owner != vm->immediate.fence_context)
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
408
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
423
if (amdgpu_vmid_uses_reserved(vm, vmhub)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
424
r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
428
r = amdgpu_vmid_grab_used(vm, ring, job, &id);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
451
id->flushed_updates = amdgpu_vm_tlb_seq(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
456
job->pasid = vm->pasid;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
465
id->owner = vm->immediate.fence_context;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
467
trace_amdgpu_vm_grab_id(vm, ring, job);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
481
bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub)
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
483
return vm->reserved_vmid[vmhub];
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
496
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
504
if (vm->reserved_vmid[vmhub])
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
513
vm->reserved_vmid[vmhub] = id;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
529
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
535
if (vm->reserved_vmid[vmhub]) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
536
list_add(&vm->reserved_vmid[vmhub]->list,
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.c
538
vm->reserved_vmid[vmhub] = NULL;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.h
81
bool amdgpu_vmid_uses_reserved(struct amdgpu_vm *vm, unsigned int vmhub);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.h
82
int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.h
84
void amdgpu_vmid_free_reserved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_ids.h
86
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
sys/dev/pci/drm/amd/amdgpu/amdgpu_job.c
183
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_job.c
195
(*job)->vm = vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_job.c
198
(*job)->generation = amdgpu_vm_generation(adev, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_job.c
362
if (job->vm && !job->vmid) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_job.c
363
r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
sys/dev/pci/drm/amd/amdgpu/amdgpu_job.c
392
if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
sys/dev/pci/drm/amd/amdgpu/amdgpu_job.h
108
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_job.h
65
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1342
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1346
if (!vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1352
gpuvm_fault.addr = vm->fault_info.addr;
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1353
gpuvm_fault.status = vm->fault_info.status;
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1354
gpuvm_fault.vmhub = vm->fault_info.vmhub;
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1431
r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id, pasid);
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1435
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1444
r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1450
r = amdgpu_seq64_map(adev, &fpriv->vm, &fpriv->seq64_va);
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1471
amdgpu_vm_fini(adev, &fpriv->vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1518
WARN_ON(amdgpu_unmap_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1525
pasid = fpriv->vm.pasid;
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1526
pd = amdgpu_bo_ref(fpriv->vm.root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_kms.c
1533
amdgpu_vm_fini(adev, &fpriv->vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ring.h
561
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_seq64.c
128
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_seq64.c
140
vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_seq64.c
144
r = amdgpu_vm_lock_pd(vm, &exec, 0);
sys/dev/pci/drm/amd/amdgpu/amdgpu_seq64.c
67
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_seq64.c
81
r = amdgpu_vm_lock_pd(vm, &exec, 0);
sys/dev/pci/drm/amd/amdgpu/amdgpu_seq64.c
89
*bo_va = amdgpu_vm_bo_add(adev, vm, bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_seq64.h
44
int amdgpu_seq64_map(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_trace.h
215
TP_PROTO(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
sys/dev/pci/drm/amd/amdgpu/amdgpu_trace.h
217
TP_ARGS(vm, ring, job),
sys/dev/pci/drm/amd/amdgpu/amdgpu_trace.h
229
__entry->pasid = vm->pasid;
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
532
queue->vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
69
struct amdgpu_vm *vm = queue->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
77
r = amdgpu_bo_reserve(vm->root.bo, false);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
771
struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
778
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
779
while (!list_empty(&vm->invalidated)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
780
bo_va = list_first_entry(&vm->invalidated,
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
783
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
800
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
802
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
81
va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
813
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
820
ret = amdgpu_vm_lock_pd(vm, &exec, 1);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
825
ret = amdgpu_vm_lock_done_list(vm, &exec, 1);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
831
ret = amdgpu_vm_validate(adev, vm, NULL,
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
838
ret = amdgpu_userq_bo_validate(adev, &exec, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
844
ret = amdgpu_vm_handle_moved(adev, vm, NULL);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
848
ret = amdgpu_vm_update_pdes(adev, vm, false);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
857
list_for_each_entry(bo_va, &vm->done, base.vm_status)
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
859
dma_fence_wait(vm->last_update, false);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
90
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.c
96
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq.h
63
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq_fence.c
398
r = amdgpu_bo_reserve(queue->vm->root.bo, false);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq_fence.c
405
mapping = amdgpu_vm_bo_lookup_mapping(queue->vm, addr >> PAGE_SHIFT);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq_fence.c
407
amdgpu_bo_unreserve(queue->vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_userq_fence.c
413
amdgpu_bo_unreserve(queue->vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_uvd.c
1092
job->vm = NULL;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vce.c
752
job->vm = NULL;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1024
struct amdgpu_vm *vm, bool immediate)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1032
amdgpu_vm_assert_locked(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1034
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1035
list_splice_init(&vm->relocated, &relocated);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1036
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1046
params.vm = vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1049
r = vm->update_funcs->prepare(&params, NULL,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1063
r = vm->update_funcs->commit(&params, &vm->last_update);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1068
atomic64_inc(&vm->tlb_seq);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1094
atomic64_inc(&tlb_cb->vm->tlb_seq);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1112
struct amdgpu_vm *vm = params->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1114
tlb_cb->vm = vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1122
dma_fence_put(vm->last_tlb_flush);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1123
vm->last_tlb_flush = dma_fence_get(*fence);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1132
amdgpu_vm_tlb_fence_create(params->adev, vm, fence);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1135
dma_resv_add_fence(vm->root.bo->tbo.base.resv, *fence,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1164
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1199
params.vm = vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1207
amdgpu_vm_eviction_lock(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1208
if (vm->evicting) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1213
if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1216
amdgpu_bo_fence(vm->root.bo, vm->last_unlocked, true);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1217
swap(vm->last_unlocked, tmp);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1221
r = vm->update_funcs->prepare(&params, sync,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1280
r = vm->update_funcs->commit(&params, fence);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1293
amdgpu_vm_eviction_unlock(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1298
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1301
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1302
memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1303
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1322
struct amdgpu_vm *vm = bo_va->base.vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1341
r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1342
AMDGPU_SYNC_EQ_OWNER, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1376
AMDGPU_SYNC_EXPLICIT, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1398
if (clear || amdgpu_vm_is_bo_always_valid(vm, bo))
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1399
last_update = &vm->last_update;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1423
amdgpu_gmc_get_vm_pte(adev, vm, bo, mapping->flags,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1428
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1441
if (amdgpu_vm_is_bo_always_valid(vm, bo)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1561
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1578
static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1580
struct dma_resv *resv = vm->root.bo->tbo.base.resv;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1607
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1621
r = amdgpu_sync_resv(adev, &sync, vm->root.bo->tbo.base.resv,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1622
AMDGPU_SYNC_EQ_OWNER, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1626
while (!list_empty(&vm->freed)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1627
mapping = list_first_entry(&vm->freed,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1631
r = amdgpu_vm_update_range(adev, vm, false, false, true, false,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1634
amdgpu_vm_free_mapping(adev, vm, mapping, f);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1669
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1677
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1678
while (!list_empty(&vm->moved)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1679
bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1681
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1687
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1690
while (!list_empty(&vm->invalidated)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1691
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1694
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1720
if (vm->is_compute_context &&
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1726
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1728
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1747
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1751
uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1755
WARN_ON_ONCE(!vm->is_compute_context);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1762
if (atomic64_xchg(&vm->kfd_last_flushed_seq, tlb_seq) == tlb_seq)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1770
r = amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, flush_type,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1794
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1803
amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
182
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1837
struct amdgpu_vm *vm = bo_va->base.vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1842
amdgpu_vm_it_insert(mapping, &vm->va);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1847
if (amdgpu_vm_is_bo_always_valid(vm, bo) && !bo_va->base.moved)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1908
struct amdgpu_vm *vm = bo_va->base.vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1919
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
196
static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
1979
r = amdgpu_vm_bo_clear_mappings(adev, bo_va->base.vm, saddr, size);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
198
dma_resv_assert_held(vm->root.bo->tbo.base.resv);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2017
struct amdgpu_vm *vm = bo_va->base.vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2040
amdgpu_vm_it_remove(mapping, &vm->va);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2045
list_add(&mapping->list, &vm->freed);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2047
amdgpu_vm_free_mapping(adev, vm, mapping,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2067
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2096
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
211
struct amdgpu_vm *vm = vm_bo->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2127
amdgpu_vm_it_remove(tmp, &vm->va);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2136
list_add(&tmp->list, &vm->freed);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2144
amdgpu_vm_it_insert(before, &vm->va);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2148
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
215
amdgpu_vm_assert_locked(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2159
amdgpu_vm_it_insert(after, &vm->va);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
216
spin_lock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2163
if (amdgpu_vm_is_bo_always_valid(vm, bo) &&
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
218
list_move(&vm_bo->vm_status, &vm->evicted);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2185
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2188
return amdgpu_vm_it_iter_first(&vm->va, addr, addr);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2199
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
220
list_move_tail(&vm_bo->vm_status, &vm->evicted);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2206
for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
221
spin_unlock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2236
struct amdgpu_vm *vm = bo_va->base.vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2239
dma_resv_assert_held(vm->root.bo->tbo.base.resv);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2243
if (amdgpu_vm_is_bo_always_valid(vm, bo))
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2257
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2259
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2263
amdgpu_vm_it_remove(mapping, &vm->va);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2266
list_add(&mapping->list, &vm->freed);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2270
amdgpu_vm_it_remove(mapping, &vm->va);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2271
amdgpu_vm_free_mapping(adev, vm, mapping,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2295
if (!bo_base || !bo_base->vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2303
if (!amdgpu_vm_eviction_trylock(bo_base->vm))
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2307
if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2308
amdgpu_vm_eviction_unlock(bo_base->vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2312
bo_base->vm->evicting = true;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2313
amdgpu_vm_eviction_unlock(bo_base->vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
233
amdgpu_vm_assert_locked(vm_bo->vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2330
struct amdgpu_vm *vm = bo_base->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2332
if (evicted && amdgpu_vm_is_bo_always_valid(vm, bo)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
234
spin_lock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2343
else if (amdgpu_vm_is_bo_always_valid(vm, bo))
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
235
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
236
spin_unlock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2365
struct amdgpu_vm *vm = bo_base->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2367
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2370
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
249
amdgpu_vm_assert_locked(vm_bo->vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
250
spin_lock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2505
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2507
timeout = drm_sched_entity_flush(&vm->immediate, timeout);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
251
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2511
return drm_sched_entity_flush(&vm->delayed, timeout);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
252
spin_unlock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2524
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2528
vm = xa_load(&adev->vm_manager.pasids, pasid);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2531
return vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2556
amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2560
if (vm) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2561
ti = vm->task_info;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2562
kref_get(&vm->task_info->refcount);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2584
static int amdgpu_vm_create_task_info(struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2586
vm->task_info = kzalloc(sizeof(struct amdgpu_task_info), GFP_KERNEL);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2587
if (!vm->task_info)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2590
kref_init(&vm->task_info->refcount);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2599
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2601
if (!vm->task_info)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2605
if (vm->task_info->task.pid == current->pid)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2608
vm->task_info->task.pid = current->pid;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2609
get_task_comm(vm->task_info->task.comm, current);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2614
vm->task_info->tgid = current->group_leader->pid;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2615
get_task_comm(vm->task_info->process_name, current->group_leader);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2617
if (vm->task_info->task.pid == curproc->p_tid)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2621
vm->task_info->task.pid = curproc->p_tid;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2622
strlcpy(vm->task_info->task.comm, curproc->p_p->ps_comm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2623
sizeof(vm->task_info->task.comm));
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2626
vm->task_info->tgid = curproc->p_p->ps_pid;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2627
strlcpy(vm->task_info->process_name, curproc->p_p->ps_comm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2628
sizeof(vm->task_info->process_name));
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2645
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2652
vm->va = RB_ROOT_CACHED;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2654
vm->reserved_vmid[i] = NULL;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2655
INIT_LIST_HEAD(&vm->evicted);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2656
INIT_LIST_HEAD(&vm->evicted_user);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2657
INIT_LIST_HEAD(&vm->relocated);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2658
INIT_LIST_HEAD(&vm->moved);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2659
INIT_LIST_HEAD(&vm->idle);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
266
spin_lock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2660
INIT_LIST_HEAD(&vm->invalidated);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2661
mtx_init(&vm->status_lock, IPL_NONE);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2662
INIT_LIST_HEAD(&vm->freed);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2663
INIT_LIST_HEAD(&vm->done);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2665
INIT_KFIFO(vm->faults);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2667
SIMPLEQ_INIT(&vm->faults);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
267
list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2670
r = amdgpu_vm_init_entities(adev, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2674
ttm_lru_bulk_move_init(&vm->lru_bulk_move);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2676
vm->is_compute_context = false;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2678
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
268
spin_unlock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2682
vm->use_cpu_for_update ? "CPU" : "SDMA");
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2683
WARN_ONCE((vm->use_cpu_for_update &&
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2687
if (vm->use_cpu_for_update)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2688
vm->update_funcs = &amdgpu_vm_cpu_funcs;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2690
vm->update_funcs = &amdgpu_vm_sdma_funcs;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2692
vm->last_update = dma_fence_get_stub();
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2693
vm->last_unlocked = dma_fence_get_stub();
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2694
vm->last_tlb_flush = dma_fence_get_stub();
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2695
vm->generation = amdgpu_vm_generation(adev, NULL);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2697
rw_init(&vm->eviction_lock, "avmev");
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2698
vm->evicting = false;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2699
vm->tlb_fence_context = dma_fence_context_alloc(1);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2701
r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2713
amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2718
r = amdgpu_vm_pt_clear(adev, vm, root, false);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2722
r = amdgpu_vm_create_task_info(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2728
r = xa_err(xa_store_irq(&adev->vm_manager.pasids, pasid, vm, GFP_KERNEL));
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2732
vm->pasid = pasid;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2735
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2742
if (vm->pasid != 0) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2743
xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2744
vm->pasid = 0;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2746
amdgpu_vm_pt_free_root(adev, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2747
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2751
dma_fence_put(vm->last_tlb_flush);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2752
dma_fence_put(vm->last_unlocked);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2753
ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2754
amdgpu_vm_fini_entities(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2778
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2782
r = amdgpu_bo_reserve(vm->root.bo, true);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2787
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2790
vm->use_cpu_for_update ? "CPU" : "SDMA");
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2791
WARN_ONCE((vm->use_cpu_for_update &&
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2795
if (vm->use_cpu_for_update) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2797
r = amdgpu_bo_sync_wait(vm->root.bo,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2802
vm->update_funcs = &amdgpu_vm_cpu_funcs;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2803
r = amdgpu_vm_pt_map_tables(adev, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2808
vm->update_funcs = &amdgpu_vm_sdma_funcs;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2811
dma_fence_put(vm->last_update);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2812
vm->last_update = dma_fence_get_stub();
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2813
vm->is_compute_context = true;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2816
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
282
spin_lock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2820
static int amdgpu_vm_stats_is_zero(struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2823
if (!(drm_memory_stats_is_zero(&vm->stats[i].drm) &&
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2824
vm->stats[i].evicted == 0))
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
283
list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2839
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
284
spin_unlock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2847
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2849
root = amdgpu_bo_ref(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2852
if (vm->pasid != 0) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2853
xa_erase_irq(&adev->vm_manager.pasids, vm->pasid);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2854
vm->pasid = 0;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2856
dma_fence_wait(vm->last_unlocked, false);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2857
dma_fence_put(vm->last_unlocked);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2858
dma_fence_wait(vm->last_tlb_flush, false);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2860
spin_lock_irqsave(vm->last_tlb_flush->lock, flags);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2861
spin_unlock_irqrestore(vm->last_tlb_flush->lock, flags);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2862
dma_fence_put(vm->last_tlb_flush);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2864
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2866
amdgpu_vm_prt_fini(adev, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2871
amdgpu_vm_free_mapping(adev, vm, mapping, NULL);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2874
amdgpu_vm_pt_free_root(adev, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2877
WARN_ON(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2879
amdgpu_vm_fini_entities(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2881
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2885
&vm->va.rb_root, rb) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2893
dma_fence_put(vm->last_update);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2896
amdgpu_vmid_free_reserved(adev, vm, i);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2899
ttm_lru_bulk_move_fini(&adev->mman.bdev, &vm->lru_bulk_move);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2901
if (!amdgpu_vm_stats_is_zero(vm)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2902
struct amdgpu_task_info *ti = vm->task_info;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2909
amdgpu_vm_put_task_info(vm->task_info);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
297
amdgpu_vm_assert_locked(vm_bo->vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
299
spin_lock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2993
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
300
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3002
return amdgpu_vmid_alloc_reserved(adev, vm, AMDGPU_GFXHUB(0));
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3004
amdgpu_vmid_free_reserved(adev, vm, AMDGPU_GFXHUB(0));
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
301
spin_unlock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3035
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3039
vm = xa_load(&adev->vm_manager.pasids, pasid);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3040
if (vm) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3041
root = amdgpu_bo_ref(vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3042
is_compute_context = vm->is_compute_context;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3065
vm = xa_load(&adev->vm_manager.pasids, pasid);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3066
if (vm && vm->root.bo != root)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3067
vm = NULL;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3069
if (!vm)
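
The two xa_load() hits above (amdgpu_vm.c's fault-handling path) follow a lookup-pin-revalidate shape: the VM is found by PASID under the xarray lock, its root buffer is referenced before the lock drops, and after the slow work the same PASID is looked up again and compared against the pinned root to detect that the VM was torn down and replaced in the meantime. A minimal userspace model of that shape; all names here (vm_table, root_obj, handle_fault) are hypothetical.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct root_obj { int refcount; };
struct vm { struct root_obj *root; };

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct vm *vm_table[16];                 /* index: pasid */

static struct root_obj *root_get(struct root_obj *r) { r->refcount++; return r; }
static void root_put(struct root_obj *r) { if (--r->refcount == 0) free(r); }

static int handle_fault(unsigned pasid)
{
    struct vm *vm;
    struct root_obj *root;

    pthread_mutex_lock(&table_lock);
    vm = vm_table[pasid];
    if (!vm) { pthread_mutex_unlock(&table_lock); return -1; }
    root = root_get(vm->root);                  /* pin before unlocking */
    pthread_mutex_unlock(&table_lock);

    /* ... slow work that may sleep: reserve the root, build page tables ... */

    pthread_mutex_lock(&table_lock);
    vm = vm_table[pasid];
    if (vm && vm->root != root)                 /* same pasid, different VM */
        vm = NULL;
    pthread_mutex_unlock(&table_lock);

    root_put(root);
    return vm ? 0 : -1;
}

int main(void)
{
    struct vm v = { .root = malloc(sizeof(struct root_obj)) };

    v.root->refcount = 1;
    vm_table[3] = &v;
    printf("fault on pasid 3 -> %d\n", handle_fault(3));
    return 0;
}
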
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3098
r = amdgpu_vm_update_range(adev, vm, true, false, false, false,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3103
r = amdgpu_vm_update_pdes(adev, vm, true);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3125
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3142
amdgpu_vm_assert_locked(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3144
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3146
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3155
list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3164
list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
317
amdgpu_vm_assert_locked(vm_bo->vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3173
list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
318
spin_lock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3182
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
319
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3191
list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3196
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
320
spin_unlock(&vm_bo->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3230
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3235
vm = xa_load(&adev->vm_manager.pasids, pasid);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3241
if (vm && status) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3242
vm->fault_info.addr = addr;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3243
vm->fault_info.status = status;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3253
vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_GFX;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3254
vm->fault_info.vmhub |=
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3257
vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM0;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3258
vm->fault_info.vmhub |=
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3261
vm->fault_info.vmhub = AMDGPU_VMHUB_TYPE_MM1;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3262
vm->fault_info.vmhub |=
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3280
bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
3282
return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
330
static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
334
amdgpu_vm_assert_locked(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
336
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
337
list_splice_init(&vm->done, &vm->invalidated);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
338
list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
341
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
346
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
348
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
350
spin_unlock(&vm->status_lock);
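
Most of the amdgpu_vm.c hits in this listing touch the per-VM buffer-object state machine: each BO base sits on exactly one intrusive list (evicted, relocated, moved, invalidated, done, idle), every transition is a list_move() under vm->status_lock, and the reset path above splices a whole list at once with list_splice_init(&vm->done, &vm->invalidated). The sketch below re-implements just the two list primitives involved so the pointer surgery is visible; it is a self-contained model, not the kernel's <linux/list.h>.

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *h)
{
    e->next = h->next; e->prev = h;
    h->next->prev = e; h->next = e;
}

/* list_move(): unlink the entry from wherever it is, relink onto @h. */
static void list_move(struct list_head *e, struct list_head *h)
{
    list_del(e);
    list_add(e, h);
}

/* list_splice_init(): move the whole contents of @from onto @to. */
static void list_splice_init(struct list_head *from, struct list_head *to)
{
    if (from->next == from)
        return;
    from->next->prev = to;          /* first element now hangs off @to  */
    from->prev->next = to->next;    /* last element joins @to's old head */
    to->next->prev = from->prev;
    to->next = from->next;
    list_init(from);                /* @from is empty again */
}

struct bo { struct list_head vm_status; int id; };   /* vm_status first */

int main(void)
{
    struct list_head done, invalidated, *p;
    struct bo a = { .id = 1 }, b = { .id = 2 };

    list_init(&done); list_init(&invalidated);
    list_add(&a.vm_status, &done);
    list_add(&b.vm_status, &done);
    list_move(&a.vm_status, &done);              /* state transition */

    /* reset: everything "done" becomes "invalidated" again */
    list_splice_init(&done, &invalidated);

    for (p = invalidated.next; p != &invalidated; p = p->next)
        printf("invalidated: bo %d\n", ((struct bo *)p)->id);
    return 0;
}
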
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
363
struct amdgpu_vm *vm = base->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
370
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
375
vm->stats[bo_memtype].drm.shared += size;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
376
vm->stats[bo_memtype].drm.private -= size;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
378
vm->stats[bo_memtype].drm.shared -= size;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
379
vm->stats[bo_memtype].drm.private += size;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
382
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
413
struct amdgpu_vm *vm = base->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
422
vm->stats[bo_memtype].drm.shared += size;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
424
vm->stats[bo_memtype].drm.private += size;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
429
vm->stats[res_memtype].drm.resident += size;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
434
vm->stats[res_memtype].drm.purgeable += size;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
436
vm->stats[bo_memtype].evicted += size;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
452
struct amdgpu_vm *vm = base->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
454
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
456
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
470
struct amdgpu_vm *vm, struct amdgpu_bo *bo)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
472
base->vm = vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
482
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
485
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
487
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
490
dma_resv_assert_held(vm->root.bo->tbo.base.resv);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
492
ttm_bo_set_bulk_move(&bo->tbo, &vm->lru_bulk_move);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
519
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
523
return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
535
int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
538
struct list_head *prev = &vm->done;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
544
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
545
while (!list_is_head(prev->next, &vm->done)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
547
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
555
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
558
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
573
struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
576
ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
582
struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
586
r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
592
return drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
597
drm_sched_entity_destroy(&vm->immediate);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
602
static void amdgpu_vm_fini_entities(struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
604
drm_sched_entity_destroy(&vm->immediate);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
605
drm_sched_entity_destroy(&vm->delayed);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
617
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
621
if (!vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
624
result += lower_32_bits(vm->generation);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
626
if (drm_sched_entity_error(&vm->delayed))
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
648
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
653
uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
658
if (vm->generation != new_vm_generation) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
659
vm->generation = new_vm_generation;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
660
amdgpu_vm_bo_reset_state_machine(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
661
amdgpu_vm_fini_entities(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
662
r = amdgpu_vm_init_entities(adev, vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
667
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
668
while (!list_empty(&vm->evicted)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
669
bo_base = list_first_entry(&vm->evicted,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
672
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
683
vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
686
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
688
while (ticket && !list_empty(&vm->evicted_user)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
689
bo_base = list_first_entry(&vm->evicted_user,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
692
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
703
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
705
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
707
amdgpu_vm_eviction_lock(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
708
vm->evicting = false;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
709
amdgpu_vm_eviction_unlock(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
724
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
728
amdgpu_vm_assert_locked(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
730
amdgpu_vm_eviction_lock(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
731
ret = !vm->evicting;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
732
amdgpu_vm_eviction_unlock(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
734
spin_lock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
735
ret &= list_empty(&vm->evicted);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
736
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
738
spin_lock(&vm->immediate.lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
739
ret &= !vm->immediate.stopped;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
740
spin_unlock(&vm->immediate.lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
742
spin_lock(&vm->delayed.lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
743
ret &= !vm->delayed.stopped;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
744
spin_unlock(&vm->delayed.lock);
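
amdgpu_vm_ready(), whose body the hits above trace, ANDs several independently locked conditions into one answer; because each lock is dropped before the next is taken, the result is only a snapshot. A compact model of that shape (chan and vm_ready are hypothetical names):

#include <pthread.h>
#include <stdbool.h>

struct chan { pthread_mutex_t lock; bool stopped; };

static bool chan_running(struct chan *c)
{
    bool ret;

    pthread_mutex_lock(&c->lock);
    ret = !c->stopped;
    pthread_mutex_unlock(&c->lock);
    return ret;
}

static bool vm_ready(struct chan *immediate, struct chan *delayed)
{
    bool ret = true;

    ret &= chan_running(immediate);   /* each check holds only its own lock */
    ret &= chan_running(delayed);
    return ret;                       /* a snapshot, not a guarantee */
}

int main(void)
{
    struct chan im = { PTHREAD_MUTEX_INITIALIZER, false };
    struct chan dl = { PTHREAD_MUTEX_INITIALIZER, true };

    return vm_ready(&im, &dl) ? 0 : 1;
}
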
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
970
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
976
if (base->vm != vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
200
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
262
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
513
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
514
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id, uint32_t pasid);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
515
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
516
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
517
int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
519
int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
521
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
522
uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
523
int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
529
struct amdgpu_vm *vm, bool immediate);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
531
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
534
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
537
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
541
struct amdgpu_vm *vm, struct amdgpu_bo *bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
542
int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
560
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
563
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
577
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
579
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
581
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
596
amdgpu_vm_get_task_info_vm(struct amdgpu_vm *vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
604
void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
607
struct amdgpu_vm *vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
608
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
611
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
613
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
616
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
628
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
631
int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
633
bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
642
static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
653
lock = vm->last_tlb_flush->lock;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
659
return atomic64_read(&vm->tlb_seq);
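
amdgpu_vm_tlb_seq() above pairs with the atomic64_inc(&p->vm->tlb_seq) hits in amdgpu_vm_cpu.c and amdgpu_vm_sdma.c: the counter advances once per completed TLB flush, and a reader compares a remembered sample against the current value to tell whether a flush happened in between. A sketch with C11 atomics standing in for the kernel's atomic64_t:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t tlb_seq;

static uint64_t tlb_seq_read(void) { return atomic_load(&tlb_seq); }
static void tlb_flush_done(void)   { atomic_fetch_add(&tlb_seq, 1); }

int main(void)
{
    uint64_t before = tlb_seq_read();   /* remember the epoch */

    tlb_flush_done();                   /* some update path flushed */

    if (tlb_seq_read() != before)
        printf("mappings changed since the snapshot; revalidate\n");
    return 0;
}
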
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
667
static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
669
mutex_lock(&vm->eviction_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
671
vm->saved_flags = memalloc_noreclaim_save();
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
675
static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
677
if (mutex_trylock(&vm->eviction_lock)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
679
vm->saved_flags = memalloc_noreclaim_save();
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
686
static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
689
memalloc_noreclaim_restore(vm->saved_flags);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
691
mutex_unlock(&vm->eviction_lock);
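
The eviction-lock wrappers above carry a second job besides mutual exclusion: vm->saved_flags records the task's reclaim state so that, while the lock is held, allocations cannot recurse into memory reclaim (and from there back into eviction, which would need the same lock). A hypothetical userspace model, with a thread-local flag standing in for memalloc_noreclaim_save()/restore():

#include <pthread.h>
#include <stdbool.h>

static _Thread_local bool noreclaim;          /* models a task flag bit */

static bool noreclaim_save(void)        { bool old = noreclaim; noreclaim = true; return old; }
static void noreclaim_restore(bool old) { noreclaim = old; }

struct vm { pthread_mutex_t eviction_lock; bool saved_flags; };

static void vm_eviction_lock(struct vm *vm)
{
    pthread_mutex_lock(&vm->eviction_lock);
    vm->saved_flags = noreclaim_save();       /* forbid reclaim while held */
}

static void vm_eviction_unlock(struct vm *vm)
{
    noreclaim_restore(vm->saved_flags);       /* restore before dropping lock */
    pthread_mutex_unlock(&vm->eviction_lock);
}

int main(void)
{
    struct vm vm = { PTHREAD_MUTEX_INITIALIZER, false };

    vm_eviction_lock(&vm);
    /* allocations made here must not trigger reclaim/eviction */
    vm_eviction_unlock(&vm);
    return 0;
}
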
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.h
700
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_cpu.c
112
atomic64_inc(&p->vm->tlb_seq);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
158
struct amdgpu_vm *vm, uint64_t start,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
163
cursor->entry = &vm->root;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
287
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
294
amdgpu_vm_pt_start(adev, vm, 0, cursor);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
341
#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
342
for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
360
int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
390
r = vm->update_funcs->map_table(vmbo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
396
params.vm = vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
399
r = vm->update_funcs->prepare(&params, NULL,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
419
r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
424
r = vm->update_funcs->commit(&params, NULL);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
440
int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
468
if (vm->use_cpu_for_update)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
475
if (vm->root.bo)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
476
bp.resv = vm->root.bo->tbo.base.resv;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
496
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
508
amdgpu_vm_eviction_unlock(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
509
r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
510
vm->root.bo->xcp_id);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
511
amdgpu_vm_eviction_lock(vm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
520
amdgpu_vm_bo_base_init(entry, vm, pt_bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
521
r = amdgpu_vm_pt_clear(adev, vm, pt, immediate);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
546
spin_lock(&entry->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
548
spin_unlock(&entry->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
592
spin_lock(&params->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
593
for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
600
spin_unlock(&params->vm->status_lock);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
610
void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
615
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
634
struct amdgpu_vm *vm = params->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
648
return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
716
amdgpu_gmc_override_vm_pte_flags(adev, params->vm, addr, &flags);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
718
params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
812
amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
822
r = amdgpu_vm_pt_alloc(params->adev, params->vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
893
struct amdgpu_vm *vm = params->vm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
906
vm->task_info ? vm->task_info->tgid : 0,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
907
vm->immediate.fence_context);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
957
int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
962
for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_pt.c
969
r = vm->update_funcs->map_table(bo);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_sdma.c
113
ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_sdma.c
120
atomic64_inc(&p->vm->tlb_seq);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_sdma.c
128
swap(p->vm->last_unlocked, tmp);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_sdma.c
131
dma_resv_add_fence(p->vm->root.bo->tbo.base.resv, f,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_sdma.c
47
struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_sdma.c
48
: &p->vm->delayed;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
104
vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
78
void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
92
amdgpu_gmc_flush_gpu_tlb_pasid(adev, vm->pasid, 2, true, 0);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
99
f->pasid = vm->pasid;
sys/dev/pci/drm/amd/amdgpu/amdgpu_xcp.c
460
fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
4548
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
4550
nv_grbm_select(adev, me, pipe, q, vm);
sys/dev/pci/drm/amd/amdgpu/gfx_v11_0.c
1041
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
sys/dev/pci/drm/amd/amdgpu/gfx_v11_0.c
1043
soc21_grbm_select(adev, me, pipe, q, vm);
sys/dev/pci/drm/amd/amdgpu/gfx_v12_0.c
897
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
sys/dev/pci/drm/amd/amdgpu/gfx_v12_0.c
899
soc24_grbm_select(adev, me, pipe, q, vm);
sys/dev/pci/drm/amd/amdgpu/gfx_v6_0.c
3003
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
sys/dev/pci/drm/amd/amdgpu/gfx_v7_0.c
4072
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
sys/dev/pci/drm/amd/amdgpu/gfx_v7_0.c
4074
cik_srbm_select(adev, me, pipe, q, vm);
sys/dev/pci/drm/amd/amdgpu/gfx_v8_0.c
3412
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
sys/dev/pci/drm/amd/amdgpu/gfx_v8_0.c
3414
vi_srbm_select(adev, me, pipe, q, vm);
sys/dev/pci/drm/amd/amdgpu/gfx_v9_0.c
1995
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
sys/dev/pci/drm/amd/amdgpu/gfx_v9_0.c
1997
soc15_grbm_select(adev, me, pipe, q, vm, 0);
sys/dev/pci/drm/amd/amdgpu/gfx_v9_4_3.c
782
u32 me, u32 pipe, u32 q, u32 vm, u32 xcc_id)
sys/dev/pci/drm/amd/amdgpu/gfx_v9_4_3.c
784
soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
sys/dev/pci/drm/amd/amdgpu/gmc_v10_0.c
493
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/gmc_v11_0.c
484
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/gmc_v12_0.c
522
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/gmc_v6_0.c
385
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/gmc_v7_0.c
507
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/gmc_v8_0.c
719
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/gmc_v9_0.c
1110
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/gmc_v9_0.c
1187
KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id);
sys/dev/pci/drm/amd/amdgpu/gmc_v9_0.c
1226
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/gmc_v9_0.c
1262
gmc_v9_0_get_coherence_flags(adev, vm, bo, vm_flags, flags);
sys/dev/pci/drm/amd/amdgpu/gmc_v9_0.c
1266
struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdgpu/gmc_v9_0.c
1302
if (adev->gmc.is_app_apu && vm->mem_id >= 0) {
sys/dev/pci/drm/amd/amdgpu/gmc_v9_0.c
1303
local_node = adev->gmc.mem_partitions[vm->mem_id].numa.node;
sys/dev/pci/drm/amd/amdgpu/gmc_v9_0.c
1320
vm->mem_id, local_node, nid);
sys/dev/pci/drm/amd/amdgpu/mes_userqueue.c
139
queue_input.process_id = queue->vm->pasid;
sys/dev/pci/drm/amd/amdgpu/mes_userqueue.c
145
queue_input.page_table_base_addr = amdgpu_gmc_pd_addr(queue->vm->root.bo);
sys/dev/pci/drm/amd/amdgpu/mes_userqueue.c
71
wptr_vm = queue->vm;
sys/dev/pci/drm/amd/amdgpu/vcn_v1_0.c
2062
struct amdgpu_vm *vm = &fpriv->vm;
sys/dev/pci/drm/amd/amdgpu/vcn_v1_0.c
2073
mapping = amdgpu_vm_bo_lookup_mapping(vm, addr/AMDGPU_GPU_PAGE_SIZE);
sys/dev/pci/drm/amd/amdkfd/kfd_events.c
1307
ti = amdgpu_vm_get_task_info_vm(&drv_priv->vm);
sys/dev/pci/drm/amd/amdkfd/kfd_priv.h
1322
int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user *addr, struct amdgpu_bo **pbo,
sys/dev/pci/drm/amd/amdkfd/kfd_priv.h
1327
void kfd_queue_unref_bo_va(struct amdgpu_vm *vm, struct amdgpu_bo **bo);
sys/dev/pci/drm/amd/amdkfd/kfd_priv.h
1536
struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
sys/dev/pci/drm/amd/amdkfd/kfd_priv.h
1538
amdgpu_vm_flush_compute_tlb(adev, vm, type, pdd->dev->xcc_mask);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
1692
avm = &drv_priv->vm;
sys/dev/pci/drm/amd/amdkfd/kfd_process_queue_manager.c
593
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdkfd/kfd_process_queue_manager.c
600
vm = drm_priv_to_vm(pdd->drm_priv);
sys/dev/pci/drm/amd/amdkfd/kfd_process_queue_manager.c
601
err = amdgpu_bo_reserve(vm->root.bo, false);
sys/dev/pci/drm/amd/amdkfd/kfd_process_queue_manager.c
605
if (kfd_queue_buffer_get(vm, (void *)p->queue_address, &p->ring_bo,
sys/dev/pci/drm/amd/amdkfd/kfd_process_queue_manager.c
609
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdkfd/kfd_process_queue_manager.c
613
kfd_queue_unref_bo_va(vm, &pqn->q->properties.ring_bo);
sys/dev/pci/drm/amd/amdkfd/kfd_process_queue_manager.c
615
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
196
int kfd_queue_buffer_get(struct amdgpu_vm *vm, void __user *addr, struct amdgpu_bo **pbo,
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
206
mapping = amdgpu_vm_bo_lookup_mapping(vm, user_addr);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
237
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
254
vm = drm_priv_to_vm(pdd->drm_priv);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
255
err = amdgpu_bo_reserve(vm->root.bo, false);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
259
err = kfd_queue_buffer_get(vm, properties->write_ptr, &properties->wptr_bo, PAGE_SIZE);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
263
err = kfd_queue_buffer_get(vm, properties->read_ptr, &properties->rptr_bo, PAGE_SIZE);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
267
err = kfd_queue_buffer_get(vm, (void *)properties->queue_address,
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
285
err = kfd_queue_buffer_get(vm, (void *)properties->eop_ring_buffer_address,
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
312
err = kfd_queue_buffer_get(vm, (void *)properties->ctx_save_restore_area_address,
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
317
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
327
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
331
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
363
void kfd_queue_unref_bo_va(struct amdgpu_vm *vm, struct amdgpu_bo **bo)
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
368
bo_va = amdgpu_vm_bo_find(vm, *bo);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
377
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
380
vm = drm_priv_to_vm(pdd->drm_priv);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
381
err = amdgpu_bo_reserve(vm->root.bo, false);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
385
kfd_queue_unref_bo_va(vm, &properties->wptr_bo);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
386
kfd_queue_unref_bo_va(vm, &properties->rptr_bo);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
387
kfd_queue_unref_bo_va(vm, &properties->ring_bo);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
388
kfd_queue_unref_bo_va(vm, &properties->eop_buf_bo);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
389
kfd_queue_unref_bo_va(vm, &properties->cwsr_bo);
sys/dev/pci/drm/amd/amdkfd/kfd_queue.c
391
amdgpu_bo_unreserve(vm->root.bo);
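
The kfd_queue.c hits show a single reservation of the VM's root object bracketing a batch of buffer lookups, with every error path funnelling through one unreserve. A sketch of that acquire/cleanup shape; reserve_root(), lookup_buf() and friends are hypothetical stand-ins, not the amdgpu/KFD API:

#include <stdio.h>

static int reserve_root(void)    { return 0; }   /* 0 on success */
static void unreserve_root(void) { }
static int lookup_buf(const char *name) { printf("pin %s\n", name); return 0; }

static int queue_acquire_buffers(void)
{
    int err;

    err = reserve_root();
    if (err)
        return err;

    err = lookup_buf("wptr");
    if (err)
        goto out_unreserve;
    err = lookup_buf("rptr");
    if (err)
        goto out_unreserve;
    err = lookup_buf("ring");
    if (err)
        goto out_unreserve;
    err = lookup_buf("eop");

out_unreserve:
    unreserve_root();               /* single exit: lock always dropped */
    return err;
}

int main(void) { return queue_acquire_buffers(); }
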
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1211
svm_range_get_pte_flags(struct kfd_node *node, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1323
amdgpu_gmc_get_vm_pte(node->adev, vm, NULL, mapping_flags, &pte_flags);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1331
svm_range_unmap_from_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1339
return amdgpu_vm_update_range(adev, vm, false, true, true, false, NULL, start,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1407
struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1433
pte_flags = svm_range_get_pte_flags(pdd->dev, vm, prange, last_domain);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1446
r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, true,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1451
NULL, dma_addr, &vm->last_update);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1463
r = amdgpu_vm_update_pdes(adev, vm, false);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1471
*fence = dma_fence_get(vm->last_update);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1545
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1558
vm = drm_priv_to_vm(pdd->drm_priv);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1560
r = amdgpu_vm_lock_pd(vm, &ctx->exec, 2);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2826
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2831
vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2832
r = amdgpu_bo_reserve(vm->root.bo, false);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2837
node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2857
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2860
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3373
struct amdgpu_vm *vm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3378
vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3379
r = amdgpu_bo_reserve(vm->root.bo, false);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3383
node = interval_tree_iter_first(&vm->va, start, last);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3393
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3396
amdgpu_bo_unreserve(vm->root.bo);
sys/dev/pci/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
1439
src->dynamic_metadata_enable, src->vm);
sys/dev/pci/drm/amd/display/dc/dml/display_mode_lib.c
187
dml_print("DML PARAMS: vm = %d\n", pipe_src->vm);
sys/dev/pci/drm/amd/display/dc/dml/display_mode_structs.h
390
unsigned char vm;
sys/dev/pci/drm/amd/display/dc/dml/display_mode_vba.c
882
mode_lib->vba.GPUVMEnable = mode_lib->vba.GPUVMEnable || !!pipes[k].pipe.src.gpuvm || !!pipes[k].pipe.src.vm;
sys/dev/pci/drm/amd/display/dc/dml/display_mode_vba.c
890
mode_lib->vba.HostVMEnable = mode_lib->vba.HostVMEnable || !!pipes[k].pipe.src.hostvm || !!pipes[k].pipe.src.vm;
sys/dev/pci/drm/drm_gpuvm.c
1318
struct drm_gpuvm *gpuvm = vm_exec->vm;
sys/dev/pci/drm/drm_gpuvm.c
1410
struct drm_gpuvm *gpuvm = vm_exec->vm;
sys/dev/pci/drm/drm_gpuvm.c
1553
vm_bo->vm = drm_gpuvm_get(gpuvm);
sys/dev/pci/drm/drm_gpuvm.c
1573
struct drm_gpuvm *gpuvm = vm_bo->vm;
sys/dev/pci/drm/drm_gpuvm.c
1632
if (vm_bo->vm == gpuvm)
sys/dev/pci/drm/drm_gpuvm.c
1715
struct drm_gpuvm *gpuvm = __vm_bo->vm;
sys/dev/pci/drm/drm_gpuvm.c
1744
struct drm_gpuvm *gpuvm = vm_bo->vm;
sys/dev/pci/drm/drm_gpuvm.c
1766
struct drm_gpuvm *gpuvm = vm_bo->vm;
sys/dev/pci/drm/drm_gpuvm.c
1799
va->vm = gpuvm;
sys/dev/pci/drm/drm_gpuvm.c
1855
drm_gpuva_it_remove(va, &va->vm->rb.tree);
sys/dev/pci/drm/drm_gpuvm.c
1872
struct drm_gpuvm *gpuvm = va->vm;
sys/dev/pci/drm/drm_gpuvm.c
1881
drm_gpuvm_put(va->vm);
sys/dev/pci/drm/drm_gpuvm.c
1903
struct drm_gpuvm *gpuvm = va->vm;
sys/dev/pci/drm/drm_gpuvm.c
1943
drm_gem_gpuva_assert_lock_held(va->vm, obj);
sys/dev/pci/drm/drm_gpuvm.c
2093
struct drm_gpuvm *gpuvm = va->vm;
sys/dev/pci/drm/drm_gpuvm.c
2652
struct drm_gpuvm *vm;
sys/dev/pci/drm/drm_gpuvm.c
2655
struct drm_gpuvm *gpuvm = args->vm;
sys/dev/pci/drm/drm_gpuvm.c
2716
struct drm_gpuvm *vm;
sys/dev/pci/drm/drm_gpuvm.c
2727
args.vm = gpuvm;
sys/dev/pci/drm/drm_gpuvm.c
2850
struct drm_gpuvm *vm;
sys/dev/pci/drm/drm_gpuvm.c
2861
args.vm = gpuvm;
sys/dev/pci/drm/drm_gpuvm.c
2957
drm_gem_gpuva_assert_lock_held(vm_bo->vm, vm_bo->obj);
sys/dev/pci/drm/drm_gpuvm.c
2966
op = gpuva_op_alloc(vm_bo->vm);
sys/dev/pci/drm/drm_gpuvm.c
2980
drm_gpuva_ops_free(vm_bo->vm, ops);
sys/dev/pci/drm/drm_gpuvm.c
888
__drm_gpuvm_bo_list_add((__vm_bo)->vm, \
sys/dev/pci/drm/drm_gpuvm.c
889
__lock ? &(__vm_bo)->vm->__list_name.lock : \
sys/dev/pci/drm/drm_gpuvm.c
892
&(__vm_bo)->vm->__list_name.list)
sys/dev/pci/drm/drm_gpuvm.c
917
__drm_gpuvm_bo_list_del((__vm_bo)->vm, \
sys/dev/pci/drm/drm_gpuvm.c
918
__lock ? &(__vm_bo)->vm->__list_name.lock : \
sys/dev/pci/drm/drm_gpuvm.c
932
__drm_gpuvm_bo_list_del((__vm_bo)->vm, \
sys/dev/pci/drm/drm_gpuvm.c
933
__lock ? &(__vm_bo)->vm->__list_name.lock : \
sys/dev/pci/drm/drm_modes.c
1071
void drm_display_mode_from_videomode(const struct videomode *vm,
sys/dev/pci/drm/drm_modes.c
1074
dmode->hdisplay = vm->hactive;
sys/dev/pci/drm/drm_modes.c
1075
dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
sys/dev/pci/drm/drm_modes.c
1076
dmode->hsync_end = dmode->hsync_start + vm->hsync_len;
sys/dev/pci/drm/drm_modes.c
1077
dmode->htotal = dmode->hsync_end + vm->hback_porch;
sys/dev/pci/drm/drm_modes.c
1079
dmode->vdisplay = vm->vactive;
sys/dev/pci/drm/drm_modes.c
1080
dmode->vsync_start = dmode->vdisplay + vm->vfront_porch;
sys/dev/pci/drm/drm_modes.c
1081
dmode->vsync_end = dmode->vsync_start + vm->vsync_len;
sys/dev/pci/drm/drm_modes.c
1082
dmode->vtotal = dmode->vsync_end + vm->vback_porch;
sys/dev/pci/drm/drm_modes.c
1084
dmode->clock = vm->pixelclock / 1000;
sys/dev/pci/drm/drm_modes.c
1087
if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
sys/dev/pci/drm/drm_modes.c
1089
else if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
sys/dev/pci/drm/drm_modes.c
1091
if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
sys/dev/pci/drm/drm_modes.c
1093
else if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
sys/dev/pci/drm/drm_modes.c
1095
if (vm->flags & DISPLAY_FLAGS_INTERLACED)
sys/dev/pci/drm/drm_modes.c
1097
if (vm->flags & DISPLAY_FLAGS_DOUBLESCAN)
sys/dev/pci/drm/drm_modes.c
1099
if (vm->flags & DISPLAY_FLAGS_DOUBLECLK)
sys/dev/pci/drm/drm_modes.c
1113
struct videomode *vm)
sys/dev/pci/drm/drm_modes.c
1115
vm->hactive = dmode->hdisplay;
sys/dev/pci/drm/drm_modes.c
1116
vm->hfront_porch = dmode->hsync_start - dmode->hdisplay;
sys/dev/pci/drm/drm_modes.c
1117
vm->hsync_len = dmode->hsync_end - dmode->hsync_start;
sys/dev/pci/drm/drm_modes.c
1118
vm->hback_porch = dmode->htotal - dmode->hsync_end;
sys/dev/pci/drm/drm_modes.c
1120
vm->vactive = dmode->vdisplay;
sys/dev/pci/drm/drm_modes.c
1121
vm->vfront_porch = dmode->vsync_start - dmode->vdisplay;
sys/dev/pci/drm/drm_modes.c
1122
vm->vsync_len = dmode->vsync_end - dmode->vsync_start;
sys/dev/pci/drm/drm_modes.c
1123
vm->vback_porch = dmode->vtotal - dmode->vsync_end;
sys/dev/pci/drm/drm_modes.c
1125
vm->pixelclock = dmode->clock * 1000;
sys/dev/pci/drm/drm_modes.c
1127
vm->flags = 0;
sys/dev/pci/drm/drm_modes.c
1129
vm->flags |= DISPLAY_FLAGS_HSYNC_HIGH;
sys/dev/pci/drm/drm_modes.c
1131
vm->flags |= DISPLAY_FLAGS_HSYNC_LOW;
sys/dev/pci/drm/drm_modes.c
1133
vm->flags |= DISPLAY_FLAGS_VSYNC_HIGH;
sys/dev/pci/drm/drm_modes.c
1135
vm->flags |= DISPLAY_FLAGS_VSYNC_LOW;
sys/dev/pci/drm/drm_modes.c
1137
vm->flags |= DISPLAY_FLAGS_INTERLACED;
sys/dev/pci/drm/drm_modes.c
1139
vm->flags |= DISPLAY_FLAGS_DOUBLESCAN;
sys/dev/pci/drm/drm_modes.c
1141
vm->flags |= DISPLAY_FLAGS_DOUBLECLK;
sys/dev/pci/drm/drm_modes.c
1156
void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
sys/dev/pci/drm/drm_modes.c
1159
if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
sys/dev/pci/drm/drm_modes.c
1161
if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
sys/dev/pci/drm/drm_modes.c
1164
if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
sys/dev/pci/drm/drm_modes.c
1166
if (vm->flags & DISPLAY_FLAGS_SYNC_NEGEDGE)
sys/dev/pci/drm/drm_modes.c
1169
if (vm->flags & DISPLAY_FLAGS_DE_LOW)
sys/dev/pci/drm/drm_modes.c
1171
if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
sys/dev/pci/drm/drm_modes.c
1195
struct videomode vm;
sys/dev/pci/drm/drm_modes.c
1198
ret = of_get_videomode(np, &vm, index);
sys/dev/pci/drm/drm_modes.c
1202
drm_display_mode_from_videomode(&vm, dmode);
sys/dev/pci/drm/drm_modes.c
1204
drm_bus_flags_from_videomode(&vm, bus_flags);
sys/dev/pci/drm/drm_modes.c
1207
np, vm.hactive, vm.vactive, DRM_MODE_ARG(dmode));
sys/dev/pci/drm/drm_modes.c
1230
struct videomode vm;
sys/dev/pci/drm/drm_modes.c
1237
videomode_from_timing(&timing, &vm);
sys/dev/pci/drm/drm_modes.c
1240
drm_display_mode_from_videomode(&vm, dmode);
sys/dev/pci/drm/drm_modes.c
1242
drm_bus_flags_from_videomode(&vm, bus_flags);
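
The drm_modes.c hits convert between struct videomode, which stores porch and sync widths, and struct drm_display_mode, which stores absolute sync positions; the conversion is pure accumulation, and drm_display_mode keeps the clock in kHz (hence the division by 1000). Worked through below for the standard CEA 1920x1080@60 timing:

#include <stdio.h>

int main(void)
{
    /* CEA-861 1080p60: front porch / sync / back porch */
    unsigned hactive = 1920, hfp = 88, hsync = 44, hbp = 148;
    unsigned vactive = 1080, vfp = 4,  vsync = 5,  vbp = 36;
    unsigned long pixelclock = 148500000;       /* Hz */

    unsigned hsync_start = hactive + hfp;       /* 2008 */
    unsigned hsync_end   = hsync_start + hsync; /* 2052 */
    unsigned htotal      = hsync_end + hbp;     /* 2200 */
    unsigned vsync_start = vactive + vfp;       /* 1084 */
    unsigned vsync_end   = vsync_start + vsync; /* 1089 */
    unsigned vtotal      = vsync_end + vbp;     /* 1125 */

    printf("htotal %u, vtotal %u, clock %lu kHz\n",
           htotal, vtotal, pixelclock / 1000);
    printf("refresh ~%lu Hz\n",
           pixelclock / (htotal * (unsigned long)vtotal));   /* 60 */
    return 0;
}
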
sys/dev/pci/drm/i915/display/intel_dpt.c
111
static void dpt_unbind_vma(struct i915_address_space *vm,
sys/dev/pci/drm/i915/display/intel_dpt.c
114
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
sys/dev/pci/drm/i915/display/intel_dpt.c
117
static void dpt_cleanup(struct i915_address_space *vm)
sys/dev/pci/drm/i915/display/intel_dpt.c
119
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
sys/dev/pci/drm/i915/display/intel_dpt.c
124
struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
sys/dev/pci/drm/i915/display/intel_dpt.c
127
struct drm_i915_private *i915 = vm->i915;
sys/dev/pci/drm/i915/display/intel_dpt.c
129
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
sys/dev/pci/drm/i915/display/intel_dpt.c
177
void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm)
sys/dev/pci/drm/i915/display/intel_dpt.c
179
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
sys/dev/pci/drm/i915/display/intel_dpt.c
19
struct i915_address_space vm;
sys/dev/pci/drm/i915/display/intel_dpt.c
249
struct i915_address_space *vm;
sys/dev/pci/drm/i915/display/intel_dpt.c
26
#define i915_is_dpt(vm) ((vm)->is_dpt)
sys/dev/pci/drm/i915/display/intel_dpt.c
287
vm = &dpt->vm;
sys/dev/pci/drm/i915/display/intel_dpt.c
289
vm->gt = to_gt(i915);
sys/dev/pci/drm/i915/display/intel_dpt.c
29
i915_vm_to_dpt(struct i915_address_space *vm)
sys/dev/pci/drm/i915/display/intel_dpt.c
290
vm->i915 = i915;
sys/dev/pci/drm/i915/display/intel_dpt.c
291
vm->dma = i915->drm.dev;
sys/dev/pci/drm/i915/display/intel_dpt.c
292
vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
sys/dev/pci/drm/i915/display/intel_dpt.c
293
vm->is_dpt = true;
sys/dev/pci/drm/i915/display/intel_dpt.c
295
i915_address_space_init(vm, VM_CLASS_DPT);
sys/dev/pci/drm/i915/display/intel_dpt.c
297
vm->insert_page = dpt_insert_page;
sys/dev/pci/drm/i915/display/intel_dpt.c
298
vm->clear_range = dpt_clear_range;
sys/dev/pci/drm/i915/display/intel_dpt.c
299
vm->insert_entries = dpt_insert_entries;
sys/dev/pci/drm/i915/display/intel_dpt.c
300
vm->cleanup = dpt_cleanup;
sys/dev/pci/drm/i915/display/intel_dpt.c
302
vm->vma_ops.bind_vma = dpt_bind_vma;
sys/dev/pci/drm/i915/display/intel_dpt.c
303
vm->vma_ops.unbind_vma = dpt_unbind_vma;
sys/dev/pci/drm/i915/display/intel_dpt.c
305
vm->pte_encode = vm->gt->ggtt->vm.pte_encode;
sys/dev/pci/drm/i915/display/intel_dpt.c
31
BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
sys/dev/pci/drm/i915/display/intel_dpt.c
310
return &dpt->vm;
sys/dev/pci/drm/i915/display/intel_dpt.c
313
void intel_dpt_destroy(struct i915_address_space *vm)
sys/dev/pci/drm/i915/display/intel_dpt.c
315
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
sys/dev/pci/drm/i915/display/intel_dpt.c
318
i915_vm_put(&dpt->vm);
sys/dev/pci/drm/i915/display/intel_dpt.c
32
drm_WARN_ON(&vm->i915->drm, !i915_is_dpt(vm));
sys/dev/pci/drm/i915/display/intel_dpt.c
33
return container_of(vm, struct i915_dpt, vm);
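
i915_vm_to_dpt() above leans on two stock kernel idioms: container_of() to recover the wrapper struct from a pointer to an embedded member, and BUILD_BUG_ON(offsetof(struct i915_dpt, vm)) asserting that the member sits at offset zero, which makes the conversion effectively a plain cast. A self-contained demo (struct dpt here is illustrative, not the i915 definition):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct address_space { unsigned long total; };
struct dpt {
    struct address_space vm;    /* must stay the first member */
    void *obj;
};

int main(void)
{
    struct dpt dpt = { .vm.total = 4096 };
    struct address_space *vm = &dpt.vm;

    /* the equivalent of BUILD_BUG_ON(offsetof(struct i915_dpt, vm)) */
    static_assert(offsetof(struct dpt, vm) == 0, "vm must be first");

    printf("recovered dpt: %p == %p\n",
           (void *)container_of(vm, struct dpt, vm), (void *)&dpt);
    return 0;
}
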
sys/dev/pci/drm/i915/display/intel_dpt.c
41
static void dpt_insert_page(struct i915_address_space *vm,
sys/dev/pci/drm/i915/display/intel_dpt.c
47
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
sys/dev/pci/drm/i915/display/intel_dpt.c
51
vm->pte_encode(addr, pat_index, flags));
sys/dev/pci/drm/i915/display/intel_dpt.c
54
static void dpt_insert_entries(struct i915_address_space *vm,
sys/dev/pci/drm/i915/display/intel_dpt.c
59
struct i915_dpt *dpt = i915_vm_to_dpt(vm);
sys/dev/pci/drm/i915/display/intel_dpt.c
61
const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
sys/dev/pci/drm/i915/display/intel_dpt.c
76
static void dpt_clear_range(struct i915_address_space *vm,
sys/dev/pci/drm/i915/display/intel_dpt.c
81
static void dpt_bind_vma(struct i915_address_space *vm,
sys/dev/pci/drm/i915/display/intel_dpt.c
94
if (vm->has_read_only && vma_res->bi.readonly)
sys/dev/pci/drm/i915/display/intel_dpt.c
99
vm->insert_entries(vm, vma_res, pat_index, pte_flags);
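
intel_dpt_create() fills the address space with function pointers at creation time (insert_page, clear_range, insert_entries, cleanup, bind/unbind) and all later callers dispatch through them, as the dpt_bind_vma hit above does with vm->insert_entries(). A stripped-down rendering of that ops-table pattern, with hypothetical names:

#include <stdio.h>

struct addr_space {
    unsigned long total;
    void (*insert_page)(struct addr_space *as, unsigned long offset);
    void (*clear_range)(struct addr_space *as, unsigned long start,
                        unsigned long length);
};

static void dpt_insert_page(struct addr_space *as, unsigned long offset)
{
    printf("insert page at %#lx of %#lx\n", offset, as->total);
}

static void dpt_clear_range(struct addr_space *as, unsigned long start,
                            unsigned long length)
{
    printf("clear [%#lx, %#lx)\n", start, start + length);
}

int main(void)
{
    struct addr_space as = { .total = 1UL << 20 };

    /* creation: wire up the backend, exactly once */
    as.insert_page = dpt_insert_page;
    as.clear_range = dpt_clear_range;

    /* callers never know which backend they are talking to */
    as.insert_page(&as, 0x1000);
    as.clear_range(&as, 0, as.total);
    return 0;
}
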
sys/dev/pci/drm/i915/display/intel_dpt.h
16
void intel_dpt_destroy(struct i915_address_space *vm);
sys/dev/pci/drm/i915/display/intel_dpt.h
17
struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
sys/dev/pci/drm/i915/display/intel_dpt.h
19
void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm);
sys/dev/pci/drm/i915/display/intel_fb.c
2306
struct i915_address_space *vm;
sys/dev/pci/drm/i915/display/intel_fb.c
2308
vm = intel_dpt_create(intel_fb);
sys/dev/pci/drm/i915/display/intel_fb.c
2309
if (IS_ERR(vm)) {
sys/dev/pci/drm/i915/display/intel_fb.c
2311
ret = PTR_ERR(vm);
sys/dev/pci/drm/i915/display/intel_fb.c
2315
intel_fb->dpt_vm = vm;
sys/dev/pci/drm/i915/display/intel_fb_pin.c
28
struct i915_address_space *vm)
sys/dev/pci/drm/i915/display/intel_fb_pin.c
43
if (drm_WARN_ON(&dev_priv->drm, vm->bind_async_flags))
sys/dev/pci/drm/i915/display/intel_fb_pin.c
77
vma = i915_vma_instance(obj, vm, view);
sys/dev/pci/drm/i915/display/intel_hdcp_gsc.c
64
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/display/intel_plane_initial.c
210
ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &orig_mm,
sys/dev/pci/drm/i915/display/intel_plane_initial.c
217
vma = i915_vma_instance(obj, &to_gt(i915)->ggtt->vm, NULL);
sys/dev/pci/drm/i915/display/intel_plane_initial.c
92
dma_addr = intel_ggtt_read_entry(&ggtt->vm, base, &is_present, &is_local);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1293
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1305
vm = ctx->vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1306
if (vm)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1307
i915_vm_put(vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1629
struct i915_address_space *vm = NULL;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1648
if (pc->vm) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1649
vm = i915_vm_get(pc->vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1660
ppgtt->vm.fpriv = pc->fpriv;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1661
vm = &ppgtt->vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1663
if (vm)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1664
ctx->vm = vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1714
if (ctx->vm)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1715
i915_vm_put(ctx->vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1818
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1831
xa_for_each(&file_priv->vm_xa, idx, vm)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1832
i915_vm_put(vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1864
err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1871
ppgtt->vm.fpriv = file_priv;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1875
i915_vm_put(&ppgtt->vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1884
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1892
vm = xa_erase(&file_priv->vm_xa, args->vm_id);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1893
if (!vm)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1896
i915_vm_put(vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1904
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1911
vm = ctx->vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1912
GEM_BUG_ON(!vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1920
i915_vm_get(vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1922
err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1924
i915_vm_put(vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
209
if (pc->vm)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
210
i915_vm_put(pc->vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2569
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2579
vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2580
args->value = vm->total;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2581
i915_vm_put(vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
362
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
365
vm = xa_load(&file_priv->vm_xa, id);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
366
if (vm)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
367
kref_get(&vm->ref);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
370
return vm;
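
i915_gem_vm_lookup() above illustrates the rule that a reference must be taken while the table lock still guarantees the entry is alive (the kref_get() happens before the unlock), so the caller may keep using the VM afterwards. A minimal pthread model with hypothetical names:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct vm { int refcount; };

static pthread_mutex_t xa_lock = PTHREAD_MUTEX_INITIALIZER;
static struct vm *vm_xa[8];                   /* models the xarray */

static struct vm *vm_lookup(unsigned id)
{
    struct vm *vm;

    pthread_mutex_lock(&xa_lock);
    vm = vm_xa[id & 7];
    if (vm)
        vm->refcount++;                       /* ref taken under the lock */
    pthread_mutex_unlock(&xa_lock);
    return vm;                                /* caller owns one reference */
}

int main(void)
{
    struct vm v = { .refcount = 1 };
    struct vm *found;

    vm_xa[3] = &v;
    found = vm_lookup(3);
    printf("found=%p refcount=%d\n",
           (void *)found, found ? found->refcount : 0);
    return 0;
}
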
sys/dev/pci/drm/i915/gem/i915_gem_context.c
378
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
389
vm = i915_gem_vm_lookup(fpriv, args->value);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
390
if (!vm)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
393
if (pc->vm)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
394
i915_vm_put(pc->vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
395
pc->vm = vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
993
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
994
ce->vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/gem/i915_gem_context.h
159
return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
sys/dev/pci/drm/i915/gem/i915_gem_context.h
164
GEM_BUG_ON(!!ctx->vm != HAS_FULL_PPGTT(ctx->i915));
sys/dev/pci/drm/i915/gem/i915_gem_context.h
166
return !!ctx->vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.h
172
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.h
174
vm = ctx->vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.h
175
if (!vm)
sys/dev/pci/drm/i915/gem/i915_gem_context.h
176
vm = &to_gt(ctx->i915)->ggtt->vm;
sys/dev/pci/drm/i915/gem/i915_gem_context.h
177
vm = i915_vm_get(vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.h
179
return vm;
sys/dev/pci/drm/i915/gem/i915_gem_context_types.h
194
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/i915_gem_context_types.h
283
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1195
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1199
ggtt->vm.clear_range(&ggtt->vm,
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1202
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1204
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1262
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1285
if (!i915_is_ggtt(batch->vm) ||
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1298
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1300
(&ggtt->vm.mm, &cache->node,
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1304
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1315
ggtt->vm.insert_page(&ggtt->vm,
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1318
i915_gem_get_pat_index(ggtt->vm.i915,
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1459
mutex_lock(&vma->vm->mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1463
mutex_unlock(&vma->vm->mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2233
struct i915_address_space *vm,
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2239
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2287
if (!eb->context->vm->has_read_only) {
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2310
shadow = shadow_batch_pin(eb, pool->obj, eb->context->vm, PIN_USER);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2323
&eb->gt->ggtt->vm,
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2733
if (!i915_vm_tryget(ce->vm)) {
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2764
i915_vm_put(eb->context->vm);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
781
err = mutex_lock_interruptible(&eb->context->vm->mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
783
err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
784
mutex_unlock(&eb->context->vm->mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
792
err = mutex_lock_interruptible(&eb->context->vm->mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
796
err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
797
mutex_unlock(&eb->context->vm->mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
889
struct i915_address_space *vm = eb->context->vm;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
898
if (likely(vma && vma->vm == vm))
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
924
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
1001
mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
417
ret = intel_gt_reset_lock_interruptible(ggtt->vm.gt, &srcu);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
454
ret = mutex_lock_interruptible(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
456
ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
457
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
504
mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
507
mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
527
intel_gt_reset_unlock(ggtt->vm.gt, srcu);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
791
ret = intel_gt_reset_lock_interruptible(ggtt->vm.gt, &srcu);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
828
ret = mutex_lock_interruptible(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
830
ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
831
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
877
mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
880
mutex_unlock(&to_gt(i915)->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
900
intel_gt_reset_unlock(ggtt->vm.gt, srcu);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
983
mutex_lock(&to_gt(i915)->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
404
mutex_lock(&gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
406
&gt->ggtt->vm.bound_list, vm_link) {
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
421
mutex_unlock(&gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
583
if (ggtt->vm.bind_async_flags & I915_VMA_GLOBAL_BIND)
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
592
ggtt->vm.insert_page(&ggtt->vm, addr,
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
594
i915_gem_get_pat_index(ggtt->vm.i915,
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
609
ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
88
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
sys/dev/pci/drm/i915/gem/i915_gem_tiling.c
163
struct drm_i915_private *i915 = vma->vm->i915;
sys/dev/pci/drm/i915/gem/i915_gem_tiling.c
194
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_tiling.c
198
GEM_BUG_ON(vma->vm != &ggtt->vm);
sys/dev/pci/drm/i915/gem/i915_gem_tiling.c
211
list_splice(&unbind, &ggtt->vm.bound_list);
sys/dev/pci/drm/i915/gem/i915_gem_tiling.c
216
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
506
if (!to_gt(i915)->vm->has_read_only)
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1049
i915_vm_put(vm);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1155
vma = i915_vma_instance(obj, ce->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1165
if (err == -ENOSPC && i915_is_ggtt(ce->vm))
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1236
max = min(max, ce->vm->total);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1618
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1650
vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1672
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1753
i915_vm_put(vm);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1762
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1780
vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1806
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1824
i915_vm_put(vm);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1833
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1860
vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1873
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1959
i915_vm_put(vm);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1990
if (!i915_vm_is_4lvl(&ppgtt->vm)) {
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1997
if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
2006
i915_vm_put(&ppgtt->vm);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
32
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
364
struct drm_i915_private *i915 = vma->vm->i915;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
37
vm = ctx->vm;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
38
if (vm)
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
39
WRITE_ONCE(vm->scrub_64K, true);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
425
struct drm_i915_private *i915 = ppgtt->vm.i915;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
461
vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
501
struct drm_i915_private *i915 = ppgtt->vm.i915;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
529
vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
579
struct drm_i915_private *i915 = ppgtt->vm.i915;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
621
vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
717
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
739
vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
740
max_pages = vm->total >> PAGE_SHIFT;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
771
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
851
i915_vm_put(vm);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
862
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
945
vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
982
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
131
if (GRAPHICS_VER(buf->vma->vm->i915) < 9)
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
135
if (buf->tiling == CLIENT_TILING_X && !fastblit_supports_x_tiling(buf->vma->vm->i915))
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
273
struct drm_i915_private *i915 = t->ce->vm->i915;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
284
vma = i915_vma_instance(obj, t->ce->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
435
u64 v = tiled_offset(buf->vma->vm->gt,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
551
t->align = i915_vm_min_alignment(t->ce->vm, INTEL_MEMORY_LOCAL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
553
i915_vm_min_alignment(t->ce->vm, INTEL_MEMORY_SYSTEM));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
559
mutex_lock(&t->ce->vm->mutex);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
561
err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
568
mutex_unlock(&t->ce->vm->mutex);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_coherency.c
112
intel_gt_pm_put(vma->vm->gt, wakeref);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_coherency.c
133
wakeref = intel_gt_pm_get(vma->vm->gt);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_coherency.c
146
intel_gt_pm_put(vma->vm->gt, wakeref);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_coherency.c
99
wakeref = intel_gt_pm_get(vma->vm->gt);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1373
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1403
vm = ctx->vm ?: &to_gt(i915)->ggtt->alias->vm;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1404
if (!vm || !vm->has_read_only) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1424
obj = create_test_object(ce->vm, file, &objects);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1490
static int check_scratch(struct i915_address_space *vm, u64 offset)
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1494
mutex_lock(&vm->mutex);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1495
node = __drm_mm_interval_first(&vm->mm,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1497
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1514
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1522
err = check_scratch(ctx->vm, offset);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1545
vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1546
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1588
i915_vm_put(vm);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1602
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1612
err = check_scratch(ctx->vm, offset);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1619
vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1620
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1655
vm = i915_vm_get(&engine->gt->ggtt->vm);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1656
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1736
i915_vm_put(vm);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1746
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1750
vm = ctx->vm;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1751
if (!vm)
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1754
if (!vm->scratch[0]) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1759
vaddr = __px_vaddr(vm->scratch[0]);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1813
if (ctx_a->vm == ctx_b->vm)
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1825
vm_total = ctx_a->vm->total;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1826
GEM_BUG_ON(ctx_b->vm->total != vm_total);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
448
GEM_BUG_ON(obj->base.size > ce->vm->total);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
451
vma = i915_vma_instance(obj, ce->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
578
create_test_object(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
587
intel_gt_retire_requests(vm->gt);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
589
size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
592
obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
714
obj = create_test_object(ce->vm, file, &objects);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
812
if (!parent->vm) { /* not full-ppgtt; nothing to share */
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
837
ctx = kernel_context(i915, parent->vm);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
847
obj = create_test_object(parent->vm,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
919
GEM_BUG_ON(GRAPHICS_VER(vma->vm->i915) < 8);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
934
intel_gt_chipset_flush(vma->vm->gt);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
957
vma = i915_vma_instance(obj, ce->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
965
batch = i915_vma_instance(rpcs, ce->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
179
vma = i915_vma_instance(import_obj, ce->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
183
struct i915_address_space *vm,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
203
if (vm) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
204
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
425
err = __igt_lmem_pages_migrate(gt, &ppgtt->vm, &deps, &spin,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
436
i915_vm_put(&ppgtt->vm);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1586
vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
337
(1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
474
(1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
551
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
790
wakeref = intel_gt_pm_get(vma->vm->gt);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
802
intel_gt_pm_put(vma->vm->gt, wakeref);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
817
wakeref = intel_gt_pm_get(vma->vm->gt);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
833
intel_gt_pm_put(vma->vm->gt, wakeref);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_object.c
46
to_gt(i915)->ggtt->vm.total + PAGE_SIZE);
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
142
if (GRAPHICS_VER(ce->vm->i915) <= 5)
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
48
const int ver = GRAPHICS_VER(vma->vm->i915);
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
55
obj = i915_gem_object_create_internal(vma->vm->i915, size);
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
92
intel_gt_chipset_flush(vma->vm->gt);
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
94
vma = i915_vma_instance(obj, vma->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/mock_context.c
112
struct i915_address_space *vm)
sys/dev/pci/drm/i915/gem/selftests/mock_context.c
121
if (vm) {
sys/dev/pci/drm/i915/gem/selftests/mock_context.c
122
if (pc->vm)
sys/dev/pci/drm/i915/gem/selftests/mock_context.c
123
i915_vm_put(pc->vm);
sys/dev/pci/drm/i915/gem/selftests/mock_context.c
124
pc->vm = i915_vm_get(vm);
sys/dev/pci/drm/i915/gem/selftests/mock_context.c
45
ctx->vm = &ppgtt->vm;
sys/dev/pci/drm/i915/gem/selftests/mock_context.c
60
if (ctx->vm)
sys/dev/pci/drm/i915/gem/selftests/mock_context.c
61
i915_vm_put(ctx->vm);
sys/dev/pci/drm/i915/gem/selftests/mock_context.h
27
struct i915_address_space *vm);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
110
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
115
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
120
const u32 pte_encode = vm->pte_encode(0, pat_index, flags);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
166
gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
172
static void gen6_alloc_va_range(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
176
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
193
fill32_px(pt, vm->scratch[0]->encode);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
214
with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
221
struct i915_address_space * const vm = &ppgtt->base.vm;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
224
ret = setup_scratch_page(vm);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
228
vm->scratch[0]->encode =
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
229
vm->pte_encode(px_dma(vm->scratch[0]),
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
23
dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
230
i915_gem_get_pat_index(vm->i915,
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
234
vm->scratch[1] = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
235
if (IS_ERR(vm->scratch[1])) {
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
236
ret = PTR_ERR(vm->scratch[1]);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
240
ret = map_pt_dma(vm, vm->scratch[1]);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
244
fill32_px(vm->scratch[1], vm->scratch[0]->encode);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
249
i915_gem_object_put(vm->scratch[1]);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
251
i915_gem_object_put(vm->scratch[0]);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
252
vm->scratch[0] = NULL;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
264
free_pt(&ppgtt->base.vm, pt);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
267
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
269
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
272
free_scratch(vm);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
275
free_pd(&ppgtt->base.vm, ppgtt->base.pd);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
280
static void pd_vma_bind(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
286
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
293
gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
296
static void pd_vma_unbind(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
312
free_pt(&ppgtt->base.vm, pt);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
329
GEM_BUG_ON(!kref_read(&ppgtt->base.vm.ref));
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
341
err = i915_vm_lock_objects(&ppgtt->base.vm, ww);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
382
struct i915_ggtt * const ggtt = ppgtt->base.vm.gt->ggtt;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
390
pd->pt.base = __i915_gem_object_create_internal(ppgtt->base.vm.gt->i915,
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
399
pd->pt.base->base.resv = i915_vm_resv_get(&ppgtt->base.vm);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
400
pd->pt.base->shares_resv_from = &ppgtt->base.vm;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
402
ppgtt->vma = i915_vma_instance(pd->pt.base, &ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
415
free_pd(&ppgtt->base.vm, pd);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
441
ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t));
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
442
ppgtt->base.vm.top = 1;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
444
ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
445
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
446
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
447
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
448
ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
450
ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
451
ppgtt->base.vm.alloc_scratch_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
452
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
467
i915_vm_put(&ppgtt->base.vm);
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
74
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
77
struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
79
const gen6_pte_t scratch_pte = vm->scratch[0]->encode;
sys/dev/pci/drm/i915/gt/gen7_renderclear.c
380
struct drm_i915_private *i915 = vma->vm->i915;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1014
ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1015
ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
102
if (i915_vm_is_4lvl(&ppgtt->vm)) {
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1025
ppgtt->vm.has_read_only = !IS_GRAPHICS_VER(gt->i915, 11, 12);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1028
ppgtt->vm.alloc_pt_dma = alloc_pt_lmem;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1030
ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1038
ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1041
ppgtt->vm.pte_encode = gen12_pte_encode;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1043
ppgtt->vm.pte_encode = gen8_pte_encode;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1045
ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1046
ppgtt->vm.insert_entries = gen8_ppgtt_insert;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1048
ppgtt->vm.insert_page = xehp_ppgtt_insert_entry;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1050
ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1051
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1052
ppgtt->vm.clear_range = gen8_ppgtt_clear;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1053
ppgtt->vm.foreach = gen8_ppgtt_foreach;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1054
ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1056
err = gen8_init_scratch(&ppgtt->vm);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1060
pd = gen8_alloc_top_pd(&ppgtt->vm);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1067
if (!i915_vm_is_4lvl(&ppgtt->vm)) {
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1076
err = gen8_init_rsvd(&ppgtt->vm);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
1083
i915_vm_put(&ppgtt->vm);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
180
static unsigned int gen8_pd_top_count(const struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
182
unsigned int shift = __gen8_pte_shift(vm->top);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
184
return (vm->total + (1ull << shift) - 1) >> shift;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
188
gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
190
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
192
if (vm->top == 2)
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
195
return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
199
gen8_pdp_for_page_address(struct i915_address_space * const vm, const u64 addr)
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
201
return gen8_pdp_for_page_index(vm, addr >> GEN8_PTE_SHIFT);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
204
static void __gen8_ppgtt_cleanup(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
215
__gen8_ppgtt_cleanup(vm, *pde, GEN8_PDES, lvl - 1);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
219
free_px(vm, &pd->pt, lvl);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
222
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
224
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
226
if (vm->rsvd.obj)
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
227
i915_gem_object_put(vm->rsvd.obj);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
229
if (intel_vgpu_active(vm->i915))
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
233
__gen8_ppgtt_cleanup(vm, ppgtt->pd,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
234
gen8_pd_top_count(vm), vm->top);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
236
free_scratch(vm);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
239
static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
243
const struct drm_i915_gem_object * const scratch = vm->scratch[lvl];
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
246
GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
250
__func__, vm, lvl + 1, start, end,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
260
__func__, vm, lvl + 1, idx, start, end);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
262
__gen8_ppgtt_cleanup(vm, as_pd(pt), I915_PDES, lvl);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
268
start = __gen8_ppgtt_clear(vm, as_pd(pt),
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
278
__func__, vm, lvl, start, end,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
293
vm->scratch[0]->encode,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
301
free_px(vm, pt, lvl);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
307
static void gen8_ppgtt_clear(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
312
GEM_BUG_ON(range_overflows(start, length, vm->total));
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
318
__gen8_ppgtt_clear(vm, i915_vm_to_ppgtt(vm)->pd,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
319
start, start + length, vm->top);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
322
static void __gen8_ppgtt_alloc(struct i915_address_space * const vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
329
GEM_BUG_ON(end > vm->total >> GEN8_PTE_SHIFT);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
333
__func__, vm, lvl + 1, *start, end,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
346
__func__, vm, lvl + 1, idx);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
351
fill_px(pt, vm->scratch[lvl]->encode);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
367
__gen8_ppgtt_alloc(vm, stash,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
377
__func__, vm, lvl, *start, end,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
390
static void gen8_ppgtt_alloc(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
396
GEM_BUG_ON(range_overflows(start, length, vm->total));
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
402
__gen8_ppgtt_alloc(vm, stash, i915_vm_to_ppgtt(vm)->pd,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
403
&start, start + length, vm->top);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
406
static void __gen8_ppgtt_foreach(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
409
void (*fn)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
426
__gen8_ppgtt_foreach(vm, as_pd(pt), start, end, lvl,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
429
fn(vm, pt, data);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
439
static void gen8_ppgtt_foreach(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
441
void (*fn)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
449
__gen8_ppgtt_foreach(vm, i915_vm_to_ppgtt(vm)->pd,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
450
&start, start + length, vm->top,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
463
const gen8_pte_t pte_encode = ppgtt->vm.pte_encode(0, pat_index, flags);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
503
xehp_ppgtt_insert_huge(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
509
const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
514
GEM_BUG_ON(!i915_vm_is_4lvl(vm));
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
518
gen8_pdp_for_page_address(vm, start);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
611
static void gen8_ppgtt_insert_huge(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
617
const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
621
GEM_BUG_ON(!i915_vm_is_4lvl(vm));
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
625
gen8_pdp_for_page_address(vm, start);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
700
(i915_vm_has_scratch_64K(vm) &&
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
718
if (I915_SELFTEST_ONLY(vm->scrub_64K)) {
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
721
encode = vm->scratch[0]->encode;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
735
static void gen8_ppgtt_insert(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
740
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
744
if (GRAPHICS_VER_FULL(vm->i915) >= IP_VER(12, 55))
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
745
xehp_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
747
gen8_ppgtt_insert_huge(vm, vma_res, &iter, pat_index, flags);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
753
gen8_pdp_for_page_index(vm, idx);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
763
static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
771
gen8_pdp_for_page_index(vm, idx);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
780
vaddr[gen8_pd_index(idx, 0)] = vm->pte_encode(addr, pat_index, flags);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
784
static void xehp_ppgtt_insert_entry_lm(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
792
gen8_pdp_for_page_index(vm, idx);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
810
vaddr[gen8_pd_index(idx, 0) / 16] = vm->pte_encode(addr, pat_index, flags);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
813
static void xehp_ppgtt_insert_entry(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
820
return xehp_ppgtt_insert_entry_lm(vm, addr, offset,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
823
return gen8_ppgtt_insert_entry(vm, addr, offset, pat_index, flags);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
826
static int gen8_init_scratch(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
836
if (vm->has_read_only && vm->gt->vm && !i915_is_ggtt(vm->gt->vm)) {
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
837
struct i915_address_space *clone = vm->gt->vm;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
841
vm->scratch_order = clone->scratch_order;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
842
for (i = 0; i <= vm->top; i++)
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
843
vm->scratch[i] = i915_gem_object_get(clone->scratch[i]);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
848
ret = setup_scratch_page(vm);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
852
pte_flags = vm->has_read_only;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
853
if (i915_gem_object_is_lmem(vm->scratch[0]))
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
856
vm->scratch[0]->encode =
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
857
vm->pte_encode(px_dma(vm->scratch[0]),
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
858
i915_gem_get_pat_index(vm->i915,
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
862
for (i = 1; i <= vm->top; i++) {
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
865
obj = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
871
ret = map_pt_dma(vm, obj);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
877
fill_px(obj, vm->scratch[i - 1]->encode);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
880
vm->scratch[i] = obj;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
887
i915_gem_object_put(vm->scratch[i]);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
888
vm->scratch[0] = NULL;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
894
struct i915_address_space *vm = &ppgtt->vm;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
898
GEM_BUG_ON(vm->top != 2);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
899
GEM_BUG_ON(gen8_pd_top_count(vm) != GEN8_3LVL_PDPES);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
90
struct drm_i915_private *i915 = ppgtt->vm.i915;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
905
pde = alloc_pd(vm);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
909
err = map_pt_dma(vm, pde->pt.base);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
91
struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
911
free_pd(vm, pde);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
915
fill_px(pde, vm->scratch[1]->encode);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
925
gen8_alloc_top_pd(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
927
const unsigned int count = gen8_pd_top_count(vm);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
937
pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
944
err = map_pt_dma(vm, pd->pt.base);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
948
fill_page_dma(px_base(pd), vm->scratch[vm->top]->encode, count);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
953
free_pd(vm, pd);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
957
static int gen8_init_rsvd(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
959
struct drm_i915_private *i915 = vm->i915;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
964
if (!intel_gt_needs_wa_16018031267(vm->gt))
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
976
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
986
vm->rsvd.vma = i915_vma_make_unshrinkable(vma);
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
987
vm->rsvd.obj = obj;
sys/dev/pci/drm/i915/gt/gen8_ppgtt.c
988
vm->total -= vma->node.size;
sys/dev/pci/drm/i915/gt/intel_context.c
403
GEM_BUG_ON(!engine->gt->vm);
sys/dev/pci/drm/i915/gt/intel_context.c
415
ce->vm = i915_vm_get(engine->gt->vm);
sys/dev/pci/drm/i915/gt/intel_context.c
452
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/gt/intel_context.h
215
ce->wakeref = intel_gt_pm_get(ce->vm->gt);
sys/dev/pci/drm/i915/gt/intel_context.h
232
intel_gt_pm_put_async(ce->vm->gt, ce->wakeref);
sys/dev/pci/drm/i915/gt/intel_context_types.h
98
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gt/intel_engine.h
281
struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1096
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1354
struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1372
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1373
ce->vm = i915_vm_get(vm);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1401
mutex_lock(&hwsp->vm->mutex);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1403
mutex_unlock(&hwsp->vm->mutex);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1419
return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_512K,
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1429
return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2717
struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2792
if (!i915_vm_is_4lvl(request->context->vm)) {
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1005
ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1009
vm->insert_entries(vm, vma_res, pat_index, pte_flags);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1014
static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1018
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1021
ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1030
ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1034
if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1039
err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1043
i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1044
err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1045
i915_gem_object_unlock(ppgtt->vm.scratch[0]);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1055
ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1058
ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1060
GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != intel_ggtt_bind_vma);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1061
ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1063
GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != intel_ggtt_unbind_vma);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1064
ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1066
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1070
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1072
i915_vm_put(&ppgtt->vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1084
i915_vm_put(&ppgtt->vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1086
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1087
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1111
flush_workqueue(ggtt->vm.i915->wq);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1112
i915_gem_drain_freed_objects(ggtt->vm.i915);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1114
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1116
ggtt->vm.skip_pte_rewrite = true;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1118
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1137
ggtt->vm.cleanup(&ggtt->vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1139
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1140
i915_address_space_fini(&ggtt->vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1173
GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1174
dma_resv_fini(&ggtt->vm._resv);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1230
struct drm_i915_private *i915 = ggtt->vm.i915;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1231
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1256
kref_init(&ggtt->vm.resv_ref);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1257
ret = setup_scratch_page(&ggtt->vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1266
if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1269
ggtt->vm.scratch[0]->encode =
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1270
ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1282
struct drm_i915_private *i915 = ggtt->vm.i915;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1283
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1326
kref_init(&ggtt->vm.resv_ref);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1327
ret = setup_scratch_page(&ggtt->vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1336
if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1339
ggtt->vm.scratch[0]->encode =
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1340
ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1350
static void gen6_gmch_remove(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1352
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1357
bus_space_unmap(vm->i915->bst, ggtt->gsm_bsh, ggtt->gsm_size);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1359
free_scratch(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1372
struct drm_i915_private *i915 = ggtt->vm.i915;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1406
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1407
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1408
ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1410
ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1411
ggtt->vm.cleanup = gen6_gmch_remove;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1412
ggtt->vm.insert_page = gen8_ggtt_insert_page;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1413
ggtt->vm.clear_range = nop_clear_range;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1414
ggtt->vm.scratch_range = gen8_ggtt_clear_range;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1416
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1417
ggtt->vm.read_entry = gen8_ggtt_read_entry;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1424
ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1425
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1433
ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1434
ggtt->vm.raw_insert_entries = gen8_ggtt_insert_entries;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1436
ggtt->vm.bind_async_flags =
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1441
ggtt->vm.scratch_range = gen8_ggtt_scratch_range_bind;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1442
ggtt->vm.insert_page = gen8_ggtt_insert_page_bind;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1443
ggtt->vm.insert_entries = gen8_ggtt_insert_entries_bind;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1448
ggtt->vm.raw_insert_page = gen8_ggtt_insert_page;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
145
void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all)
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1451
if (intel_uc_wants_guc_submission(&ggtt->vm.gt->uc))
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1456
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1457
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1460
ggtt->vm.pte_encode = mtl_ggtt_pte_encode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1462
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1464
ggtt->vm.pte_decode = gen8_ggtt_pte_decode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
150
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
153
i915_gem_drain_freed_objects(vm->i915);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
155
mutex_lock(&vm->mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1575
struct drm_i915_private *i915 = ggtt->vm.i915;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
161
save_skip_rewrite = vm->skip_pte_rewrite;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1615
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1617
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1618
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
162
vm->skip_pte_rewrite = true;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1620
ggtt->vm.clear_range = nop_clear_range;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1622
ggtt->vm.clear_range = gen6_ggtt_clear_range;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1623
ggtt->vm.scratch_range = gen6_ggtt_clear_range;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1624
ggtt->vm.insert_page = gen6_ggtt_insert_page;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1625
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1626
ggtt->vm.read_entry = gen6_ggtt_read_entry;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1627
ggtt->vm.cleanup = gen6_gmch_remove;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1632
ggtt->vm.pte_encode = iris_pte_encode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1634
ggtt->vm.pte_encode = hsw_pte_encode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1636
ggtt->vm.pte_encode = byt_pte_encode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1638
ggtt->vm.pte_encode = ivb_pte_encode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
164
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1640
ggtt->vm.pte_encode = snb_pte_encode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1642
ggtt->vm.pte_decode = gen6_pte_decode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1644
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1645
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1655
ggtt->vm.gt = gt;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1656
ggtt->vm.i915 = i915;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1657
ggtt->vm.dma = i915->drm.dev;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1658
dma_resv_init(&ggtt->vm._resv);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1668
dma_resv_fini(&ggtt->vm._resv);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1672
if ((ggtt->vm.total - 1) >> 32) {
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1676
ggtt->vm.total >> 20);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1677
ggtt->vm.total = 1ULL << 32;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1679
min_t(u64, ggtt->mappable_end, ggtt->vm.total);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1682
if (ggtt->mappable_end > ggtt->vm.total) {
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1686
&ggtt->mappable_end, ggtt->vm.total);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1687
ggtt->mappable_end = ggtt->vm.total;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1691
drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1757
bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted)
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1762
drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1765
drm_WARN_ON(&vm->i915->drm, !list_empty(&vm->bound_list));
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1770
vm->clear_range(vm, 0, vm->total);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1773
list_for_each_entry(vma, &vm->bound_list, vm_link) {
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1785
vma->ops->bind_vma(vm, NULL, vma->resource,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1787
i915_gem_get_pat_index(vm->i915,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
180
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1808
flush = i915_ggtt_resume_vm(&ggtt->vm, false);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
1811
ggtt->vm.scratch_range(&ggtt->vm, ggtt->error_capture.start,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
187
vm->skip_pte_rewrite = save_skip_rewrite;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
201
vm->clear_range(vm, 0, vm->total);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
203
vm->skip_pte_rewrite = save_skip_rewrite;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
205
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
207
drm_WARN_ON(&vm->i915->drm, evict_all && !list_empty(&vm->bound_list));
sys/dev/pci/drm/i915/gt/intel_ggtt.c
214
i915_ggtt_suspend_vm(&ggtt->vm, false);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
223
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
248
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
256
if (needs_wc_ggtt_mapping(ggtt->vm.i915))
sys/dev/pci/drm/i915/gt/intel_ggtt.c
272
struct drm_i915_private *i915 = ggtt->vm.i915;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
332
struct intel_gt *gt = ggtt->vm.gt;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
340
struct intel_gt *gt = ggtt->vm.gt;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
374
struct intel_gt *gt = ggtt->vm.gt;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
375
const gen8_pte_t scratch_pte = ggtt->vm.scratch[0]->encode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
481
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
487
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
491
gen8_set_pte(pte, ggtt->vm.pte_encode(addr, pat_index, flags));
sys/dev/pci/drm/i915/gt/intel_ggtt.c
496
static dma_addr_t gen8_ggtt_read_entry(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
499
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
503
return ggtt->vm.pte_decode(gen8_get_pte(pte), is_present, is_local);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
506
static void gen8_ggtt_insert_page_bind(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
510
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
513
pte = ggtt->vm.pte_encode(addr, pat_index, flags);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
514
if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
sys/dev/pci/drm/i915/gt/intel_ggtt.c
518
gen8_ggtt_insert_page(vm, addr, offset, pat_index, flags);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
521
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
526
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
527
const gen8_pte_t pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
542
gen8_set_pte(gte++, vm->scratch[0]->encode);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
551
gen8_set_pte(gte++, vm->scratch[0]->encode);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
560
static bool __gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
564
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
565
gen8_pte_t scratch_pte = vm->scratch[0]->encode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
569
pte_encode = ggtt->vm.pte_encode(0, pat_index, flags);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
57
struct drm_i915_private *i915 = ggtt->vm.i915;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
59
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
591
static void gen8_ggtt_insert_entries_bind(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
595
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
597
if (should_update_ggtt_with_bind(i915_vm_to_ggtt(vm)) &&
sys/dev/pci/drm/i915/gt/intel_ggtt.c
598
__gen8_ggtt_insert_entries_bind(vm, vma_res, pat_index, flags))
sys/dev/pci/drm/i915/gt/intel_ggtt.c
601
gen8_ggtt_insert_entries(vm, vma_res, pat_index, flags);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
604
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
607
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
61
ggtt->vm.is_ggtt = true;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
610
const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
625
static void gen8_ggtt_scratch_range_bind(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
628
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
631
const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
64
ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
643
gen8_ggtt_clear_range(vm, start, length);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
646
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
652
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
656
iowrite32(vm->pte_encode(addr, pat_index, flags), pte);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
661
static dma_addr_t gen6_ggtt_read_entry(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
665
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
669
return vm->pte_decode(ioread32(pte), is_present, is_local);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
67
ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
678
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
683
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
694
iowrite32(vm->scratch[0]->encode, gte++);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
697
iowrite32(vm->pte_encode(addr, pat_index, flags), gte++);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
702
iowrite32(vm->scratch[0]->encode, gte++);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
711
static void nop_clear_range(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
716
static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_ggtt.c
725
intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
729
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
739
gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
74
ggtt->vm.cleanup(&ggtt->vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
741
bxt_vtd_ggtt_wa(arg->vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
746
static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
752
struct insert_page arg = { vm, addr, offset, pat_index };
sys/dev/pci/drm/i915/gt/intel_ggtt.c
758
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
768
gen8_ggtt_insert_entries(arg->vm, arg->vma_res,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
770
bxt_vtd_ggtt_wa(arg->vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
775
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
780
struct insert_entries arg = { vm, vma_res, pat_index, flags };
sys/dev/pci/drm/i915/gt/intel_ggtt.c
785
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
788
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
801
scratch_pte = vm->scratch[0]->encode;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
806
void intel_ggtt_bind_vma(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
826
vm->insert_entries(vm, vma_res, pat_index, pte_flags);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
830
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
833
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
836
dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
839
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
841
return ggtt->vm.read_entry(vm, offset, is_present, is_local);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
860
if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
sys/dev/pci/drm/i915/gt/intel_ggtt.c
863
GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
864
offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
866
ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
870
drm_dbg(&ggtt->vm.i915->drm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
913
intel_wopcm_guc_size(&ggtt->vm.gt->wopcm));
sys/dev/pci/drm/i915/gt/intel_ggtt.c
945
if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
sys/dev/pci/drm/i915/gt/intel_ggtt.c
946
drm_mm_insert_node_in_range(&ggtt->vm.mm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
957
ggtt->vm.scratch_range(&ggtt->vm, start, size);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
958
drm_dbg(&ggtt->vm.i915->drm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
973
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
sys/dev/pci/drm/i915/gt/intel_ggtt.c
974
drm_dbg(&ggtt->vm.i915->drm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
977
ggtt->vm.clear_range(&ggtt->vm, hole_start,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
982
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
991
static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
296
lockdep_assert_held(&vma->vm->mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
331
struct intel_display *display = ggtt->vm.i915->display;
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
365
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
370
lockdep_assert_held(&vma->vm->mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
435
assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm);
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
438
err = mutex_lock_interruptible(&vma->vm->mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
443
mutex_unlock(&vma->vm->mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
461
lockdep_assert_held(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
496
lockdep_assert_held(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
51
return fence->ggtt->vm.i915;
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
56
return fence->ggtt->vm.gt->uncore;
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
574
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
575
struct drm_i915_private *i915 = ggtt->vm.i915;
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
841
struct drm_i915_private *i915 = ggtt->vm.i915;
sys/dev/pci/drm/i915/gt/intel_ggtt_fencing.c
842
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
100
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
101
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
109
ggtt->vm.insert_page = gmch_ggtt_insert_page;
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
110
ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
111
ggtt->vm.clear_range = gmch_ggtt_clear_range;
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
112
ggtt->vm.scratch_range = gmch_ggtt_clear_range;
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
113
ggtt->vm.read_entry = gmch_ggtt_read_entry;
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
114
ggtt->vm.cleanup = gmch_ggtt_remove;
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
118
ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
119
ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
18
static void gmch_ggtt_insert_page(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
30
static dma_addr_t gmch_ggtt_read_entry(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
37
static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
54
static void gmch_ggtt_clear_range(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
60
static void gmch_ggtt_remove(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
86
struct drm_i915_private *i915 = ggtt->vm.i915;
sys/dev/pci/drm/i915/gt/intel_ggtt_gmch.c
96
intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
sys/dev/pci/drm/i915/gt/intel_gt.c
482
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/intel_gt.c
509
return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
sys/dev/pci/drm/i915/gt/intel_gt.c
511
return i915_vm_get(&gt->ggtt->vm);
sys/dev/pci/drm/i915/gt/intel_gt.c
715
gt->vm = kernel_vm(gt);
sys/dev/pci/drm/i915/gt/intel_gt.c
716
if (!gt->vm) {
sys/dev/pci/drm/i915/gt/intel_gt.c
763
i915_vm_put(fetch_and_zero(&gt->vm));
sys/dev/pci/drm/i915/gt/intel_gt.c
831
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gt/intel_gt.c
833
vm = fetch_and_zero(&gt->vm);
sys/dev/pci/drm/i915/gt/intel_gt.c
834
if (vm) /* FIXME being called twice on error paths :( */
sys/dev/pci/drm/i915/gt/intel_gt.c
835
i915_vm_put(vm);
sys/dev/pci/drm/i915/gt/intel_gt_types.h
223
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gt/intel_gtt.c
104
type = intel_gt_coherent_map_type(vm->gt, obj, true);
sys/dev/pci/drm/i915/gt/intel_gtt.c
112
if (IS_METEORLAKE(vm->i915))
sys/dev/pci/drm/i915/gt/intel_gtt.c
123
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
sys/dev/pci/drm/i915/gt/intel_gtt.c
128
type = intel_gt_coherent_map_type(vm->gt, obj, true);
sys/dev/pci/drm/i915/gt/intel_gtt.c
136
if (IS_METEORLAKE(vm->i915))
sys/dev/pci/drm/i915/gt/intel_gtt.c
173
i915_vm_resv_get(vma->vm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
182
static void __i915_vm_close(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.c
184
mutex_lock(&vm->mutex);
sys/dev/pci/drm/i915/gt/intel_gtt.c
186
clear_vm_list(&vm->bound_list);
sys/dev/pci/drm/i915/gt/intel_gtt.c
187
clear_vm_list(&vm->unbound_list);
sys/dev/pci/drm/i915/gt/intel_gtt.c
190
GEM_BUG_ON(!list_empty(&vm->bound_list));
sys/dev/pci/drm/i915/gt/intel_gtt.c
191
GEM_BUG_ON(!list_empty(&vm->unbound_list));
sys/dev/pci/drm/i915/gt/intel_gtt.c
193
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/gt/intel_gtt.c
197
int i915_vm_lock_objects(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.c
200
if (vm->scratch[0]->base.resv == &vm->_resv) {
sys/dev/pci/drm/i915/gt/intel_gtt.c
201
return i915_gem_object_lock(vm->scratch[0], ww);
sys/dev/pci/drm/i915/gt/intel_gtt.c
203
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
210
void i915_address_space_fini(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.c
212
drm_mm_takedown(&vm->mm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
225
struct i915_address_space *vm =
sys/dev/pci/drm/i915/gt/intel_gtt.c
226
container_of(kref, typeof(*vm), resv_ref);
sys/dev/pci/drm/i915/gt/intel_gtt.c
228
dma_resv_fini(&vm->_resv);
sys/dev/pci/drm/i915/gt/intel_gtt.c
229
mutex_destroy(&vm->mutex);
sys/dev/pci/drm/i915/gt/intel_gtt.c
231
kfree(vm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
236
struct i915_address_space *vm =
sys/dev/pci/drm/i915/gt/intel_gtt.c
239
__i915_vm_close(vm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
242
i915_vma_resource_bind_dep_sync_all(vm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
244
vm->cleanup(vm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
245
i915_address_space_fini(vm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
247
i915_vm_resv_put(vm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
252
struct i915_address_space *vm =
sys/dev/pci/drm/i915/gt/intel_gtt.c
255
GEM_BUG_ON(i915_is_ggtt(vm));
sys/dev/pci/drm/i915/gt/intel_gtt.c
256
trace_i915_ppgtt_release(vm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
258
queue_work(vm->i915->wq, &vm->release_work);
sys/dev/pci/drm/i915/gt/intel_gtt.c
261
void i915_address_space_init(struct i915_address_space *vm, int subclass)
sys/dev/pci/drm/i915/gt/intel_gtt.c
263
kref_init(&vm->ref);
sys/dev/pci/drm/i915/gt/intel_gtt.c
269
if (!kref_read(&vm->resv_ref))
sys/dev/pci/drm/i915/gt/intel_gtt.c
270
kref_init(&vm->resv_ref);
sys/dev/pci/drm/i915/gt/intel_gtt.c
272
vm->pending_unbind = RB_ROOT_CACHED;
sys/dev/pci/drm/i915/gt/intel_gtt.c
273
INIT_WORK(&vm->release_work, __i915_vm_release);
sys/dev/pci/drm/i915/gt/intel_gtt.c
280
rw_init(&vm->mutex, "vmlk");
sys/dev/pci/drm/i915/gt/intel_gtt.c
281
lockdep_set_subclass(&vm->mutex, subclass);
sys/dev/pci/drm/i915/gt/intel_gtt.c
283
if (!intel_vm_no_concurrent_access_wa(vm->i915)) {
sys/dev/pci/drm/i915/gt/intel_gtt.c
284
i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
sys/dev/pci/drm/i915/gt/intel_gtt.c
294
mutex_acquire(&vm->mutex.dep_map, 0, 0, _THIS_IP_);
sys/dev/pci/drm/i915/gt/intel_gtt.c
296
mutex_release(&vm->mutex.dep_map, _THIS_IP_);
sys/dev/pci/drm/i915/gt/intel_gtt.c
298
dma_resv_init(&vm->_resv);
sys/dev/pci/drm/i915/gt/intel_gtt.c
300
GEM_BUG_ON(!vm->total);
sys/dev/pci/drm/i915/gt/intel_gtt.c
301
drm_mm_init(&vm->mm, 0, vm->total);
sys/dev/pci/drm/i915/gt/intel_gtt.c
303
memset64(vm->min_alignment, I915_GTT_MIN_ALIGNMENT,
sys/dev/pci/drm/i915/gt/intel_gtt.c
304
ARRAY_SIZE(vm->min_alignment));
sys/dev/pci/drm/i915/gt/intel_gtt.c
306
if (HAS_64K_PAGES(vm->i915)) {
sys/dev/pci/drm/i915/gt/intel_gtt.c
307
vm->min_alignment[INTEL_MEMORY_LOCAL] = I915_GTT_PAGE_SIZE_64K;
sys/dev/pci/drm/i915/gt/intel_gtt.c
308
vm->min_alignment[INTEL_MEMORY_STOLEN_LOCAL] = I915_GTT_PAGE_SIZE_64K;
sys/dev/pci/drm/i915/gt/intel_gtt.c
311
vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
sys/dev/pci/drm/i915/gt/intel_gtt.c
313
INIT_LIST_HEAD(&vm->bound_list);
sys/dev/pci/drm/i915/gt/intel_gtt.c
314
INIT_LIST_HEAD(&vm->unbound_list);
sys/dev/pci/drm/i915/gt/intel_gtt.c
359
int setup_scratch_page(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.c
375
if (i915_vm_is_4lvl(vm) &&
sys/dev/pci/drm/i915/gt/intel_gtt.c
376
HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K) &&
sys/dev/pci/drm/i915/gt/intel_gtt.c
377
!HAS_64K_PAGES(vm->i915))
sys/dev/pci/drm/i915/gt/intel_gtt.c
383
obj = vm->alloc_scratch_dma(vm, size);
sys/dev/pci/drm/i915/gt/intel_gtt.c
387
if (map_pt_dma(vm, obj))
sys/dev/pci/drm/i915/gt/intel_gtt.c
409
vm->scratch[0] = obj;
sys/dev/pci/drm/i915/gt/intel_gtt.c
41
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
sys/dev/pci/drm/i915/gt/intel_gtt.c
410
vm->scratch_order = get_order(size);
sys/dev/pci/drm/i915/gt/intel_gtt.c
423
void free_scratch(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.c
427
if (!vm->scratch[0])
sys/dev/pci/drm/i915/gt/intel_gtt.c
430
for (i = 0; i <= vm->top; i++)
sys/dev/pci/drm/i915/gt/intel_gtt.c
431
i915_gem_object_put(vm->scratch[i]);
sys/dev/pci/drm/i915/gt/intel_gtt.c
57
obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
sys/dev/pci/drm/i915/gt/intel_gtt.c
58
vm->lmem_pt_obj_flags);
sys/dev/pci/drm/i915/gt/intel_gtt.c
65
obj->base.resv = i915_vm_resv_get(vm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
66
obj->shares_resv_from = vm;
sys/dev/pci/drm/i915/gt/intel_gtt.c
68
if (vm->fpriv)
sys/dev/pci/drm/i915/gt/intel_gtt.c
69
i915_drm_client_add_object(vm->fpriv->client, obj);
sys/dev/pci/drm/i915/gt/intel_gtt.c
695
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
sys/dev/pci/drm/i915/gt/intel_gtt.c
700
obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
sys/dev/pci/drm/i915/gt/intel_gtt.c
706
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gt/intel_gtt.c
716
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size)
sys/dev/pci/drm/i915/gt/intel_gtt.c
721
vma = __vm_create_scratch_for_read(vm, size);
sys/dev/pci/drm/i915/gt/intel_gtt.c
75
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
sys/dev/pci/drm/i915/gt/intel_gtt.c
79
if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
sys/dev/pci/drm/i915/gt/intel_gtt.c
80
i915_gem_shrink_all(vm->i915);
sys/dev/pci/drm/i915/gt/intel_gtt.c
82
obj = i915_gem_object_create_internal(vm->i915, sz);
sys/dev/pci/drm/i915/gt/intel_gtt.c
89
obj->base.resv = i915_vm_resv_get(vm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
90
obj->shares_resv_from = vm;
sys/dev/pci/drm/i915/gt/intel_gtt.c
92
if (vm->fpriv)
sys/dev/pci/drm/i915/gt/intel_gtt.c
93
i915_drm_client_add_object(vm->fpriv->client, obj);
sys/dev/pci/drm/i915/gt/intel_gtt.c
99
int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
sys/dev/pci/drm/i915/gt/intel_gtt.h
233
void (*bind_vma)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
242
void (*unbind_vma)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
308
(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
sys/dev/pci/drm/i915/gt/intel_gtt.h
310
(*alloc_scratch_dma)(struct i915_address_space *vm, int sz);
sys/dev/pci/drm/i915/gt/intel_gtt.h
319
void (*allocate_va_range)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
322
void (*clear_range)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
324
void (*scratch_range)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
326
void (*insert_page)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
331
void (*insert_entries)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
335
void (*raw_insert_page)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
340
void (*raw_insert_entries)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
344
dma_addr_t (*read_entry)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
346
void (*cleanup)(struct i915_address_space *vm);
sys/dev/pci/drm/i915/gt/intel_gtt.h
348
void (*foreach)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
350
void (*fn)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
370
struct i915_address_space vm;
sys/dev/pci/drm/i915/gt/intel_gtt.h
415
struct i915_address_space vm;
sys/dev/pci/drm/i915/gt/intel_gtt.h
420
#define i915_is_ggtt(vm) ((vm)->is_ggtt)
sys/dev/pci/drm/i915/gt/intel_gtt.h
421
#define i915_is_dpt(vm) ((vm)->is_dpt)
sys/dev/pci/drm/i915/gt/intel_gtt.h
422
#define i915_is_ggtt_or_dpt(vm) (i915_is_ggtt(vm) || i915_is_dpt(vm))
sys/dev/pci/drm/i915/gt/intel_gtt.h
427
i915_vm_lock_objects(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww);
sys/dev/pci/drm/i915/gt/intel_gtt.h
430
i915_vm_is_4lvl(const struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.h
432
return (vm->total - 1) >> 32;
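
Note: the i915_vm_is_4lvl() test above encodes a simple size check: an address space needs a fourth page-table level exactly when it spans more than 32 bits of virtual address, i.e. total > 2^32, which (total - 1) >> 32 detects without misfiring at exactly 4 GiB. A minimal stand-alone sketch of the same expression (plain C; the names vm_total and is_4lvl are illustrative, not from the driver):

#include <assert.h>
#include <stdint.h>

/* Nonzero iff the address space spans more than 32 bits,
 * mirroring the (vm->total - 1) >> 32 test above. */
static int is_4lvl(uint64_t vm_total)
{
	return (vm_total - 1) >> 32 ? 1 : 0;
}

int main(void)
{
	assert(!is_4lvl(1ULL << 32));      /* exactly 4 GiB: 3 levels suffice */
	assert(is_4lvl((1ULL << 32) + 1)); /* anything larger: 4 levels */
	assert(is_4lvl(1ULL << 48));       /* full 48-bit ppgtt */
	return 0;
}
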
sys/dev/pci/drm/i915/gt/intel_gtt.h
436
i915_vm_has_scratch_64K(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.h
438
return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
sys/dev/pci/drm/i915/gt/intel_gtt.h
441
static inline u64 i915_vm_min_alignment(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
445
if ((int)type >= ARRAY_SIZE(vm->min_alignment))
sys/dev/pci/drm/i915/gt/intel_gtt.h
448
return vm->min_alignment[type];
sys/dev/pci/drm/i915/gt/intel_gtt.h
451
static inline u64 i915_vm_obj_min_alignment(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
457
return i915_vm_min_alignment(vm, type);
sys/dev/pci/drm/i915/gt/intel_gtt.h
461
i915_vm_has_cache_coloring(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.h
463
return i915_is_ggtt(vm) && vm->mm.color_adjust;
sys/dev/pci/drm/i915/gt/intel_gtt.h
467
i915_vm_to_ggtt(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.h
469
BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
sys/dev/pci/drm/i915/gt/intel_gtt.h
470
GEM_BUG_ON(!i915_is_ggtt(vm));
sys/dev/pci/drm/i915/gt/intel_gtt.h
471
return container_of(vm, struct i915_ggtt, vm);
sys/dev/pci/drm/i915/gt/intel_gtt.h
475
i915_vm_to_ppgtt(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.h
477
BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
sys/dev/pci/drm/i915/gt/intel_gtt.h
478
GEM_BUG_ON(i915_is_ggtt_or_dpt(vm));
sys/dev/pci/drm/i915/gt/intel_gtt.h
479
return container_of(vm, struct i915_ppgtt, vm);
sys/dev/pci/drm/i915/gt/intel_gtt.h
483
i915_vm_get(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.h
485
kref_get(&vm->ref);
sys/dev/pci/drm/i915/gt/intel_gtt.h
486
return vm;
sys/dev/pci/drm/i915/gt/intel_gtt.h
490
i915_vm_tryget(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.h
492
return kref_get_unless_zero(&vm->ref) ? vm : NULL;
sys/dev/pci/drm/i915/gt/intel_gtt.h
495
static inline void assert_vm_alive(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.h
497
GEM_BUG_ON(!kref_read(&vm->ref));
sys/dev/pci/drm/i915/gt/intel_gtt.h
506
static inline struct dma_resv *i915_vm_resv_get(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.h
508
kref_get(&vm->resv_ref);
sys/dev/pci/drm/i915/gt/intel_gtt.h
509
return &vm->_resv;
sys/dev/pci/drm/i915/gt/intel_gtt.h
516
static inline void i915_vm_put(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.h
518
kref_put(&vm->ref, i915_vm_release);
sys/dev/pci/drm/i915/gt/intel_gtt.h
525
static inline void i915_vm_resv_put(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_gtt.h
527
kref_put(&vm->resv_ref, i915_vm_resv_release);
sys/dev/pci/drm/i915/gt/intel_gtt.h
530
void i915_address_space_init(struct i915_address_space *vm, int subclass);
sys/dev/pci/drm/i915/gt/intel_gtt.h
531
void i915_address_space_fini(struct i915_address_space *vm);
sys/dev/pci/drm/i915/gt/intel_gtt.h
585
return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
sys/dev/pci/drm/i915/gt/intel_gtt.h
590
void intel_ggtt_bind_vma(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
595
void intel_ggtt_unbind_vma(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
598
dma_addr_t intel_ggtt_read_entry(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
619
void i915_ggtt_suspend_vm(struct i915_address_space *vm, bool evict_all);
sys/dev/pci/drm/i915/gt/intel_gtt.h
620
bool i915_ggtt_resume_vm(struct i915_address_space *vm, bool all_evicted);
sys/dev/pci/drm/i915/gt/intel_gtt.h
633
int setup_scratch_page(struct i915_address_space *vm);
sys/dev/pci/drm/i915/gt/intel_gtt.h
634
void free_scratch(struct i915_address_space *vm);
sys/dev/pci/drm/i915/gt/intel_gtt.h
636
struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz);
sys/dev/pci/drm/i915/gt/intel_gtt.h
637
struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz);
sys/dev/pci/drm/i915/gt/intel_gtt.h
638
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz);
sys/dev/pci/drm/i915/gt/intel_gtt.h
639
struct i915_page_directory *alloc_pd(struct i915_address_space *vm);
sys/dev/pci/drm/i915/gt/intel_gtt.h
64
#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
sys/dev/pci/drm/i915/gt/intel_gtt.h
642
int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
sys/dev/pci/drm/i915/gt/intel_gtt.h
643
int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object *obj);
sys/dev/pci/drm/i915/gt/intel_gtt.h
645
void free_px(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
647
#define free_pt(vm, px) free_px(vm, px, 0)
sys/dev/pci/drm/i915/gt/intel_gtt.h
648
#define free_pd(vm, px) free_px(vm, px_pt(px), 1)
sys/dev/pci/drm/i915/gt/intel_gtt.h
671
void ppgtt_bind_vma(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
676
void ppgtt_unbind_vma(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
683
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
686
int i915_vm_map_pt_stash(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
688
void i915_vm_free_pt_stash(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_gtt.h
692
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);
sys/dev/pci/drm/i915/gt/intel_gtt.h
695
__vm_create_scratch_for_read_pinned(struct i915_address_space *vm, unsigned long size);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1114
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1423
*cs++ = lower_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
sys/dev/pci/drm/i915/gt/intel_lrc.c
1424
*cs++ = upper_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
sys/dev/pci/drm/i915/gt/intel_lrc.c
1528
if (i915_vm_is_4lvl(ce->vm))
sys/dev/pci/drm/i915/gt/intel_lrc.c
1533
if (GRAPHICS_VER(ce->vm->i915) == 8)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1819
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/intel_lrc.c
889
if (i915_vm_is_4lvl(&ppgtt->vm)) {
sys/dev/pci/drm/i915/gt/intel_lrc.c
903
static struct i915_ppgtt *vm_alias(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_lrc.c
905
if (i915_is_ggtt(vm))
sys/dev/pci/drm/i915/gt/intel_lrc.c
906
return i915_vm_to_ggtt(vm)->alias;
sys/dev/pci/drm/i915/gt/intel_lrc.c
908
return i915_vm_to_ppgtt(vm);
sys/dev/pci/drm/i915/gt/intel_lrc.c
944
init_ppgtt_regs(regs, vm_alias(ce->vm));
sys/dev/pci/drm/i915/gt/intel_migrate.c
145
vm = i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY);
sys/dev/pci/drm/i915/gt/intel_migrate.c
146
if (IS_ERR(vm))
sys/dev/pci/drm/i915/gt/intel_migrate.c
147
return ERR_CAST(vm);
sys/dev/pci/drm/i915/gt/intel_migrate.c
149
if (!vm->vm.allocate_va_range || !vm->vm.foreach) {
sys/dev/pci/drm/i915/gt/intel_migrate.c
191
err = i915_vm_alloc_pt_stash(&vm->vm, &stash, sz);
sys/dev/pci/drm/i915/gt/intel_migrate.c
196
err = i915_vm_lock_objects(&vm->vm, &ww);
sys/dev/pci/drm/i915/gt/intel_migrate.c
199
err = i915_vm_map_pt_stash(&vm->vm, &stash);
sys/dev/pci/drm/i915/gt/intel_migrate.c
203
vm->vm.allocate_va_range(&vm->vm, &stash, base, sz);
sys/dev/pci/drm/i915/gt/intel_migrate.c
205
i915_vm_free_pt_stash(&vm->vm, &stash);
sys/dev/pci/drm/i915/gt/intel_migrate.c
211
vm->vm.foreach(&vm->vm, base, d.offset - base,
sys/dev/pci/drm/i915/gt/intel_migrate.c
214
vm->vm.foreach(&vm->vm,
sys/dev/pci/drm/i915/gt/intel_migrate.c
219
vm->vm.foreach(&vm->vm, base, d.offset - base,
sys/dev/pci/drm/i915/gt/intel_migrate.c
224
return &vm->vm;
sys/dev/pci/drm/i915/gt/intel_migrate.c
227
i915_vm_put(&vm->vm);
sys/dev/pci/drm/i915/gt/intel_migrate.c
249
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gt/intel_migrate.c
256
vm = migrate_vm(gt);
sys/dev/pci/drm/i915/gt/intel_migrate.c
257
if (IS_ERR(vm))
sys/dev/pci/drm/i915/gt/intel_migrate.c
258
return ERR_CAST(vm);
sys/dev/pci/drm/i915/gt/intel_migrate.c
260
ce = intel_engine_create_pinned_context(engine, vm, SZ_512K,
sys/dev/pci/drm/i915/gt/intel_migrate.c
263
i915_vm_put(vm);
sys/dev/pci/drm/i915/gt/intel_migrate.c
321
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/gt/intel_migrate.c
322
ce->vm = i915_vm_get(m->context->vm);
sys/dev/pci/drm/i915/gt/intel_migrate.c
370
const u64 encode = rq->context->vm->pte_encode(0, pat_index,
sys/dev/pci/drm/i915/gt/intel_migrate.c
38
static void xehp_toggle_pdes(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_migrate.c
48
vm->insert_page(vm, 0, d->offset,
sys/dev/pci/drm/i915/gt/intel_migrate.c
49
i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
sys/dev/pci/drm/i915/gt/intel_migrate.c
55
static void xehp_insert_pte(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_migrate.c
68
vm->insert_page(vm, px_dma(pt), d->offset,
sys/dev/pci/drm/i915/gt/intel_migrate.c
69
i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
sys/dev/pci/drm/i915/gt/intel_migrate.c
699
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
sys/dev/pci/drm/i915/gt/intel_migrate.c
74
static void insert_pte(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_migrate.c
80
vm->insert_page(vm, px_dma(pt), d->offset,
sys/dev/pci/drm/i915/gt/intel_migrate.c
81
i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
sys/dev/pci/drm/i915/gt/intel_migrate.c
89
struct i915_ppgtt *vm;
sys/dev/pci/drm/i915/gt/intel_migrate.c
998
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
16
struct i915_page_table *alloc_pt(struct i915_address_space *vm, int sz)
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
177
trace_i915_ppgtt_create(&ppgtt->vm);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
182
void ppgtt_bind_vma(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
191
vm->allocate_va_range(vm, stash, vma_res->start,
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
203
vm->insert_entries(vm, vma_res, pat_index, pte_flags);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
207
void ppgtt_unbind_vma(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
213
vm->clear_range(vm, vma_res->start, vma_res->vma_size);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
214
vma_invalidate_tlb(vm, vma_res->tlb);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
223
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
230
shift = vm->pd_shift;
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
238
GEM_BUG_ON(!IS_DGFX(vm->i915));
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
24
pt->base = vm->alloc_pt_dma(vm, sz);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
246
pt = alloc_pt(vm, pt_sz);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
248
i915_vm_free_pt_stash(vm, stash);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
256
for (n = 1; n < vm->top; n++) {
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
262
pd = alloc_pd(vm);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
264
i915_vm_free_pt_stash(vm, stash);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
276
int i915_vm_map_pt_stash(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
284
err = map_pt_dma_locked(vm, pt->base);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
293
void i915_vm_free_pt_stash(struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
302
free_px(vm, pt, n);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
312
ppgtt->vm.gt = gt;
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
313
ppgtt->vm.i915 = i915;
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
314
ppgtt->vm.dma = i915->drm.dev;
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
315
ppgtt->vm.total = BIT_ULL(RUNTIME_INFO(i915)->ppgtt_size);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
316
ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
318
dma_resv_init(&ppgtt->vm._resv);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
319
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
321
ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
322
ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
53
struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
61
pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
sys/dev/pci/drm/i915/gt/intel_ppgtt.c
71
void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
sys/dev/pci/drm/i915/gt/intel_renderstate.c
160
so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/intel_ring.c
102
if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915))
sys/dev/pci/drm/i915/gt/intel_ring.c
113
struct i915_address_space *vm = &ggtt->vm;
sys/dev/pci/drm/i915/gt/intel_ring.c
114
struct drm_i915_private *i915 = vm->i915;
sys/dev/pci/drm/i915/gt/intel_ring.c
131
if (vm->has_read_only)
sys/dev/pci/drm/i915/gt/intel_ring.c
134
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gt/intel_ring.c
57
if (i915_vma_is_map_and_fenceable(vma) && !HAS_LLC(vma->vm->i915)) {
sys/dev/pci/drm/i915/gt/intel_ring.c
60
int type = intel_gt_coherent_map_type(vma->vm->gt, vma->obj, false);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
1336
vma = i915_vma_instance(obj, engine->gt->vm, NULL);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
147
static struct i915_address_space *vm_alias(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
149
if (i915_is_ggtt(vm))
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
150
vm = &i915_vm_to_ggtt(vm)->alias->vm;
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
152
return vm;
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
155
static u32 pp_dir(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
157
return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
162
struct i915_address_space *vm = vm_alias(engine->gt->vm);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
164
if (!vm)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
168
ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
530
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
540
vm = vm_alias(ce->vm);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
541
if (vm)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
542
err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)), ww);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
549
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
551
vm = vm_alias(ce->vm);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
552
if (vm)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
553
gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
595
vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
698
struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
714
*cs++ = pp_dir(vm);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
899
static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
903
if (!vm)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
918
ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
930
ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
976
ret = switch_mm(rq, vm_alias(ce->vm));
sys/dev/pci/drm/i915/gt/intel_timeline.c
178
mutex_lock(&hwsp->vm->mutex);
sys/dev/pci/drm/i915/gt/intel_timeline.c
180
mutex_unlock(&hwsp->vm->mutex);
sys/dev/pci/drm/i915/gt/intel_timeline.c
31
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/intel_workarounds.c
3042
vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
sys/dev/pci/drm/i915/gt/mock_engine.c
40
struct i915_address_space *vm = &ggtt->vm;
sys/dev/pci/drm/i915/gt/mock_engine.c
41
struct drm_i915_private *i915 = vm->i915;
sys/dev/pci/drm/i915/gt/mock_engine.c
49
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1001
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1308
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1567
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2721
vma = i915_vma_instance(obj, ce->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3061
vma = i915_vma_instance(obj, result->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3131
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3160
vma = i915_vma_instance(global->obj, ce->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3495
struct i915_address_space *vm;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3497
vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3498
vma = i915_vma_instance(batch, vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3499
i915_vm_put(vm);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4201
__vm_create_scratch_for_read_pinned(&siblings[0]->gt->ggtt->vm,
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
107
struct i915_address_space *vm = i915_gem_context_get_eb_vm(h->ctx);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
118
i915_vm_put(vm);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
125
i915_vm_put(vm);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
135
vma = i915_vma_instance(h->obj, vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
137
i915_vm_put(vm);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
1381
struct i915_address_space *vm = arg->vma->vm;
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
1387
mutex_lock(&vm->mutex);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
1388
err = i915_gem_evict_for_node(vm, NULL, &evict, 0);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
1389
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
141
hws = i915_vma_instance(h->hws, vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
1427
struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
143
i915_vm_put(vm);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
1471
arg.vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
149
i915_vm_put(vm);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
1585
return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
1603
err = __igt_reset_evict_vma(gt, &ppgtt->vm,
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
1605
i915_vm_put(&ppgtt->vm);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
1614
return __igt_reset_evict_vma(gt, &gt->ggtt->vm,
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
249
i915_vm_put(vm);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1150
batch = create_user_vma(ce->vm, SZ_64K);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1387
create_result_vma(struct i915_address_space *vm, unsigned long sz)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1392
vma = create_user_vma(vm, sz);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1428
ref[0] = create_result_vma(A->vm, SZ_64K);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1434
ref[1] = create_result_vma(A->vm, SZ_64K);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1456
result[0] = create_result_vma(A->vm, SZ_64K);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1462
result[1] = create_result_vma(A->vm, SZ_64K);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
36
return __vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
934
create_user_vma(struct i915_address_space *vm, unsigned long size)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
940
obj = i915_gem_object_create_internal(vm->i915, size);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
944
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
979
batch = create_user_vma(ce->vm, SZ_64K);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
151
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
80
__vm_create_scratch_for_read_pinned(&gt->ggtt->vm, PAGE_SIZE);
sys/dev/pci/drm/i915/gt/selftest_reset.c
111
ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
sys/dev/pci/drm/i915/gt/selftest_reset.c
130
ggtt->vm.insert_page(&ggtt->vm, dma,
sys/dev/pci/drm/i915/gt/selftest_reset.c
160
ggtt->vm.clear_range(&ggtt->vm, ggtt->error_capture.start, PAGE_SIZE);
sys/dev/pci/drm/i915/gt/selftest_reset.c
87
ggtt->vm.insert_page(&ggtt->vm, dma,
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
20
vma = i915_vma_instance(obj, engine->gt->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_rps.c
58
struct i915_address_space *vm,
sys/dev/pci/drm/i915/gt/selftest_rps.c
645
engine->kernel_context->vm, false,
sys/dev/pci/drm/i915/gt/selftest_rps.c
76
obj = i915_gem_object_create_internal(vm->i915, 64 << 10);
sys/dev/pci/drm/i915/gt/selftest_rps.c
784
engine->kernel_context->vm, true,
sys/dev/pci/drm/i915/gt/selftest_rps.c
82
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
168
ce->vm->insert_entries(ce->vm, &vb_res, pat_index, pte_flags);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
171
tlbinv(ce->vm, addr & -length, length);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
230
void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length))
sys/dev/pci/drm/i915/gt/selftest_tlb.c
283
va = i915_vma_instance(A, &ppgtt->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
289
vb = i915_vma_instance(B, &ppgtt->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
307
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
308
ce->vm = i915_vm_get(&ppgtt->vm);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
320
if (BIT_ULL(bit) < i915_vm_obj_min_alignment(va->vm, va->obj))
sys/dev/pci/drm/i915/gt/selftest_tlb.c
355
i915_vm_put(&ppgtt->vm);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
36
void (*tlbinv)(struct i915_address_space *vm, u64 addr, u64 length),
sys/dev/pci/drm/i915/gt/selftest_tlb.c
363
static void tlbinv_full(struct i915_address_space *vm, u64 addr, u64 length)
sys/dev/pci/drm/i915/gt/selftest_tlb.c
365
intel_gt_invalidate_tlb_full(vm->gt, intel_gt_tlb_seqno(vm->gt) | 1);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
41
i915_gem_get_pat_index(ce->vm->i915, I915_CACHE_NONE);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
50
batch = i915_gem_object_create_internal(ce->vm->i915, 4096);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
54
vma = i915_vma_instance(batch, ce->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
67
addr = igt_random_offset(prng, addr, min(ce->vm->total, BIT_ULL(48)),
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1077
__vm_create_scratch_for_read_pinned(gt->vm, 4096);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1084
__vm_create_scratch_for_read_pinned(gt->vm, 4096);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1095
if (!engine->kernel_context->vm)
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
125
vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
378
static struct i915_vma *create_batch(struct i915_address_space *vm)
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
384
obj = i915_gem_object_create_internal(vm->i915, 16 * PAGE_SIZE);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
388
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
509
scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
513
batch = create_batch(ce->vm);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
894
batch = create_batch(ce->vm);
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc.c
226
ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
sys/dev/pci/drm/i915/gt/uc/intel_guc.c
804
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/gt/uc/intel_uc_fw.c
1065
if (ggtt->vm.raw_insert_entries)
sys/dev/pci/drm/i915/gt/uc/intel_uc_fw.c
1066
ggtt->vm.raw_insert_entries(&ggtt->vm, vma_res,
sys/dev/pci/drm/i915/gt/uc/intel_uc_fw.c
1067
i915_gem_get_pat_index(ggtt->vm.i915,
sys/dev/pci/drm/i915/gt/uc/intel_uc_fw.c
1071
ggtt->vm.insert_entries(&ggtt->vm, vma_res,
sys/dev/pci/drm/i915/gt/uc/intel_uc_fw.c
1072
i915_gem_get_pat_index(ggtt->vm.i915,
sys/dev/pci/drm/i915/gt/uc/intel_uc_fw.c
1085
ggtt->vm.clear_range(&ggtt->vm, vma_res->start, vma_res->node_size);
sys/dev/pci/drm/i915/gvt/aperture_gm.c
103
mutex_lock(&gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gvt/aperture_gm.c
105
mutex_unlock(&gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gvt/aperture_gm.c
114
mutex_lock(&gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gvt/aperture_gm.c
117
mutex_unlock(&gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gvt/aperture_gm.c
180
mutex_lock(&gvt->gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gvt/aperture_gm.c
187
mutex_unlock(&gvt->gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gvt/aperture_gm.c
203
mutex_lock(&gvt->gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gvt/aperture_gm.c
215
mutex_unlock(&gvt->gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gvt/aperture_gm.c
229
mutex_unlock(&gvt->gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gvt/aperture_gm.c
66
mutex_lock(&gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gvt/aperture_gm.c
68
ret = i915_gem_gtt_insert(&gt->ggtt->vm, NULL, node,
sys/dev/pci/drm/i915/gvt/aperture_gm.c
73
mutex_unlock(&gt->ggtt->vm.mutex);
sys/dev/pci/drm/i915/gvt/gvt.h
406
#define gvt_ggtt_gm_sz(gvt) gvt_to_ggtt(gvt)->vm.total
sys/dev/pci/drm/i915/gvt/gvt.h
407
#define gvt_ggtt_sz(gvt) (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
sys/dev/pci/drm/i915/gvt/scheduler.c
1300
if (i915_vm_is_4lvl(&ppgtt->vm)) {
sys/dev/pci/drm/i915/gvt/scheduler.c
1327
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
sys/dev/pci/drm/i915/gvt/scheduler.c
1361
if (i915_vm_is_4lvl(&ppgtt->vm)) {
sys/dev/pci/drm/i915/gvt/scheduler.c
1410
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/gvt/scheduler.c
1411
ce->vm = i915_vm_get(&ppgtt->vm);
sys/dev/pci/drm/i915/gvt/scheduler.c
1440
i915_vm_put(&ppgtt->vm);
sys/dev/pci/drm/i915/gvt/scheduler.c
1451
i915_vm_put(&ppgtt->vm);
sys/dev/pci/drm/i915/gvt/scheduler.c
438
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
sys/dev/pci/drm/i915/i915_gem.c
101
pinned = ggtt->vm.reserved;
sys/dev/pci/drm/i915/i915_gem.c
102
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
sys/dev/pci/drm/i915/i915_gem.c
106
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/i915_gem.c
108
args->aper_size = ggtt->vm.total;
sys/dev/pci/drm/i915/i915_gem.c
160
if (!i915_vm_tryget(vma->vm))
sys/dev/pci/drm/i915/i915_gem.c
180
if (mutex_trylock(&vma->vm->mutex)) {
sys/dev/pci/drm/i915/i915_gem.c
182
mutex_unlock(&vma->vm->mutex);
sys/dev/pci/drm/i915/i915_gem.c
189
i915_vm_put(vma->vm);
sys/dev/pci/drm/i915/i915_gem.c
344
ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
sys/dev/pci/drm/i915/i915_gem.c
371
ggtt->vm.clear_range(&ggtt->vm, node->start, node->size);
sys/dev/pci/drm/i915/i915_gem.c
419
ggtt->vm.insert_page(&ggtt->vm,
sys/dev/pci/drm/i915/i915_gem.c
599
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
sys/dev/pci/drm/i915/i915_gem.c
600
ggtt->vm.insert_page(&ggtt->vm,
sys/dev/pci/drm/i915/i915_gem.c
627
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
sys/dev/pci/drm/i915/i915_gem.c
65
err = mutex_lock_interruptible(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/i915_gem.c
70
err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
sys/dev/pci/drm/i915/i915_gem.c
75
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/i915_gem.c
83
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/i915_gem.c
85
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/i915_gem.c
949
vma = i915_vma_instance(obj, &ggtt->vm, view);
sys/dev/pci/drm/i915/i915_gem.c
98
if (mutex_lock_interruptible(&ggtt->vm.mutex))
sys/dev/pci/drm/i915/i915_gem.c
990
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/i915_gem.c
992
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/i915_gem_evict.c
148
i915_gem_evict_something(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_gem_evict.c
164
lockdep_assert_held(&vm->mutex);
sys/dev/pci/drm/i915/i915_gem_evict.c
165
trace_i915_gem_evict(vm, min_size, alignment, flags);
sys/dev/pci/drm/i915/i915_gem_evict.c
183
drm_mm_scan_init_with_range(&scan, &vm->mm,
sys/dev/pci/drm/i915/i915_gem_evict.c
187
if (i915_is_ggtt(vm)) {
sys/dev/pci/drm/i915/i915_gem_evict.c
188
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/i915_gem_evict.c
193
intel_gt_retire_requests(vm->gt);
sys/dev/pci/drm/i915/i915_gem_evict.c
199
list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
sys/dev/pci/drm/i915/i915_gem_evict.c
226
list_move_tail(&vma->vm_link, &vm->bound_list);
sys/dev/pci/drm/i915/i915_gem_evict.c
247
if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
sys/dev/pci/drm/i915/i915_gem_evict.c
266
ret = ggtt_flush(vm);
sys/dev/pci/drm/i915/i915_gem_evict.c
328
int i915_gem_evict_for_node(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_gem_evict.c
340
lockdep_assert_held(&vm->mutex);
sys/dev/pci/drm/i915/i915_gem_evict.c
344
trace_i915_gem_evict_node(vm, target, flags);
sys/dev/pci/drm/i915/i915_gem_evict.c
352
if (i915_is_ggtt(vm)) {
sys/dev/pci/drm/i915/i915_gem_evict.c
353
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/i915_gem_evict.c
359
intel_gt_retire_requests(vm->gt);
sys/dev/pci/drm/i915/i915_gem_evict.c
362
if (i915_vm_has_cache_coloring(vm)) {
sys/dev/pci/drm/i915/i915_gem_evict.c
372
drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
sys/dev/pci/drm/i915/i915_gem_evict.c
389
if (i915_vm_has_cache_coloring(vm)) {
sys/dev/pci/drm/i915/i915_gem_evict.c
458
int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww,
sys/dev/pci/drm/i915/i915_gem_evict.c
46
static int ggtt_flush(struct i915_address_space *vm)
sys/dev/pci/drm/i915/i915_gem_evict.c
463
lockdep_assert_held(&vm->mutex);
sys/dev/pci/drm/i915/i915_gem_evict.c
464
trace_i915_gem_evict_vm(vm);
sys/dev/pci/drm/i915/i915_gem_evict.c
471
if (i915_is_ggtt(vm)) {
sys/dev/pci/drm/i915/i915_gem_evict.c
472
ret = ggtt_flush(vm);
sys/dev/pci/drm/i915/i915_gem_evict.c
48
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/i915_gem_evict.c
482
list_for_each_entry(vma, &vm->bound_list, vm_link) {
sys/dev/pci/drm/i915/i915_gem_evict.h
16
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_gem_evict.h
22
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_gem_evict.h
26
int i915_gem_evict_vm(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_gem_gtt.c
102
int i915_gem_gtt_reserve(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_gem_gtt.c
113
GEM_BUG_ON(range_overflows(offset, size, vm->total));
sys/dev/pci/drm/i915/i915_gem_gtt.c
114
GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
sys/dev/pci/drm/i915/i915_gem_gtt.c
121
err = drm_mm_reserve_node(&vm->mm, node);
sys/dev/pci/drm/i915/i915_gem_gtt.c
128
err = i915_gem_evict_for_node(vm, ww, node, flags);
sys/dev/pci/drm/i915/i915_gem_gtt.c
130
err = drm_mm_reserve_node(&vm->mm, node);
sys/dev/pci/drm/i915/i915_gem_gtt.c
195
int i915_gem_gtt_insert(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_gem_gtt.c
205
lockdep_assert_held(&vm->mutex);
sys/dev/pci/drm/i915/i915_gem_gtt.c
214
GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
sys/dev/pci/drm/i915/i915_gem_gtt.c
239
err = drm_mm_insert_node_in_range(&vm->mm, node,
sys/dev/pci/drm/i915/i915_gem_gtt.c
246
err = drm_mm_insert_node_in_range(&vm->mm, node,
sys/dev/pci/drm/i915/i915_gem_gtt.c
282
err = i915_gem_gtt_reserve(vm, ww, node, size, offset, color, flags);
sys/dev/pci/drm/i915/i915_gem_gtt.c
290
err = i915_gem_evict_something(vm, ww, size, alignment, color,
sys/dev/pci/drm/i915/i915_gem_gtt.c
295
return drm_mm_insert_node_in_range(&vm->mm, node,
sys/dev/pci/drm/i915/i915_gem_gtt.h
28
int i915_gem_gtt_reserve(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_gem_gtt.h
34
int i915_gem_gtt_insert(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_gpu_error.c
1235
if (ggtt->vm.raw_insert_page)
sys/dev/pci/drm/i915/i915_gpu_error.c
1236
ggtt->vm.raw_insert_page(&ggtt->vm, dma, slot,
sys/dev/pci/drm/i915/i915_gpu_error.c
1241
ggtt->vm.insert_page(&ggtt->vm, dma, slot,
sys/dev/pci/drm/i915/i915_gpu_error.c
1254
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
sys/dev/pci/drm/i915/i915_perf.c
1387
scratch = __vm_create_scratch_for_read_pinned(&ce->engine->gt->ggtt->vm, 4);
sys/dev/pci/drm/i915/i915_perf.c
1894
vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/i915_perf.c
2005
vma = i915_vma_instance(bo, &gt->ggtt->vm, NULL);
sys/dev/pci/drm/i915/i915_perf.c
2273
&stream->engine->gt->ggtt->vm,
sys/dev/pci/drm/i915/i915_vgpu.c
153
struct drm_i915_private *dev_priv = ggtt->vm.i915;
sys/dev/pci/drm/i915/i915_vgpu.c
163
ggtt->vm.reserved -= node->size;
sys/dev/pci/drm/i915/i915_vgpu.c
176
struct drm_i915_private *dev_priv = ggtt->vm.i915;
sys/dev/pci/drm/i915/i915_vgpu.c
179
if (!intel_vgpu_active(ggtt->vm.i915))
sys/dev/pci/drm/i915/i915_vgpu.c
192
struct drm_i915_private *dev_priv = ggtt->vm.i915;
sys/dev/pci/drm/i915/i915_vgpu.c
202
ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, node,
sys/dev/pci/drm/i915/i915_vgpu.c
206
ggtt->vm.reserved += size;
sys/dev/pci/drm/i915/i915_vgpu.c
257
struct drm_i915_private *dev_priv = ggtt->vm.i915;
sys/dev/pci/drm/i915/i915_vgpu.c
259
unsigned long ggtt_end = ggtt->vm.total;
sys/dev/pci/drm/i915/i915_vgpu.c
265
if (!intel_vgpu_active(ggtt->vm.i915))
sys/dev/pci/drm/i915/i915_vma.c
132
intel_gt_pm_get_untracked(vma->vm->gt);
sys/dev/pci/drm/i915/i915_vma.c
1346
drm_err(&vma->vm->i915->drm,
sys/dev/pci/drm/i915/i915_vma.c
1383
void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
sys/dev/pci/drm/i915/i915_vma.c
1399
for_each_gt(gt, vm->i915, id)
sys/dev/pci/drm/i915/i915_vma.c
1432
lockdep_assert_held(&vma->vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
147
intel_gt_pm_put_async_untracked(vma->vm->gt);
sys/dev/pci/drm/i915/i915_vma.c
1475
wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
sys/dev/pci/drm/i915/i915_vma.c
1477
if (flags & vma->vm->bind_async_flags) {
sys/dev/pci/drm/i915/i915_vma.c
1479
err = i915_vm_lock_objects(vma->vm, ww);
sys/dev/pci/drm/i915/i915_vma.c
1489
work->vm = vma->vm;
sys/dev/pci/drm/i915/i915_vma.c
1498
if (vma->vm->allocate_va_range) {
sys/dev/pci/drm/i915/i915_vma.c
1499
err = i915_vm_alloc_pt_stash(vma->vm,
sys/dev/pci/drm/i915/i915_vma.c
1505
err = i915_vm_map_pt_stash(vma->vm, &work->stash);
sys/dev/pci/drm/i915/i915_vma.c
1534
err = mutex_lock_interruptible_nested(&vma->vm->mutex,
sys/dev/pci/drm/i915/i915_vma.c
155
struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_vma.c
1572
if (i915_is_ggtt(vma->vm))
sys/dev/pci/drm/i915/i915_vma.c
1587
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
sys/dev/pci/drm/i915/i915_vma.c
1604
mutex_unlock(&vma->vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
1617
intel_vm_no_concurrent_access_wa(vma->vm->i915))
sys/dev/pci/drm/i915/i915_vma.c
1623
intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
sys/dev/pci/drm/i915/i915_vma.c
164
GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
sys/dev/pci/drm/i915/i915_vma.c
1666
struct i915_address_space *vm = vma->vm;
sys/dev/pci/drm/i915/i915_vma.c
1668
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
sys/dev/pci/drm/i915/i915_vma.c
1686
if (mutex_lock_interruptible(&vm->mutex) == 0) {
sys/dev/pci/drm/i915/i915_vma.c
1692
i915_gem_evict_vm(vm, NULL, NULL);
sys/dev/pci/drm/i915/i915_vma.c
1693
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
170
vma->ops = &vm->vma_ops;
sys/dev/pci/drm/i915/i915_vma.c
1760
struct intel_gt *gt = vma->vm->gt;
sys/dev/pci/drm/i915/i915_vma.c
1782
struct intel_gt *gt = vma->vm->gt;
sys/dev/pci/drm/i915/i915_vma.c
1819
i915_vm_resv_put(vma->vm);
sys/dev/pci/drm/i915/i915_vma.c
1854
lockdep_assert_held(&vma->vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
1858
release_references(vma, vma->vm->gt, false);
sys/dev/pci/drm/i915/i915_vma.c
1866
mutex_lock(&vma->vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
1873
gt = vma->vm->gt;
sys/dev/pci/drm/i915/i915_vma.c
1874
mutex_unlock(&vma->vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
1886
struct i915_address_space *vm = vma->vm;
sys/dev/pci/drm/i915/i915_vma.c
1893
if (!i915_vm_tryget(vm)) {
sys/dev/pci/drm/i915/i915_vma.c
1905
struct i915_address_space *vm = vma->vm;
sys/dev/pci/drm/i915/i915_vma.c
1919
i915_vm_put(vm);
sys/dev/pci/drm/i915/i915_vma.c
1951
unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
sys/dev/pci/drm/i915/i915_vma.c
209
if (unlikely(vma->size > vm->total))
sys/dev/pci/drm/i915/i915_vma.c
2106
kref_read(&vma->vm->ref);
sys/dev/pci/drm/i915/i915_vma.c
2107
vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
sys/dev/pci/drm/i915/i915_vma.c
2108
vma->vm->skip_pte_rewrite;
sys/dev/pci/drm/i915/i915_vma.c
2130
vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
sys/dev/pci/drm/i915/i915_vma.c
214
err = mutex_lock_interruptible(&vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
2147
lockdep_assert_held(&vma->vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
2178
lockdep_assert_held(&vma->vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
220
vma->vm = vm;
sys/dev/pci/drm/i915/i915_vma.c
221
list_add_tail(&vma->vm_link, &vm->unbound_list);
sys/dev/pci/drm/i915/i915_vma.c
2211
struct i915_address_space *vm = vma->vm;
sys/dev/pci/drm/i915/i915_vma.c
2232
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
sys/dev/pci/drm/i915/i915_vma.c
2234
err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
sys/dev/pci/drm/i915/i915_vma.c
2239
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
224
if (i915_is_ggtt(vm)) {
sys/dev/pci/drm/i915/i915_vma.c
2243
intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
sys/dev/pci/drm/i915/i915_vma.c
2250
struct i915_address_space *vm = vma->vm;
sys/dev/pci/drm/i915/i915_vma.c
228
vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
sys/dev/pci/drm/i915/i915_vma.c
2282
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
sys/dev/pci/drm/i915/i915_vma.c
2284
if (trylock_vm && !mutex_trylock(&vm->mutex)) {
sys/dev/pci/drm/i915/i915_vma.c
2288
err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
sys/dev/pci/drm/i915/i915_vma.c
2294
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
2305
intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
sys/dev/pci/drm/i915/i915_vma.c
232
vma->fence_size > vm->total))
sys/dev/pci/drm/i915/i915_vma.c
237
vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
sys/dev/pci/drm/i915/i915_vma.c
258
cmp = i915_vma_compare(pos, vm, view);
sys/dev/pci/drm/i915/i915_vma.c
281
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
288
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
296
struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_vma.c
306
cmp = i915_vma_compare(vma, vm, view);
sys/dev/pci/drm/i915/i915_vma.c
334
struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_vma.c
339
GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
sys/dev/pci/drm/i915/i915_vma.c
340
GEM_BUG_ON(!kref_read(&vm->ref));
sys/dev/pci/drm/i915/i915_vma.c
343
vma = i915_vma_lookup(obj, vm, view);
sys/dev/pci/drm/i915/i915_vma.c
348
vma = vma_create(obj, vm, view);
sys/dev/pci/drm/i915/i915_vma.c
350
GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
sys/dev/pci/drm/i915/i915_vma.c
356
struct i915_address_space *vm;
sys/dev/pci/drm/i915/i915_vma.c
379
vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
sys/dev/pci/drm/i915/i915_vma.c
390
i915_vm_free_pt_stash(vw->vm, &vw->stash);
sys/dev/pci/drm/i915/i915_vma.c
462
i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
sys/dev/pci/drm/i915/i915_vma.c
492
lockdep_assert_held(&vma->vm->mutex);
sys/dev/pci/drm/i915/i915_vma.c
498
vma->vm->total))) {
sys/dev/pci/drm/i915/i915_vma.c
523
if (work && bind_flags & vma->vm->bind_async_flags)
sys/dev/pci/drm/i915/i915_vma.c
524
ret = i915_vma_resource_bind_dep_await(vma->vm,
sys/dev/pci/drm/i915/i915_vma.c
533
ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
sys/dev/pci/drm/i915/i915_vma.c
549
if (work && bind_flags & vma->vm->bind_async_flags) {
sys/dev/pci/drm/i915/i915_vma.c
55
if (kref_read(&vma->vm->ref))
sys/dev/pci/drm/i915/i915_vma.c
583
vma->ops->bind_vma(vma->vm, NULL, vma->resource, pat_index,
sys/dev/pci/drm/i915/i915_vma.c
615
ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
sys/dev/pci/drm/i915/i915_vma.c
662
intel_gt_flush_ggtt_writes(vma->vm->gt);
sys/dev/pci/drm/i915/i915_vma.c
742
i915_vm_to_ggtt(vma->vm)->mappable_end;
sys/dev/pci/drm/i915/i915_vma.c
762
if (!i915_vm_has_cache_coloring(vma->vm))
sys/dev/pci/drm/i915/i915_vma.c
836
end = vma->vm->total;
sys/dev/pci/drm/i915/i915_vma.c
838
end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
sys/dev/pci/drm/i915/i915_vma.c
843
alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
sys/dev/pci/drm/i915/i915_vma.c
859
if (i915_vm_has_cache_coloring(vma->vm))
sys/dev/pci/drm/i915/i915_vma.c
876
ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
sys/dev/pci/drm/i915/i915_vma.c
894
!HAS_64K_PAGES(vma->vm->i915)) {
sys/dev/pci/drm/i915/i915_vma.c
918
ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
sys/dev/pci/drm/i915/i915_vma.c
930
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
sys/dev/pci/drm/i915/i915_vma.c
947
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
sys/dev/pci/drm/i915/i915_vma.h
189
return i915_vm_to_ggtt(vma->vm)->pin_bias;
sys/dev/pci/drm/i915/i915_vma.h
213
struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_vma.h
218
GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
sys/dev/pci/drm/i915/i915_vma.h
220
cmp = ptrdiff(vma->vm, vm);
sys/dev/pci/drm/i915/i915_vma.h
267
void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb);
sys/dev/pci/drm/i915/i915_vma.h
45
struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_vma.h
84
return i915_is_dpt(vma->vm);
sys/dev/pci/drm/i915/i915_vma_resource.c
175
struct i915_address_space *vm;
sys/dev/pci/drm/i915/i915_vma_resource.c
182
vm = vma_res->vm;
sys/dev/pci/drm/i915/i915_vma_resource.c
184
intel_runtime_pm_put(&vm->i915->runtime_pm, vma_res->wakeref);
sys/dev/pci/drm/i915/i915_vma_resource.c
186
vma_res->vm = NULL;
sys/dev/pci/drm/i915/i915_vma_resource.c
188
mutex_lock(&vm->mutex);
sys/dev/pci/drm/i915/i915_vma_resource.c
189
vma_res_itree_remove(vma_res, &vm->pending_unbind);
sys/dev/pci/drm/i915/i915_vma_resource.c
190
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/i915_vma_resource.c
247
struct i915_address_space *vm = vma_res->vm;
sys/dev/pci/drm/i915/i915_vma_resource.c
252
vma_res->ops->unbind_vma(vm, vma_res);
sys/dev/pci/drm/i915/i915_vma_resource.c
303
struct i915_address_space *vm = vma_res->vm;
sys/dev/pci/drm/i915/i915_vma_resource.c
312
vma_res->wakeref = intel_runtime_pm_get_if_in_use(&vm->i915->runtime_pm);
sys/dev/pci/drm/i915/i915_vma_resource.c
318
vma_res_itree_insert(vma_res, &vma_res->vm->pending_unbind);
sys/dev/pci/drm/i915/i915_vma_resource.c
342
i915_vma_resource_color_adjust_range(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_vma_resource.c
346
if (i915_vm_has_cache_coloring(vm)) {
sys/dev/pci/drm/i915/i915_vma_resource.c
365
int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_vma_resource.c
373
lockdep_assert_held(&vm->mutex);
sys/dev/pci/drm/i915/i915_vma_resource.c
376
i915_vma_resource_color_adjust_range(vm, &offset, &last);
sys/dev/pci/drm/i915/i915_vma_resource.c
377
node = vma_res_itree_iter_first(&vm->pending_unbind, offset, last);
sys/dev/pci/drm/i915/i915_vma_resource.c
400
void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm)
sys/dev/pci/drm/i915/i915_vma_resource.c
407
mutex_lock(&vm->mutex);
sys/dev/pci/drm/i915/i915_vma_resource.c
408
node = vma_res_itree_iter_first(&vm->pending_unbind, 0,
sys/dev/pci/drm/i915/i915_vma_resource.c
412
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/i915_vma_resource.c
449
int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_vma_resource.c
459
lockdep_assert_held(&vm->mutex);
sys/dev/pci/drm/i915/i915_vma_resource.c
463
i915_vma_resource_color_adjust_range(vm, &offset, &last);
sys/dev/pci/drm/i915/i915_vma_resource.c
464
node = vma_res_itree_iter_first(&vm->pending_unbind, offset, last);
sys/dev/pci/drm/i915/i915_vma_resource.h
109
struct i915_address_space *vm;
sys/dev/pci/drm/i915/i915_vma_resource.h
202
struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_vma_resource.h
217
vma_res->vm = vm;
sys/dev/pci/drm/i915/i915_vma_resource.h
243
int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_vma_resource.h
248
int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
sys/dev/pci/drm/i915/i915_vma_resource.h
255
void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm);
sys/dev/pci/drm/i915/i915_vma_types.h
140
struct i915_address_space *vm;
sys/dev/pci/drm/i915/pxp/intel_pxp.c
100
ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
333
*vma = i915_vma_instance(obj, gt->vm, NULL);
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
408
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
409
ce->vm = i915_vm_get(pxp->ctrl_gt->vm);
sys/dev/pci/drm/i915/selftests/i915_gem.c
62
ggtt->vm.insert_page(&ggtt->vm, dma, slot,
sys/dev/pci/drm/i915/selftests/i915_gem.c
74
ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
104
i915_gem_drain_freed_objects(ggtt->vm.i915);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
121
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
122
err = i915_gem_evict_something(&ggtt->vm, NULL,
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
126
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
136
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
137
err = i915_gem_evict_something(&ggtt->vm, NULL,
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
141
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
208
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
209
err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
210
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
220
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
221
err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
222
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
262
ggtt->vm.mm.color_adjust = mock_color_adjust;
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
263
GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm));
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
301
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
302
err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
303
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
314
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
315
err = i915_gem_evict_for_node(&ggtt->vm, NULL, &target, 0);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
316
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
328
ggtt->vm.mm.color_adjust = NULL;
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
347
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
348
err = i915_gem_evict_vm(&ggtt->vm, NULL, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
349
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
359
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
360
err = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
361
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
410
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
411
err = i915_gem_gtt_insert(&ggtt->vm, NULL, &hole,
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
413
0, ggtt->vm.total,
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
423
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
425
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
431
if (i915_gem_gtt_insert(&ggtt->vm, NULL, &r->node,
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
433
0, ggtt->vm.total,
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
445
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
517
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
531
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
57
obj = i915_gem_object_create_internal(ggtt->vm.i915,
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
75
count, ggtt->vm.total / PAGE_SIZE);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
77
if (list_empty(&ggtt->vm.bound_list)) {
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
89
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1004
static int shrink_boom(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1026
purge = fake_dma_object(vm->i915, size);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1030
vma = i915_vma_instance(purge, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1043
explode = fake_dma_object(vm->i915, size);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1049
vm->fault_attr.probability = 100;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1050
vm->fault_attr.interval = 1;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1051
atomic_set(&vm->fault_attr.times, -1);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1053
vma = i915_vma_instance(explode, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1068
memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1069
cleanup_freed_objects(vm->i915);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1078
memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1082
static int misaligned_case(struct i915_address_space *vm, struct intel_memory_region *mr,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1100
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1124
if (HAS_64K_PAGES(vm->i915) && i915_gem_object_is_lmem(obj)) {
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1143
cleanup_freed_objects(vm->i915);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1147
static int misaligned_pin(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1157
if (i915_is_ggtt(vm))
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1160
for_each_memory_region(mr, vm->i915, id) {
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1161
u64 min_alignment = i915_vm_min_alignment(vm, mr->type);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1171
err = misaligned_case(vm, mr, addr + (min_alignment / 2), size, flags);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1180
err = misaligned_case(vm, mr, addr, PAGE_SIZE, flags);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1185
err = misaligned_case(vm, mr, addr, size / 2, flags);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1194
int (*func)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1215
GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1216
assert_vm_alive(&ppgtt->vm);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1218
err = func(&ppgtt->vm, 0, ppgtt->vm.total, end_time);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1220
i915_vm_put(&ppgtt->vm);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1280
int (*func)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1291
list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1292
drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1296
if (ggtt->vm.mm.color_adjust)
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1297
ggtt->vm.mm.color_adjust(node, 0,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1302
err = func(&ggtt->vm, hole_start, hole_end, end_time);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1368
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1369
err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1374
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1383
ggtt->vm.insert_page(&ggtt->vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1405
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1427
ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1429
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1431
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1451
mutex_lock(&vma->vm->mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1452
list_move_tail(&vma->vm_link, &vma->vm->bound_list);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1453
mutex_unlock(&vma->vm->mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1457
int (*func)(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1462
struct i915_address_space *vm;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1471
vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1472
err = func(vm, 0, min(vm->total, limit), end_time);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1473
i915_vm_put(vm);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1483
return exercise_mock(ggtt->vm.i915, fill_hole);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1490
return exercise_mock(ggtt->vm.i915, walk_hole);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1497
return exercise_mock(ggtt->vm.i915, pot_hole);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1504
return exercise_mock(ggtt->vm.i915, drunk_hole);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1509
struct i915_address_space *vm = vma->vm;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1518
mutex_lock(&vm->mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1519
err = i915_gem_gtt_reserve(vm, NULL, &vma->node, obj->base.size,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1529
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1551
total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1555
obj = i915_gem_object_create_internal(ggtt->vm.i915,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1569
vma = i915_vma_instance(obj, &ggtt->vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1578
total, ggtt->vm.total, err);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1596
total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1600
obj = i915_gem_object_create_internal(ggtt->vm.i915,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1615
vma = i915_vma_instance(obj, &ggtt->vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1624
total, ggtt->vm.total, err);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1645
vma = i915_vma_instance(obj, &ggtt->vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1658
0, ggtt->vm.total,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1665
total, ggtt->vm.total, err);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1691
struct i915_address_space *vm = vma->vm;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
170
if (!ppgtt->vm.allocate_va_range)
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1700
mutex_lock(&vm->mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1701
err = i915_gem_gtt_insert(vm, NULL, &vma->node, obj->base.size, 0,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1702
obj->pat_index, 0, vm->total, 0);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1709
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1725
ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1726
0, ggtt->vm.total,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1757
mutex_lock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1758
err = i915_gem_gtt_insert(&ggtt->vm, NULL, &tmp,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1763
mutex_unlock(&ggtt->vm.mutex);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1774
total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1778
obj = i915_gem_object_create_internal(ggtt->vm.i915,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1793
vma = i915_vma_instance(obj, &ggtt->vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1807
total, ggtt->vm.total, err);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
181
limit = min(ppgtt->vm.total, limit);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1819
vma = i915_vma_instance(obj, &ggtt->vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1839
vma = i915_vma_instance(obj, &ggtt->vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
185
err = i915_vm_lock_objects(&ppgtt->vm, &ww);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1857
total, ggtt->vm.total, err);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1873
total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1877
obj = i915_gem_object_create_internal(ggtt->vm.i915,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1892
vma = i915_vma_instance(obj, &ggtt->vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1901
total, ggtt->vm.total, err);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
193
err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
197
err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1976
GEM_BUG_ON(offset_in_page(to_gt(i915)->ggtt->vm.total));
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
199
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
203
ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, size);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
206
ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
208
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
215
err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, size - last);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
219
err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
221
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
225
ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
229
i915_vm_free_pt_stash(&ppgtt->vm, &stash);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
240
i915_vm_put(&ppgtt->vm);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
244
static int lowlevel_hole(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
249
i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
287
GEM_BUG_ON(count * BIT_ULL(aligned_size) > vm->total);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
297
obj = fake_dma_object(vm->i915, BIT_ULL(size));
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
315
GEM_BUG_ON(addr + BIT_ULL(aligned_size) > vm->total);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
324
if (vm->allocate_va_range) {
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
331
err = i915_vm_lock_objects(vm, &ww);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
336
if (i915_vm_alloc_pt_stash(vm, &stash,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
340
err = i915_vm_map_pt_stash(vm, &stash);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
342
vm->allocate_va_range(vm, &stash,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
344
i915_vm_free_pt_stash(vm, &stash);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
361
with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
362
vm->insert_entries(vm, mock_vma_res,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
363
i915_gem_get_pat_index(vm->i915,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
374
GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
375
with_intel_runtime_pm(vm->gt->uncore->rpm, wakeref)
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
376
vm->clear_range(vm, addr, BIT_ULL(size));
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
384
cleanup_freed_objects(vm->i915);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
392
struct i915_address_space *vm)
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
400
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
409
static int fill_hole(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
416
i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
428
if (i915_is_ggtt(vm))
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
444
obj = fake_dma_object(vm->i915, full_size);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
463
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
503
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
542
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
582
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
624
close_object_list(&objects, vm);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
625
cleanup_freed_objects(vm->i915);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
631
close_object_list(&objects, vm);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
635
static int walk_hole(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
649
if (i915_is_ggtt(vm))
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
652
min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
660
obj = fake_dma_object(vm->i915, size << PAGE_SHIFT);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
664
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
712
cleanup_freed_objects(vm->i915);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
718
static int pot_hole(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
730
if (i915_is_ggtt(vm))
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
733
min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
735
obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
739
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
793
static int drunk_hole(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
803
if (i915_is_ggtt(vm))
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
806
min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
843
obj = fake_dma_object(vm->i915, BIT_ULL(size));
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
849
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
898
cleanup_freed_objects(vm->i915);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
904
static int __shrink_hole(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
916
min_alignment = i915_vm_min_alignment(vm, INTEL_MEMORY_SYSTEM);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
924
obj = fake_dma_object(vm->i915, size);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
932
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
977
close_object_list(&objects, vm);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
978
cleanup_freed_objects(vm->i915);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
982
static int shrink_hole(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
989
vm->fault_attr.probability = 999;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
990
atomic_set(&vm->fault_attr.times, -1);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
993
vm->fault_attr.interval = prime;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
994
err = __shrink_hole(vm, hole_start, hole_end, end_time);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
999
memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
sys/dev/pci/drm/i915/selftests/i915_perf.c
316
scratch = __px_vaddr(ce->vm->scratch[0]);
sys/dev/pci/drm/i915/selftests/i915_request.c
1129
vma = i915_vma_instance(obj, gt->vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_request.c
1183
intel_gt_chipset_flush(batch->vm->gt);
sys/dev/pci/drm/i915/selftests/i915_request.c
1232
GEM_BUG_ON(request[idx]->context->vm != batch->vm);
sys/dev/pci/drm/i915/selftests/i915_request.c
1362
GEM_BUG_ON(request[idx]->context->vm != batch->vm);
sys/dev/pci/drm/i915/selftests/i915_request.c
983
vma = i915_vma_instance(obj, gt->vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_vma.c
118
struct i915_address_space *vm;
sys/dev/pci/drm/i915/selftests/i915_vma.c
122
vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/selftests/i915_vma.c
123
vma = checked_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_vma.c
124
i915_vm_put(vm);
sys/dev/pci/drm/i915/selftests/i915_vma.c
152
struct drm_i915_private *i915 = ggtt->vm.i915;
sys/dev/pci/drm/i915/selftests/i915_vma.c
272
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)),
sys/dev/pci/drm/i915/selftests/i915_vma.c
276
VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)),
sys/dev/pci/drm/i915/selftests/i915_vma.c
277
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | ggtt->vm.total),
sys/dev/pci/drm/i915/selftests/i915_vma.c
285
VALID(ggtt->vm.total - 4096, PIN_GLOBAL),
sys/dev/pci/drm/i915/selftests/i915_vma.c
286
VALID(ggtt->vm.total, PIN_GLOBAL),
sys/dev/pci/drm/i915/selftests/i915_vma.c
287
NOSPACE(ggtt->vm.total + 4096, PIN_GLOBAL),
sys/dev/pci/drm/i915/selftests/i915_vma.c
290
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (ggtt->vm.total - 4096)),
sys/dev/pci/drm/i915/selftests/i915_vma.c
303
NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | ggtt->vm.total),
sys/dev/pci/drm/i915/selftests/i915_vma.c
305
NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (ggtt->vm.total - 4096)),
sys/dev/pci/drm/i915/selftests/i915_vma.c
322
GEM_BUG_ON(!drm_mm_clean(&ggtt->vm.mm));
sys/dev/pci/drm/i915/selftests/i915_vma.c
324
obj = i915_gem_object_create_internal(ggtt->vm.i915, PAGE_SIZE);
sys/dev/pci/drm/i915/selftests/i915_vma.c
328
vma = checked_vma_instance(obj, &ggtt->vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_vma.c
43
if (vma->vm != ctx->vm) {
sys/dev/pci/drm/i915/selftests/i915_vma.c
550
struct i915_address_space *vm = &ggtt->vm;
sys/dev/pci/drm/i915/selftests/i915_vma.c
588
obj = i915_gem_object_create_internal(vm->i915, max_pages * PAGE_SIZE);
sys/dev/pci/drm/i915/selftests/i915_vma.c
623
vma = checked_vma_instance(obj, vm, &view);
sys/dev/pci/drm/i915/selftests/i915_vma.c
65
struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/i915_vma.c
71
vma = i915_vma_instance(obj, vm, view);
sys/dev/pci/drm/i915/selftests/i915_vma.c
76
if (vma->vm != vm) {
sys/dev/pci/drm/i915/selftests/i915_vma.c
78
vma->vm, vm);
sys/dev/pci/drm/i915/selftests/i915_vma.c
797
struct i915_address_space *vm = &ggtt->vm;
sys/dev/pci/drm/i915/selftests/i915_vma.c
816
obj = i915_gem_object_create_internal(vm->i915, npages * PAGE_SIZE);
sys/dev/pci/drm/i915/selftests/i915_vma.c
82
if (i915_is_ggtt(vm) != i915_vma_is_ggtt(vma)) {
sys/dev/pci/drm/i915/selftests/i915_vma.c
835
vma = checked_vma_instance(obj, vm, &view);
sys/dev/pci/drm/i915/selftests/i915_vma.c
84
i915_vma_is_ggtt(vma), i915_is_ggtt(vm));
sys/dev/pci/drm/i915/selftests/i915_vma.c
88
if (i915_vma_compare(vma, vm, view)) {
sys/dev/pci/drm/i915/selftests/i915_vma.c
882
vma = checked_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/i915_vma.c
93
if (i915_vma_compare(vma, vma->vm,
sys/dev/pci/drm/i915/selftests/igt_spinner.c
135
GEM_BUG_ON(spin->gt != ce->vm->gt);
sys/dev/pci/drm/i915/selftests/igt_spinner.c
51
*vma = i915_vma_instance(obj, ce->vm, NULL);
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
710
struct i915_address_space *vm;
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
729
vm = ce->vm;
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
740
vma = i915_vma_instance(obj, vm, NULL);
sys/dev/pci/drm/i915/selftests/mock_gem_device.c
236
to_gt(i915)->vm = i915_vm_get(&to_gt(i915)->ggtt->vm);
sys/dev/pci/drm/i915/selftests/mock_gtt.c
102
static void mock_unbind_ggtt(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/mock_gtt.c
111
ggtt->vm.gt = gt;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
112
ggtt->vm.i915 = gt->i915;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
113
ggtt->vm.is_ggtt = true;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
117
ggtt->vm.total = 4096 * PAGE_SIZE;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
119
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
120
ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
122
ggtt->vm.clear_range = mock_clear_range;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
123
ggtt->vm.insert_page = mock_insert_page;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
124
ggtt->vm.insert_entries = mock_insert_entries;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
125
ggtt->vm.cleanup = mock_cleanup;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
127
ggtt->vm.vma_ops.bind_vma = mock_bind_ggtt;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
128
ggtt->vm.vma_ops.unbind_vma = mock_unbind_ggtt;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
130
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
sys/dev/pci/drm/i915/selftests/mock_gtt.c
135
i915_address_space_fini(&ggtt->vm);
sys/dev/pci/drm/i915/selftests/mock_gtt.c
27
static void mock_insert_page(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/mock_gtt.c
35
static void mock_insert_entries(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/mock_gtt.c
41
static void mock_bind_ppgtt(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/mock_gtt.c
51
static void mock_unbind_ppgtt(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/mock_gtt.c
56
static void mock_cleanup(struct i915_address_space *vm)
sys/dev/pci/drm/i915/selftests/mock_gtt.c
60
static void mock_clear_range(struct i915_address_space *vm,
sys/dev/pci/drm/i915/selftests/mock_gtt.c
73
ppgtt->vm.gt = to_gt(i915);
sys/dev/pci/drm/i915/selftests/mock_gtt.c
74
ppgtt->vm.i915 = i915;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
75
ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
sys/dev/pci/drm/i915/selftests/mock_gtt.c
76
ppgtt->vm.dma = i915->drm.dev;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
78
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
sys/dev/pci/drm/i915/selftests/mock_gtt.c
80
ppgtt->vm.alloc_pt_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
81
ppgtt->vm.alloc_scratch_dma = alloc_pt_dma;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
83
ppgtt->vm.clear_range = mock_clear_range;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
84
ppgtt->vm.insert_page = mock_insert_page;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
85
ppgtt->vm.insert_entries = mock_insert_entries;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
86
ppgtt->vm.cleanup = mock_cleanup;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
88
ppgtt->vm.vma_ops.bind_vma = mock_bind_ppgtt;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
89
ppgtt->vm.vma_ops.unbind_vma = mock_unbind_ppgtt;
sys/dev/pci/drm/i915/selftests/mock_gtt.c
94
static void mock_bind_ggtt(struct i915_address_space *vm,
sys/dev/pci/drm/include/drm/drm_gpuvm.h
444
if (va && !list_is_last(&va->rb.entry, &va->vm->rb.list))
sys/dev/pci/drm/include/drm/drm_gpuvm.h
540
struct drm_gpuvm *vm;
sys/dev/pci/drm/include/drm/drm_gpuvm.h
625
drm_gpuvm_resv_add_fence(vm_exec->vm, &vm_exec->exec, fence,
sys/dev/pci/drm/include/drm/drm_gpuvm.h
640
return drm_gpuvm_validate(vm_exec->vm, &vm_exec->exec);
sys/dev/pci/drm/include/drm/drm_gpuvm.h
664
struct drm_gpuvm *vm;
sys/dev/pci/drm/include/drm/drm_gpuvm.h
769
drm_gem_gpuva_assert_lock_held(vm_bo->vm, obj);
sys/dev/pci/drm/include/drm/drm_gpuvm.h
77
struct drm_gpuvm *vm;
sys/dev/pci/drm/include/drm/drm_modes.h
503
void drm_display_mode_from_videomode(const struct videomode *vm,
sys/dev/pci/drm/include/drm/drm_modes.h
506
struct videomode *vm);
sys/dev/pci/drm/include/drm/drm_modes.h
507
void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags);
sys/dev/pci/drm/radeon/cik.c
3722
unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
sys/dev/pci/drm/radeon/cik_sdma.c
136
u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;
sys/dev/pci/drm/radeon/ni.c
1402
unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
sys/dev/pci/drm/radeon/ni_dma.c
125
unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
sys/dev/pci/drm/radeon/radeon.h
1890
} vm;
sys/dev/pci/drm/radeon/radeon.h
2747
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
sys/dev/pci/drm/radeon/radeon.h
2748
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
sys/dev/pci/drm/radeon/radeon.h
2749
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
sys/dev/pci/drm/radeon/radeon.h
2750
#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
sys/dev/pci/drm/radeon/radeon.h
2751
#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
sys/dev/pci/drm/radeon/radeon.h
2752
#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
sys/dev/pci/drm/radeon/radeon.h
2879
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
sys/dev/pci/drm/radeon/radeon.h
2880
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
sys/dev/pci/drm/radeon/radeon.h
2882
struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon.h
2885
struct radeon_vm *vm, int ring);
sys/dev/pci/drm/radeon/radeon.h
2887
struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon.h
2890
struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon.h
2894
struct radeon_vm *vm);
sys/dev/pci/drm/radeon/radeon.h
2896
struct radeon_vm *vm);
sys/dev/pci/drm/radeon/radeon.h
2898
struct radeon_vm *vm);
sys/dev/pci/drm/radeon/radeon.h
2904
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon.h
2907
struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon.h
489
struct radeon_vm *vm;
sys/dev/pci/drm/radeon/radeon.h
798
struct radeon_vm *vm;
sys/dev/pci/drm/radeon/radeon.h
934
struct radeon_vm vm;
sys/dev/pci/drm/radeon/radeon.h
977
struct radeon_ib *ib, struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon_asic.c
1678
.vm = {
sys/dev/pci/drm/radeon/radeon_asic.c
1796
.vm = {
sys/dev/pci/drm/radeon/radeon_asic.c
1934
.vm = {
sys/dev/pci/drm/radeon/radeon_asic.c
2104
.vm = {
sys/dev/pci/drm/radeon/radeon_asic.c
2217
.vm = {
sys/dev/pci/drm/radeon/radeon_cs.c
192
p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
sys/dev/pci/drm/radeon/radeon_cs.c
507
struct radeon_vm *vm)
sys/dev/pci/drm/radeon/radeon_cs.c
513
r = radeon_vm_update_page_directory(rdev, vm);
sys/dev/pci/drm/radeon/radeon_cs.c
517
r = radeon_vm_clear_freed(rdev, vm);
sys/dev/pci/drm/radeon/radeon_cs.c
521
if (vm->ib_bo_va == NULL) {
sys/dev/pci/drm/radeon/radeon_cs.c
526
r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
sys/dev/pci/drm/radeon/radeon_cs.c
535
bo_va = radeon_vm_bo_find(vm, bo);
sys/dev/pci/drm/radeon/radeon_cs.c
537
dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
sys/dev/pci/drm/radeon/radeon_cs.c
552
return radeon_vm_clear_invalids(rdev, vm);
sys/dev/pci/drm/radeon/radeon_cs.c
559
struct radeon_vm *vm = &fpriv->vm;
sys/dev/pci/drm/radeon/radeon_cs.c
582
mutex_lock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_cs.c
583
r = radeon_bo_vm_update_pte(parser, vm);
sys/dev/pci/drm/radeon/radeon_cs.c
603
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_cs.c
620
struct radeon_vm *vm = NULL;
sys/dev/pci/drm/radeon/radeon_cs.c
628
vm = &fpriv->vm;
sys/dev/pci/drm/radeon/radeon_cs.c
638
vm, ib_chunk->length_dw * 4);
sys/dev/pci/drm/radeon/radeon_cs.c
660
vm, ib_chunk->length_dw * 4);
sys/dev/pci/drm/radeon/radeon_gem.c
276
struct radeon_vm *vm = &fpriv->vm;
sys/dev/pci/drm/radeon/radeon_gem.c
290
bo_va = radeon_vm_bo_find(vm, rbo);
sys/dev/pci/drm/radeon/radeon_gem.c
292
bo_va = radeon_vm_bo_add(rdev, vm, rbo);
sys/dev/pci/drm/radeon/radeon_gem.c
307
struct radeon_vm *vm = &fpriv->vm;
sys/dev/pci/drm/radeon/radeon_gem.c
322
bo_va = radeon_vm_bo_find(vm, rbo);
sys/dev/pci/drm/radeon/radeon_gem.c
709
vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
sys/dev/pci/drm/radeon/radeon_gem.c
737
mutex_lock(&bo_va->vm->mutex);
sys/dev/pci/drm/radeon/radeon_gem.c
738
r = radeon_vm_clear_freed(rdev, bo_va->vm);
sys/dev/pci/drm/radeon/radeon_gem.c
746
mutex_unlock(&bo_va->vm->mutex);
sys/dev/pci/drm/radeon/radeon_gem.c
827
bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
sys/dev/pci/drm/radeon/radeon_ib.c
147
if (ib->vm) {
sys/dev/pci/drm/radeon/radeon_ib.c
149
vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
sys/dev/pci/drm/radeon/radeon_ib.c
161
if (ib->vm)
sys/dev/pci/drm/radeon/radeon_ib.c
162
radeon_vm_flush(rdev, ib->vm, ib->ring,
sys/dev/pci/drm/radeon/radeon_ib.c
180
if (ib->vm)
sys/dev/pci/drm/radeon/radeon_ib.c
181
radeon_vm_fence(rdev, ib->vm, ib->fence);
sys/dev/pci/drm/radeon/radeon_ib.c
61
struct radeon_ib *ib, struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon_ib.c
77
ib->vm = vm;
sys/dev/pci/drm/radeon/radeon_ib.c
78
if (vm) {
sys/dev/pci/drm/radeon/radeon_kms.c
636
struct radeon_vm *vm;
sys/dev/pci/drm/radeon/radeon_kms.c
657
vm = &fpriv->vm;
sys/dev/pci/drm/radeon/radeon_kms.c
658
r = radeon_vm_init(rdev, vm);
sys/dev/pci/drm/radeon/radeon_kms.c
668
vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
sys/dev/pci/drm/radeon/radeon_kms.c
670
if (!vm->ib_bo_va) {
sys/dev/pci/drm/radeon/radeon_kms.c
675
r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
sys/dev/pci/drm/radeon/radeon_kms.c
690
radeon_vm_fini(rdev, vm);
sys/dev/pci/drm/radeon/radeon_kms.c
729
struct radeon_vm *vm = &fpriv->vm;
sys/dev/pci/drm/radeon/radeon_kms.c
735
if (vm->ib_bo_va)
sys/dev/pci/drm/radeon/radeon_kms.c
736
radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
sys/dev/pci/drm/radeon/radeon_kms.c
739
radeon_vm_fini(rdev, vm);
sys/dev/pci/drm/radeon/radeon_vm.c
1006
radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
sys/dev/pci/drm/radeon/radeon_vm.c
1009
r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
sys/dev/pci/drm/radeon/radeon_vm.c
1026
radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
sys/dev/pci/drm/radeon/radeon_vm.c
1046
struct radeon_vm *vm)
sys/dev/pci/drm/radeon/radeon_vm.c
1051
spin_lock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
1052
while (!list_empty(&vm->freed)) {
sys/dev/pci/drm/radeon/radeon_vm.c
1053
bo_va = list_first_entry(&vm->freed,
sys/dev/pci/drm/radeon/radeon_vm.c
1055
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
1060
spin_lock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
1067
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
1084
struct radeon_vm *vm)
sys/dev/pci/drm/radeon/radeon_vm.c
1089
spin_lock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
1090
while (!list_empty(&vm->invalidated)) {
sys/dev/pci/drm/radeon/radeon_vm.c
1091
bo_va = list_first_entry(&vm->invalidated,
sys/dev/pci/drm/radeon/radeon_vm.c
1093
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
1099
spin_lock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
1101
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
1119
struct radeon_vm *vm = bo_va->vm;
sys/dev/pci/drm/radeon/radeon_vm.c
1123
mutex_lock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
1125
interval_tree_remove(&bo_va->it, &vm->va);
sys/dev/pci/drm/radeon/radeon_vm.c
1127
spin_lock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
1131
list_add(&bo_va->vm_status, &vm->freed);
sys/dev/pci/drm/radeon/radeon_vm.c
1136
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
1138
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
1155
spin_lock(&bo_va->vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
1158
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
sys/dev/pci/drm/radeon/radeon_vm.c
1159
spin_unlock(&bo_va->vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
1171
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
sys/dev/pci/drm/radeon/radeon_vm.c
1178
vm->ib_bo_va = NULL;
sys/dev/pci/drm/radeon/radeon_vm.c
1180
vm->ids[i].id = 0;
sys/dev/pci/drm/radeon/radeon_vm.c
1181
vm->ids[i].flushed_updates = NULL;
sys/dev/pci/drm/radeon/radeon_vm.c
1182
vm->ids[i].last_id_use = NULL;
sys/dev/pci/drm/radeon/radeon_vm.c
1184
rw_init(&vm->mutex, "vmlk");
sys/dev/pci/drm/radeon/radeon_vm.c
1185
vm->va = RB_ROOT_CACHED;
sys/dev/pci/drm/radeon/radeon_vm.c
1186
mtx_init(&vm->status_lock, IPL_TTY);
sys/dev/pci/drm/radeon/radeon_vm.c
1187
INIT_LIST_HEAD(&vm->invalidated);
sys/dev/pci/drm/radeon/radeon_vm.c
1188
INIT_LIST_HEAD(&vm->freed);
sys/dev/pci/drm/radeon/radeon_vm.c
1189
INIT_LIST_HEAD(&vm->cleared);
sys/dev/pci/drm/radeon/radeon_vm.c
1196
vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
sys/dev/pci/drm/radeon/radeon_vm.c
1197
if (vm->page_tables == NULL) {
sys/dev/pci/drm/radeon/radeon_vm.c
1204
NULL, &vm->page_directory);
sys/dev/pci/drm/radeon/radeon_vm.c
1206
kfree(vm->page_tables);
sys/dev/pci/drm/radeon/radeon_vm.c
1207
vm->page_tables = NULL;
sys/dev/pci/drm/radeon/radeon_vm.c
1210
r = radeon_vm_clear_bo(rdev, vm->page_directory);
sys/dev/pci/drm/radeon/radeon_vm.c
1212
radeon_bo_unref(&vm->page_directory);
sys/dev/pci/drm/radeon/radeon_vm.c
1213
vm->page_directory = NULL;
sys/dev/pci/drm/radeon/radeon_vm.c
1214
kfree(vm->page_tables);
sys/dev/pci/drm/radeon/radeon_vm.c
1215
vm->page_tables = NULL;
sys/dev/pci/drm/radeon/radeon_vm.c
1231
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
sys/dev/pci/drm/radeon/radeon_vm.c
1236
if (!RB_EMPTY_ROOT(&vm->va.rb_root))
sys/dev/pci/drm/radeon/radeon_vm.c
1240
&vm->va.rb_root, it.rb) {
sys/dev/pci/drm/radeon/radeon_vm.c
1241
interval_tree_remove(&bo_va->it, &vm->va);
sys/dev/pci/drm/radeon/radeon_vm.c
1250
list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
sys/dev/pci/drm/radeon/radeon_vm.c
1257
radeon_bo_unref(&vm->page_tables[i].bo);
sys/dev/pci/drm/radeon/radeon_vm.c
1258
kfree(vm->page_tables);
sys/dev/pci/drm/radeon/radeon_vm.c
1260
radeon_bo_unref(&vm->page_directory);
sys/dev/pci/drm/radeon/radeon_vm.c
1263
radeon_fence_unref(&vm->ids[i].flushed_updates);
sys/dev/pci/drm/radeon/radeon_vm.c
1264
radeon_fence_unref(&vm->ids[i].last_id_use);
sys/dev/pci/drm/radeon/radeon_vm.c
1267
mutex_destroy(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
130
struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon_vm.c
136
list = kvmalloc_array(vm->max_pde_used + 2,
sys/dev/pci/drm/radeon/radeon_vm.c
142
list[0].robj = vm->page_directory;
sys/dev/pci/drm/radeon/radeon_vm.c
149
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
sys/dev/pci/drm/radeon/radeon_vm.c
150
if (!vm->page_tables[i].bo)
sys/dev/pci/drm/radeon/radeon_vm.c
153
list[idx].robj = vm->page_tables[i].bo;
sys/dev/pci/drm/radeon/radeon_vm.c
177
struct radeon_vm *vm, int ring)
sys/dev/pci/drm/radeon/radeon_vm.c
180
struct radeon_vm_id *vm_id = &vm->ids[ring];
sys/dev/pci/drm/radeon/radeon_vm.c
236
struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon_vm.c
239
uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
sys/dev/pci/drm/radeon/radeon_vm.c
240
struct radeon_vm_id *vm_id = &vm->ids[ring];
sys/dev/pci/drm/radeon/radeon_vm.c
245
trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
sys/dev/pci/drm/radeon/radeon_vm.c
268
struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon_vm.c
271
unsigned vm_id = vm->ids[fence->ring].id;
sys/dev/pci/drm/radeon/radeon_vm.c
276
radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
sys/dev/pci/drm/radeon/radeon_vm.c
277
vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
sys/dev/pci/drm/radeon/radeon_vm.c
292
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon_vm.c
298
if (bo_va->vm == vm)
sys/dev/pci/drm/radeon/radeon_vm.c
319
struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon_vm.c
328
bo_va->vm = vm;
sys/dev/pci/drm/radeon/radeon_vm.c
337
mutex_lock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
339
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
451
struct radeon_vm *vm = bo_va->vm;
sys/dev/pci/drm/radeon/radeon_vm.c
476
mutex_lock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
481
it = interval_tree_iter_first(&vm->va, soffset, eoffset);
sys/dev/pci/drm/radeon/radeon_vm.c
489
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
500
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
506
tmp->vm = vm;
sys/dev/pci/drm/radeon/radeon_vm.c
509
interval_tree_remove(&bo_va->it, &vm->va);
sys/dev/pci/drm/radeon/radeon_vm.c
510
spin_lock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
514
list_add(&tmp->vm_status, &vm->freed);
sys/dev/pci/drm/radeon/radeon_vm.c
515
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
519
spin_lock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
522
list_add(&bo_va->vm_status, &vm->cleared);
sys/dev/pci/drm/radeon/radeon_vm.c
523
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
524
interval_tree_insert(&bo_va->it, &vm->va);
sys/dev/pci/drm/radeon/radeon_vm.c
534
if (eoffset > vm->max_pde_used)
sys/dev/pci/drm/radeon/radeon_vm.c
535
vm->max_pde_used = eoffset;
sys/dev/pci/drm/radeon/radeon_vm.c
543
if (vm->page_tables[pt_idx].bo)
sys/dev/pci/drm/radeon/radeon_vm.c
547
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
563
mutex_lock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
564
if (vm->page_tables[pt_idx].bo) {
sys/dev/pci/drm/radeon/radeon_vm.c
566
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
568
mutex_lock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
572
vm->page_tables[pt_idx].addr = 0;
sys/dev/pci/drm/radeon/radeon_vm.c
573
vm->page_tables[pt_idx].bo = pt;
sys/dev/pci/drm/radeon/radeon_vm.c
576
mutex_unlock(&vm->mutex);
sys/dev/pci/drm/radeon/radeon_vm.c
639
struct radeon_vm *vm)
sys/dev/pci/drm/radeon/radeon_vm.c
641
struct radeon_bo *pd = vm->page_directory;
sys/dev/pci/drm/radeon/radeon_vm.c
653
ndw += vm->max_pde_used * 6;
sys/dev/pci/drm/radeon/radeon_vm.c
665
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
sys/dev/pci/drm/radeon/radeon_vm.c
666
struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
sys/dev/pci/drm/radeon/radeon_vm.c
673
if (vm->page_tables[pt_idx].addr == pt)
sys/dev/pci/drm/radeon/radeon_vm.c
675
vm->page_tables[pt_idx].addr = pt;
sys/dev/pci/drm/radeon/radeon_vm.c
813
struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon_vm.c
826
struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
sys/dev/pci/drm/radeon/radeon_vm.c
884
static void radeon_vm_fence_pts(struct radeon_vm *vm,
sys/dev/pci/drm/radeon/radeon_vm.c
894
radeon_bo_fence(vm->page_tables[i].bo, fence, true);
sys/dev/pci/drm/radeon/radeon_vm.c
913
struct radeon_vm *vm = bo_va->vm;
sys/dev/pci/drm/radeon/radeon_vm.c
922
bo_va->bo, vm);
sys/dev/pci/drm/radeon/radeon_vm.c
926
spin_lock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
929
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/radeon/radeon_vm.c
935
list_add(&bo_va->vm_status, &vm->cleared);
sys/dev/pci/drm/radeon/radeon_vm.c
937
spin_unlock(&vm->status_lock);
sys/dev/pci/drm/radeon/si.c
3383
unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
sys/dev/vmm/vmm.c
173
vm_find(uint32_t id, struct vm **res)
sys/dev/vmm/vmm.c
176
struct vm *vm;
sys/dev/vmm/vmm.c
182
SLIST_FOREACH(vm, &vmm_softc->vm_list, vm_link) {
sys/dev/vmm/vmm.c
183
if (vm->vm_id == id) {
sys/dev/vmm/vmm.c
192
(vm->vm_creator_pid != p->p_p->ps_pid))
sys/dev/vmm/vmm.c
195
refcnt_take(&vm->vm_refcnt);
sys/dev/vmm/vmm.c
196
*res = vm;
sys/dev/vmm/vmm.c
328
vm_find_vcpu(struct vm *vm, uint32_t id)
sys/dev/vmm/vmm.c
332
if (vm == NULL)
sys/dev/vmm/vmm.c
335
SLIST_FOREACH(vcpu, &vm->vm_vcpu_list, vc_vcpu_link) {
sys/dev/vmm/vmm.c
360
struct vm *vm;
sys/dev/vmm/vmm.c
391
vm = pool_get(&vm_pool, PR_WAITOK | PR_ZERO);
sys/dev/vmm/vmm.c
394
vm->vm_creator_pid = p->p_p->ps_pid;
sys/dev/vmm/vmm.c
395
strncpy(vm->vm_name, vcp->vcp_name, VMM_MAX_NAME_LEN - 1);
sys/dev/vmm/vmm.c
398
vm->vm_pmap = pmap_create();
sys/dev/vmm/vmm.c
401
vm->vm_nmemranges = vcp->vcp_nmemranges;
sys/dev/vmm/vmm.c
402
memcpy(vm->vm_memranges, vcp->vcp_memranges,
sys/dev/vmm/vmm.c
403
vm->vm_nmemranges * sizeof(vm->vm_memranges[0]));
sys/dev/vmm/vmm.c
404
vm->vm_memory_size = memsize; /* Calculated above. */
sys/dev/vmm/vmm.c
408
for (i = 0; i < vm->vm_nmemranges; i++) {
sys/dev/vmm/vmm.c
409
vmr = &vm->vm_memranges[i];
sys/dev/vmm/vmm.c
444
vm->vm_memory_slot[i] = uao;
sys/dev/vmm/vmm.c
447
if (vm_impl_init(vm, p)) {
sys/dev/vmm/vmm.c
448
printf("failed to init arch-specific features for vm %p\n", vm);
sys/dev/vmm/vmm.c
453
vm->vm_vcpu_ct = 0;
sys/dev/vmm/vmm.c
456
SLIST_INIT(&vm->vm_vcpu_list);
sys/dev/vmm/vmm.c
460
vcpu->vc_parent = vm;
sys/dev/vmm/vmm.c
461
vcpu->vc_id = vm->vm_vcpu_ct;
sys/dev/vmm/vmm.c
462
vm->vm_vcpu_ct++;
sys/dev/vmm/vmm.c
465
printf("failed to init vcpu %d for vm %p\n", i, vm);
sys/dev/vmm/vmm.c
470
SLIST_INSERT_HEAD(&vm->vm_vcpu_list, vcpu, vc_vcpu_link);
sys/dev/vmm/vmm.c
476
vm->vm_id = vmm_softc->vm_idx;
sys/dev/vmm/vmm.c
477
vcp->vcp_id = vm->vm_id;
sys/dev/vmm/vmm.c
479
refcnt_init(&vm->vm_refcnt);
sys/dev/vmm/vmm.c
480
SLIST_INSERT_HEAD(&vmm_softc->vm_list, vm, vm_link);
sys/dev/vmm/vmm.c
484
memcpy(vcp->vcp_memranges, vm->vm_memranges,
sys/dev/vmm/vmm.c
490
vm_teardown(&vm);
sys/dev/vmm/vmm.c
576
vm_teardown(struct vm **target)
sys/dev/vmm/vmm.c
581
struct vm *vm = *target;
sys/dev/vmm/vmm.c
587
SLIST_FOREACH_SAFE(vcpu, &vm->vm_vcpu_list, vc_vcpu_link, tmp) {
sys/dev/vmm/vmm.c
588
SLIST_REMOVE(&vm->vm_vcpu_list, vcpu, vcpu, vc_vcpu_link);
sys/dev/vmm/vmm.c
595
for (i = 0; i < vm->vm_nmemranges; i++) {
sys/dev/vmm/vmm.c
596
sva = vm->vm_memranges[i].vmr_gpa;
sys/dev/vmm/vmm.c
597
eva = sva + vm->vm_memranges[i].vmr_size - 1;
sys/dev/vmm/vmm.c
598
pmap_remove(vm->vm_pmap, sva, eva);
sys/dev/vmm/vmm.c
602
for (i = 0; i < vm->vm_nmemranges; i++) {
sys/dev/vmm/vmm.c
603
uao = vm->vm_memory_slot[i];
sys/dev/vmm/vmm.c
604
vm->vm_memory_slot[i] = NULL;
sys/dev/vmm/vmm.c
610
pmap_destroy(vm->vm_pmap);
sys/dev/vmm/vmm.c
611
vm->vm_pmap = NULL;
sys/dev/vmm/vmm.c
613
pool_put(&vm_pool, vm);
sys/dev/vmm/vmm.c
639
struct vm *vm;
sys/dev/vmm/vmm.c
664
SLIST_FOREACH(vm, &vmm_softc->vm_list, vm_link) {
sys/dev/vmm/vmm.c
665
refcnt_take(&vm->vm_refcnt);
sys/dev/vmm/vmm.c
667
out[i].vir_memory_size = vm->vm_memory_size;
sys/dev/vmm/vmm.c
669
pmap_resident_count(vm->vm_pmap) * PAGE_SIZE;
sys/dev/vmm/vmm.c
670
out[i].vir_ncpus = vm->vm_vcpu_ct;
sys/dev/vmm/vmm.c
671
out[i].vir_id = vm->vm_id;
sys/dev/vmm/vmm.c
672
out[i].vir_creator_pid = vm->vm_creator_pid;
sys/dev/vmm/vmm.c
673
strlcpy(out[i].vir_name, vm->vm_name, VMM_MAX_NAME_LEN);
sys/dev/vmm/vmm.c
675
for (j = 0; j < vm->vm_vcpu_ct; j++) {
sys/dev/vmm/vmm.c
677
SLIST_FOREACH(vcpu, &vm->vm_vcpu_list,
sys/dev/vmm/vmm.c
685
refcnt_rele_wake(&vm->vm_refcnt);
sys/dev/vmm/vmm.c
716
struct vm *vm;
sys/dev/vmm/vmm.c
722
error = vm_find(vtp->vtp_vm_id, &vm);
sys/dev/vmm/vmm.c
727
if (atomic_cas_uint(&vm->vm_dying, 0, 1) == 1) {
sys/dev/vmm/vmm.c
728
refcnt_rele_wake(&vm->vm_refcnt);
sys/dev/vmm/vmm.c
734
SLIST_REMOVE(&vmm_softc->vm_list, vm, vm, vm_link);
sys/dev/vmm/vmm.c
738
if (refcnt_rele(&vm->vm_refcnt))
sys/dev/vmm/vmm.c
74
pool_init(&vm_pool, sizeof(struct vm), 0, IPL_MPFLOOR, PR_WAITOK,
sys/dev/vmm/vmm.c
740
__func__, vm->vm_id, vm);
sys/dev/vmm/vmm.c
743
refcnt_finalize(&vm->vm_refcnt, __func__);
sys/dev/vmm/vmm.c
745
vm_id = vm->vm_id;
sys/dev/vmm/vmm.c
746
nvcpu = vm->vm_vcpu_ct;
sys/dev/vmm/vmm.c
748
vm_teardown(&vm);
sys/dev/vmm/vmm.c
779
struct vm *vm;
sys/dev/vmm/vmm.c
784
error = vm_find(vrp->vrp_vm_id, &vm);
sys/dev/vmm/vmm.c
793
vcpu = vm_find_vcpu(vm, vrp->vrp_vcpu_id);
sys/dev/vmm/vmm.c
816
refcnt_rele_wake(&vm->vm_refcnt);
sys/dev/vmm/vmm.c
863
struct vm *vm;
sys/dev/vmm/vmm.c
868
ret = vm_find(vsp->vsp_vm_id, &vm);
sys/dev/vmm/vmm.c
873
if (vm->vm_nmemranges != vsp->vsp_nmemranges)
sys/dev/vmm/vmm.c
875
n = vm->vm_nmemranges;
sys/dev/vmm/vmm.c
879
src = &vm->vm_memranges[i];
sys/dev/vmm/vmm.c
906
uao = vm->vm_memory_slot[i];
sys/dev/vmm/vmm.c
939
refcnt_rele_wake(&vm->vm_refcnt);
sys/dev/vmm/vmm.h
196
SLIST_ENTRY(vm) vm_link; /* [V] */
sys/dev/vmm/vmm.h
199
SLIST_HEAD(vmlist_head, vm);
sys/dev/vmm/vmm.h
252
int vm_find(uint32_t, struct vm **);
sys/dev/vmm/vmm.h
255
struct vcpu *vm_find_vcpu(struct vm *, uint32_t);
sys/dev/vmm/vmm.h
258
void vm_teardown(struct vm **);
sys/kern/kern_clock.c
325
struct vmspace *vm = p->p_vmspace;
sys/kern/kern_clock.c
334
(vm->vm_tsize << (PAGE_SHIFT - 10)) * count;
sys/kern/kern_clock.c
336
(vm->vm_dused << (PAGE_SHIFT - 10)) * count;
sys/kern/kern_clock.c
338
(vm->vm_ssize << (PAGE_SHIFT - 10)) * count;
sys/kern/kern_exec.c
273
struct vmspace *vm = p->p_vmspace;
sys/kern/kern_exec.c
453
vm = pr->ps_vmspace;
sys/kern/kern_exec.c
455
vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
sys/kern/kern_exec.c
456
vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
sys/kern/kern_exec.c
458
vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
sys/kern/kern_exec.c
459
vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
sys/kern/kern_exec.c
461
vm->vm_dused = 0;
sys/kern/kern_exec.c
462
vm->vm_ssize = atop(round_page(pack.ep_ssize));
sys/kern/kern_exec.c
463
vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
sys/kern/kern_exec.c
464
vm->vm_minsaddr = (char *)pack.ep_minsaddr;
sys/kern/kern_exec.c
478
pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
sys/kern/kern_exec.c
479
if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
sys/kern/kern_exec.c
483
pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
sys/kern/kern_exec.c
484
if (uvm_map_protect(&vm->vm_map,
sys/kern/kern_exec.c
486
(vaddr_t)vm->vm_minsaddr, PROT_NONE, 0, TRUE, FALSE))
sys/kern/kern_exec.c
497
stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
sys/kern/kern_exec.c
500
stack = (char *)(vm->vm_minsaddr - len);
sys/kern/kern_exec.c
792
uvm_unmap(&vm->vm_map, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
sys/kern/kern_fork.c
357
struct vmspace *vm;
sys/kern/kern_fork.c
439
vm = pr->ps_vmspace;
sys/kern/kern_fork.c
443
forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
sys/kern/kern_fork.c
446
forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
sys/kern/kern_resource.c
306
struct vmspace *vm = p->p_vmspace;
sys/kern/kern_resource.c
312
addr = (vaddr_t)vm->vm_maxsaddr +
sys/kern/kern_resource.c
315
addr = (vaddr_t)vm->vm_minsaddr -
sys/kern/kern_resource.c
322
addr = (vaddr_t)vm->vm_maxsaddr +
sys/kern/kern_resource.c
325
addr = (vaddr_t)vm->vm_minsaddr -
sys/kern/kern_resource.c
331
(void) uvm_map_protect(&vm->vm_map, addr,
sys/kern/kern_sig.c
1897
struct vmspace *vm = p->p_vmspace;
sys/kern/kern_sig.c
1914
if (USPACE + ptoa(vm->vm_dsize + vm->vm_ssize) >= lim_cur(RLIMIT_CORE))
sys/kern/kern_sysctl.c
2066
struct vmspace *vm = NULL;
sys/kern/kern_sysctl.c
2073
vm = pr->ps_vmspace;
sys/kern/kern_sysctl.c
2074
uvmspace_addref(vm);
sys/kern/kern_sysctl.c
2085
p, pr, s, vm, pr->ps_limit, pr->ps_sigacts, &tu, isthread,
sys/kern/kern_sysctl.c
2104
if ((pr->ps_flags & PS_EMBRYO) == 0 && vm != NULL)
sys/kern/kern_sysctl.c
2105
ki->p_vm_rssize = vm_resident_count(vm);
sys/kern/kern_sysctl.c
2124
uvmspace_free(vm);
sys/kern/kern_sysctl.c
2161
struct vmspace *vm;
sys/kern/kern_sysctl.c
2208
vm = vpr->ps_vmspace;
sys/kern/kern_sysctl.c
2209
uvmspace_addref(vm);
sys/kern/kern_sysctl.c
2224
if ((error = uvm_io(&vm->vm_map, &uio, 0)) != 0)
sys/kern/kern_sysctl.c
2281
if ((error = uvm_io(&vm->vm_map, &uio, 0)) != 0)
sys/kern/kern_sysctl.c
2303
if ((error = uvm_io(&vm->vm_map, &uio, 0)) != 0)
sys/kern/kern_sysctl.c
2351
uvmspace_free(vm);
sys/kern/sys_process.c
920
struct vmspace *vm;
sys/kern/sys_process.c
932
vm = tr->ps_vmspace;
sys/kern/sys_process.c
933
if ((tr->ps_flags & PS_EXITING) || (vm->vm_refcnt < 1))
sys/kern/sys_process.c
937
uvmspace_addref(vm);
sys/kern/sys_process.c
939
error = uvm_io(&vm->vm_map, uio, UVM_IO_FIXPROT);
sys/kern/sys_process.c
941
uvmspace_free(vm);
sys/kern/sysv_shm.c
158
shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
sys/kern/sysv_shm.c
175
uvm_unmap(&vm->vm_map, trunc_page(shmmap_s->va), end);
sys/kern/sysv_shm.c
515
shmexit(struct vmspace *vm)
sys/kern/sysv_shm.c
522
shmmap_h = (struct shmmap_head *)vm->vm_shm;
sys/kern/sysv_shm.c
529
shm_delete_mapping(vm, shmmap_s);
sys/kern/sysv_shm.c
530
free(vm->vm_shm, M_SHM, size);
sys/kern/sysv_shm.c
531
vm->vm_shm = NULL;
sys/sys/sysctl.h
594
praddr, sess, vm, lim, sa, tu, isthread, show_addresses) \
sys/sys/sysctl.h
681
if ((vm) != NULL) { \
sys/sys/sysctl.h
682
(kp)->p_vm_rssize = (vm)->vm_rssize; \
sys/sys/sysctl.h
683
(kp)->p_vm_tsize = (vm)->vm_tsize; \
sys/sys/sysctl.h
684
(kp)->p_vm_dsize = (vm)->vm_dused; \
sys/sys/sysctl.h
685
(kp)->p_vm_ssize = (vm)->vm_ssize; \
sys/uvm/uvm_addr.c
528
struct vmspace *vm;
sys/uvm/uvm_addr.c
537
vm = (struct vmspace *)map;
sys/uvm/uvm_addr.c
554
hint = uvm_map_hint(vm, prot, minaddr, maxaddr);
sys/uvm/uvm_extern.h
249
#define vm_resident_count(vm) (pmap_resident_count((vm)->vm_map.pmap))
sys/uvm/uvm_glue.c
316
struct vmspace *vm = p->p_vmspace;
sys/uvm/uvm_glue.c
323
uvmspace_purge(vm);
sys/uvm/uvm_glue.c
332
struct vmspace *vm = pr->ps_vmspace;
sys/uvm/uvm_glue.c
335
uvmspace_free(vm);
sys/uvm/uvm_map.c
2747
struct vmspace *vm;
sys/uvm/uvm_map.c
2756
vm = (struct vmspace *)map;
sys/uvm/uvm_map.c
2757
stack_begin = MIN((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr);
sys/uvm/uvm_map.c
2758
stack_end = MAX((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr);
sys/uvm/uvm_map.c
2793
if (heap != vm->vm_dused) {
sys/uvm/uvm_map.c
2798
heap, vm->vm_dused,
sys/uvm/uvm_map.c
2848
struct vmspace *vm;
sys/uvm/uvm_map.c
2867
vm = (struct vmspace *)map;
sys/uvm/uvm_map.c
2870
vm->vm_refcnt, vm->vm_shm, vm->vm_rssize, vm->vm_swrss);
sys/uvm/uvm_map.c
2872
vm->vm_tsize, vm->vm_dsize);
sys/uvm/uvm_map.c
2874
vm->vm_taddr, vm->vm_daddr);
sys/uvm/uvm_map.c
2876
vm->vm_maxsaddr, vm->vm_minsaddr);
sys/uvm/uvm_map.c
3255
struct vmspace *vm;
sys/uvm/uvm_map.c
3257
vm = pool_get(&uvm_vmspace_pool, PR_WAITOK | PR_ZERO);
sys/uvm/uvm_map.c
3258
uvmspace_init(vm, NULL, min, max, pageable, remove_holes);
sys/uvm/uvm_map.c
3259
return (vm);
sys/uvm/uvm_map.c
3269
uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t min, vaddr_t max,
sys/uvm/uvm_map.c
3279
uvm_map_setup(&vm->vm_map, pmap, min, max,
sys/uvm/uvm_map.c
3282
vm->vm_refcnt = 1;
sys/uvm/uvm_map.c
3285
pmap_remove_holes(vm);
sys/uvm/uvm_map.c
3297
struct vmspace *vm = pr->ps_vmspace;
sys/uvm/uvm_map.c
3299
uvmspace_addref(vm);
sys/uvm/uvm_map.c
3300
return vm;
sys/uvm/uvm_map.c
3410
uvmspace_addref(struct vmspace *vm)
sys/uvm/uvm_map.c
3412
KASSERT(vm->vm_refcnt > 0);
sys/uvm/uvm_map.c
3413
atomic_inc_int(&vm->vm_refcnt);
sys/uvm/uvm_map.c
3417
uvmspace_purge(struct vmspace *vm)
sys/uvm/uvm_map.c
3421
if (vm->vm_shm != NULL) {
sys/uvm/uvm_map.c
3423
shmexit(vm);
sys/uvm/uvm_map.c
3431
uvm_map_teardown(&vm->vm_map);
sys/uvm/uvm_map.c
3438
uvmspace_free(struct vmspace *vm)
sys/uvm/uvm_map.c
3440
if (vm == NULL)
sys/uvm/uvm_map.c
3443
if (atomic_dec_int_nv(&vm->vm_refcnt) == 0) {
sys/uvm/uvm_map.c
3449
uvmspace_purge(vm);
sys/uvm/uvm_map.c
3451
pmap_destroy(vm->vm_map.pmap);
sys/uvm/uvm_map.c
3452
vm->vm_map.pmap = NULL;
sys/uvm/uvm_map.c
3454
pool_put(&uvm_vmspace_pool, vm);
sys/uvm/uvm_map.c
3828
uvm_map_hint(struct vmspace *vm, vm_prot_t prot, vaddr_t minaddr,
sys/uvm/uvm_map.c
3840
(vaddr_t)vm->vm_daddr >= I386_MAX_EXE_ADDR) {
sys/uvm/uvm_map.c
3856
addr = (vaddr_t)vm->vm_daddr + BRKSIZ;
sys/uvm/uvm_map.c
4694
struct vmspace *vm;
sys/uvm/uvm_map.c
4703
vm = (struct vmspace *)map;
sys/uvm/uvm_map.c
4704
b_start = (vaddr_t)vm->vm_daddr;
sys/uvm/uvm_map.c
4706
s_start = MIN((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr);
sys/uvm/uvm_map.c
4707
s_end = MAX((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr);
sys/uvm/uvm_map.c
4715
vm, b_start, b_end, s_start, s_end);
sys/uvm/uvm_map.c
488
struct vmspace *vm;
sys/uvm/uvm_map.c
496
vm = (struct vmspace *)map;
sys/uvm/uvm_map.c
497
stack_begin = MIN((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr);
sys/uvm/uvm_map.c
498
stack_end = MAX((vaddr_t)vm->vm_maxsaddr, (vaddr_t)vm->vm_minsaddr);
sys/uvm/uvm_unix.c
110
struct vmspace *vm = p->p_vmspace;
sys/uvm/uvm_unix.c
111
vm_map_t map = &vm->vm_map;
sys/uvm/uvm_unix.c
115
if (sp < (vaddr_t)vm->vm_maxsaddr)
sys/uvm/uvm_unix.c
118
if (sp >= (vaddr_t)vm->vm_minsaddr)
sys/uvm/uvm_unix.c
126
if (sp < (vaddr_t)vm->vm_maxsaddr + ptoa(vm->vm_ssize))
sys/uvm/uvm_unix.c
128
if (sp >= (vaddr_t)vm->vm_minsaddr - ptoa(vm->vm_ssize))
sys/uvm/uvm_unix.c
134
si = atop(sp - (vaddr_t)vm->vm_maxsaddr) - vm->vm_ssize + 1;
sys/uvm/uvm_unix.c
136
si = atop((vaddr_t)vm->vm_minsaddr - sp) - vm->vm_ssize;
sys/uvm/uvm_unix.c
138
if (vm->vm_ssize + si <= atop(lim_cur(RLIMIT_STACK)))
sys/uvm/uvm_unix.c
139
vm->vm_ssize += si;
sys/uvm/uvm_unix.c
278
struct vmspace *vm = p->p_vmspace;
sys/uvm/uvm_unix.c
279
struct vm_map *map = &vm->vm_map;
sys/uvm/uvm_unix.c
69
struct vmspace *vm = p->p_vmspace;
sys/uvm/uvm_unix.c
73
base = (vaddr_t)vm->vm_daddr;
sys/uvm/uvm_unix.c
78
old = round_page(base + ptoa(vm->vm_dsize));
sys/uvm/uvm_unix.c
85
error = uvm_map(&vm->vm_map, &old, new - old, NULL,
sys/uvm/uvm_unix.c
95
vm->vm_dsize += atop(new - old);
sys/uvm/uvm_unix.c
97
uvm_unmap(&vm->vm_map, new, old);
sys/uvm/uvm_unix.c
98
vm->vm_dsize -= atop(old - new);
usr.bin/systat/engine.c
587
struct view_ent *ve, *vm = NULL;
usr.bin/systat/engine.c
597
if (vm)
usr.bin/systat/engine.c
599
vm = ve;
usr.bin/systat/engine.c
603
if (vm) {
usr.bin/systat/engine.c
604
set_curr_view(vm);
usr.sbin/vmd/arm64_vm.c
151
sev_init(struct vmd_vm *vm)
usr.sbin/vmd/arm64_vm.c
159
sev_shutdown(struct vmd_vm *vm)
usr.sbin/vmd/arm64_vm.c
167
sev_activate(struct vmd_vm *vm, int vcpu_id)
usr.sbin/vmd/arm64_vm.c
175
sev_encrypt_memory(struct vmd_vm *vm)
usr.sbin/vmd/arm64_vm.c
183
sev_encrypt_state(struct vmd_vm *vm, int vcpu_id)
usr.sbin/vmd/arm64_vm.c
191
sev_launch_finalize(struct vmd_vm *vm)
usr.sbin/vmd/arm64_vm.c
23
create_memory_map(struct vmd_vm *vm)
usr.sbin/vmd/arm64_vm.c
30
load_firmware(struct vmd_vm *vm, struct vcpu_reg_state *vrs)
usr.sbin/vmd/arm64_vm.c
38
init_emulated_hw(struct vmd_vm *vm, int child_cdrom,
usr.sbin/vmd/arm64_vm.c
47
pause_vm_md(struct vmd_vm *vm)
usr.sbin/vmd/arm64_vm.c
54
unpause_vm_md(struct vmd_vm *vm)
usr.sbin/vmd/arm64_vm.c
84
intr_pending(struct vmd_vm *vm)
usr.sbin/vmd/arm64_vm.c
92
intr_toggle_el(struct vmd_vm *vm, int irq, int val)
usr.sbin/vmd/arm64_vm.c
99
intr_ack(struct vmd_vm *vm)
usr.sbin/vmd/config.c
103
struct vmd_vm *vm;
usr.sbin/vmd/config.c
116
while ((vm = TAILQ_FIRST(env->vmd_vms)) != NULL) {
usr.sbin/vmd/config.c
117
vm_remove(vm, __func__);
usr.sbin/vmd/config.c
188
config_setvm(struct privsep *ps, struct vmd_vm *vm, uint32_t peerid, uid_t uid)
usr.sbin/vmd/config.c
192
struct vmop_create_params *vmc = &vm->vm_params;
usr.sbin/vmd/config.c
204
if (vm->vm_state & VM_STATE_RUNNING) {
usr.sbin/vmd/config.c
216
if (vm->vm_start_tv.tv_sec) {
usr.sbin/vmd/config.c
217
timersub(&tv, &vm->vm_start_tv, &since_last);
usr.sbin/vmd/config.c
222
vm->vm_start_limit++;
usr.sbin/vmd/config.c
225
vm->vm_start_limit = 0;
usr.sbin/vmd/config.c
229
" limit %d/%d", __func__, vm->vm_vmid, since_last.tv_sec,
usr.sbin/vmd/config.c
230
since_last.tv_usec, vm->vm_start_limit,
usr.sbin/vmd/config.c
233
if (vm->vm_start_limit >= VM_START_RATE_LIMIT) {
usr.sbin/vmd/config.c
235
vm->vm_vmid);
usr.sbin/vmd/config.c
239
vm->vm_start_tv = tv;
usr.sbin/vmd/config.c
254
vm->vm_peerid = peerid;
usr.sbin/vmd/config.c
255
vm->vm_uid = uid;
usr.sbin/vmd/config.c
260
if (vm->vm_kernel == -1) {
usr.sbin/vmd/config.c
261
if (vm->vm_kernel_path != NULL) {
usr.sbin/vmd/config.c
263
kernfd = open(vm->vm_kernel_path, O_RDONLY | O_CLOEXEC);
usr.sbin/vmd/config.c
268
vm->vm_kernel_path);
usr.sbin/vmd/config.c
292
"%s", vmc->vmc_name, vm->vm_kernel_path);
usr.sbin/vmd/config.c
297
vm->vm_kernel = kernfd;
usr.sbin/vmd/config.c
373
vif = &vm->vm_ifs[i];
usr.sbin/vmd/config.c
443
if (vm->vm_ttyname[0] == '\0') {
usr.sbin/vmd/config.c
444
if (vm_opentty(vm) == -1) {
usr.sbin/vmd/config.c
446
vm->vm_ttyname[0] == '\0' ? "" : vm->vm_ttyname);
usr.sbin/vmd/config.c
450
if ((fd = dup(vm->vm_tty)) == -1) {
usr.sbin/vmd/config.c
451
log_warn("%s: can't re-open tty %s", __func__, vm->vm_ttyname);
usr.sbin/vmd/config.c
458
vm->vm_vmid, vm->vm_kernel, vmc, sizeof(*vmc));
usr.sbin/vmd/config.c
462
vm->vm_vmid, cdromfd, NULL, 0);
usr.sbin/vmd/config.c
469
IMSG_VMDOP_START_VM_DISK, vm->vm_vmid,
usr.sbin/vmd/config.c
475
vm->vm_vmid, tapfds[i], &i, sizeof(i));
usr.sbin/vmd/config.c
478
var.var_vmid = vm->vm_vmid;
usr.sbin/vmd/config.c
481
vm->vm_vmid, dup(tapfds[i]), &var, sizeof(var));
usr.sbin/vmd/config.c
485
vm->vm_vmid, fd, NULL, 0);
usr.sbin/vmd/config.c
497
vm->vm_state |= VM_STATE_RUNNING;
usr.sbin/vmd/config.c
503
if (vm->vm_kernel != -1)
usr.sbin/vmd/config.c
517
if (vm->vm_from_config) {
usr.sbin/vmd/config.c
518
vm_stop(vm, 0, __func__);
usr.sbin/vmd/config.c
520
vm_remove(vm, __func__);
usr.sbin/vmd/config.c
530
struct vmd_vm *vm = NULL;
usr.sbin/vmd/config.c
542
if (vm_register(ps, &vmc, &vm, peer_id, 0) == -1)
usr.sbin/vmd/config.c
545
vm->vm_state |= VM_STATE_RUNNING;
usr.sbin/vmd/config.c
546
vm->vm_peerid = (uint32_t)-1;
usr.sbin/vmd/config.c
547
vm->vm_kernel = fd;
usr.sbin/vmd/config.c
554
vm_remove(vm, __func__);
usr.sbin/vmd/config.c
564
struct vmd_vm *vm;
usr.sbin/vmd/config.c
571
if ((vm = vm_getbyvmid(peer_id)) == NULL) {
usr.sbin/vmd/config.c
579
if (n >= vm->vm_params.vmc_ndisks || fd == -1) {
usr.sbin/vmd/config.c
584
idx = vm->vm_params.vmc_diskbases[n]++;
usr.sbin/vmd/config.c
590
vm->vm_disks[n][idx] = fd;
usr.sbin/vmd/config.c
597
struct vmd_vm *vm;
usr.sbin/vmd/config.c
604
if ((vm = vm_getbyvmid(peer_id)) == NULL) {
usr.sbin/vmd/config.c
612
if (n >= vm->vm_params.vmc_nnics ||
usr.sbin/vmd/config.c
613
vm->vm_ifs[n].vif_fd != -1 || fd == -1) {
usr.sbin/vmd/config.c
617
vm->vm_ifs[n].vif_fd = fd;
usr.sbin/vmd/config.c
629
struct vmd_vm *vm;
usr.sbin/vmd/config.c
635
if ((vm = vm_getbyvmid(peer_id)) == NULL) {
usr.sbin/vmd/config.c
646
vm->vm_cdrom = fd;
usr.sbin/vmd/dhcp.c
142
vm = vm_getbyid(dev->vmm_id);
usr.sbin/vmd/dhcp.c
143
if (vm && res_hnok(vm->vm_params.vmc_name))
usr.sbin/vmd/dhcp.c
144
hostname = vm->vm_params.vmc_name;
usr.sbin/vmd/dhcp.c
55
struct vmd_vm *vm;
usr.sbin/vmd/loadfile_elf.c
211
loadfile_elf(gzFile fp, struct vmd_vm *vm, struct vcpu_reg_state *vrs,
usr.sbin/vmd/loadfile_elf.c
251
memcpy(bootmac, vm->vm_params.vmc_macs[0], ETHER_ADDR_LEN);
usr.sbin/vmd/loadfile_elf.c
253
n = create_bios_memmap(&vm->vm_params, memmap);
usr.sbin/vmd/parse.y
150
| grammar vm '\n'
usr.sbin/vmd/parse.y
318
vm : VM string vm_instance {
usr.sbin/vmd/parse.y
362
struct vmd_vm *vm;
usr.sbin/vmd/parse.y
371
&vm, 0, 0);
usr.sbin/vmd/parse.y
377
(vm->vm_state & VM_STATE_RUNNING) ?
usr.sbin/vmd/parse.y
385
vm->vm_state |= VM_STATE_DISABLED;
usr.sbin/vmd/parse.y
387
vm->vm_state |= VM_STATE_WAITING;
usr.sbin/vmd/parse.y
395
vm->vm_kernel_path = kernel;
usr.sbin/vmd/parse.y
396
vm->vm_kernel = -1;
usr.sbin/vmd/parse.y
397
vm->vm_from_config = 1;
usr.sbin/vmd/priv.c
325
vm_priv_ifconfig(struct privsep *ps, struct vmd_vm *vm)
usr.sbin/vmd/priv.c
337
vif = &vm->vm_ifs[i];
usr.sbin/vmd/priv.c
349
"vm%u-if%u-%s", vm->vm_vmid, i, vm->vm_params.vmc_name);
usr.sbin/vmd/priv.c
436
if (vm->vm_params.vmc_ifflags[i] & VMIFF_LOCAL) {
usr.sbin/vmd/priv.c
451
vm->vm_vmid, i, 0)) == 0)
usr.sbin/vmd/priv.c
462
if ((vm->vm_params.vmc_ifflags[i] & VMIFF_LOCAL) &&
usr.sbin/vmd/priv.c
478
vm->vm_vmid, i, 0, &sin6->sin6_addr) == -1)
usr.sbin/vmd/sev.c
142
sev_encrypt_memory(struct vmd_vm *vm)
usr.sbin/vmd/sev.c
144
struct vmop_create_params *vmc = &vm->vm_params;
usr.sbin/vmd/sev.c
151
for (i = 0; i < vm->vm_sev_nmemsegments; i++) {
usr.sbin/vmd/sev.c
152
vmr = &vm->vm_sev_memsegments[i];
usr.sbin/vmd/sev.c
155
if (psp_launch_update(vm->vm_sev_handle, vmr->vmr_va,
usr.sbin/vmd/sev.c
174
sev_activate(struct vmd_vm *vm, int vcpu_id)
usr.sbin/vmd/sev.c
176
struct vmop_create_params *vmc = &vm->vm_params;
usr.sbin/vmd/sev.c
183
psp_activate(vm->vm_sev_handle, vm->vm_sev_asid[vcpu_id])) {
usr.sbin/vmd/sev.c
185
vm->vm_sev_handle, vm->vm_sev_asid[vcpu_id]);
usr.sbin/vmd/sev.c
189
if (psp_get_gstate(vm->vm_sev_handle, NULL, NULL, &gstate)) {
usr.sbin/vmd/sev.c
203
sev_encrypt_state(struct vmd_vm *vm, int vcpu_id)
usr.sbin/vmd/sev.c
205
struct vmop_create_params *vmc = &vm->vm_params;
usr.sbin/vmd/sev.c
210
if (psp_encrypt_state(vm->vm_sev_handle, vm->vm_sev_asid[vcpu_id],
usr.sbin/vmd/sev.c
211
vm->vm_vmmid, vcpu_id)) {
usr.sbin/vmd/sev.c
213
__func__, vm->vm_sev_handle, vm->vm_sev_asid[vcpu_id],
usr.sbin/vmd/sev.c
214
vm->vm_vmid, vcpu_id);
usr.sbin/vmd/sev.c
222
sev_launch_finalize(struct vmd_vm *vm)
usr.sbin/vmd/sev.c
224
struct vmop_create_params *vmc = &vm->vm_params;
usr.sbin/vmd/sev.c
230
if (psp_launch_measure(vm->vm_sev_handle)) {
usr.sbin/vmd/sev.c
234
if (psp_launch_finish(vm->vm_sev_handle)) {
usr.sbin/vmd/sev.c
239
if (psp_get_gstate(vm->vm_sev_handle, NULL, NULL, &gstate)) {
usr.sbin/vmd/sev.c
255
sev_shutdown(struct vmd_vm *vm)
usr.sbin/vmd/sev.c
257
struct vmop_create_params *vmc = &vm->vm_params;
usr.sbin/vmd/sev.c
262
if (psp_guest_shutdown(vm->vm_sev_handle)) {
usr.sbin/vmd/sev.c
266
vm->vm_sev_handle = 0;
usr.sbin/vmd/sev.c
41
sev_init(struct vmd_vm *vm)
usr.sbin/vmd/sev.c
43
struct vmop_create_params *vmc = &vm->vm_params;
usr.sbin/vmd/sev.c
64
vm->vm_sev_handle = handle;
usr.sbin/vmd/sev.c
66
if (psp_get_gstate(vm->vm_sev_handle, NULL, NULL, &gstate)) {
usr.sbin/vmd/vioblk.c
104
memset(&vm, 0, sizeof(vm));
usr.sbin/vmd/vioblk.c
105
sz = atomicio(read, dev.sync_fd, &vm, sizeof(vm));
usr.sbin/vmd/vioblk.c
106
if (sz != sizeof(vm)) {
usr.sbin/vmd/vioblk.c
111
current_vm = &vm;
usr.sbin/vmd/vioblk.c
113
setproctitle("%s/vioblk%d", vm.vm_params.vmc_name, vioblk->idx);
usr.sbin/vmd/vioblk.c
114
log_procinit("vm/%s/vioblk%d", vm.vm_params.vmc_name, vioblk->idx);
usr.sbin/vmd/vioblk.c
117
ret = remap_guest_mem(&vm, fd_vmm);
usr.sbin/vmd/vioblk.c
131
type = vm.vm_params.vmc_disktypes[vioblk->idx];
usr.sbin/vmd/vioblk.c
180
vm.vm_params.vmc_name);
usr.sbin/vmd/vioblk.c
67
struct vmd_vm vm;
usr.sbin/vmd/vionet.c
114
struct vmd_vm vm;
usr.sbin/vmd/vionet.c
149
memset(&vm, 0, sizeof(vm));
usr.sbin/vmd/vionet.c
150
sz = atomicio(read, dev.sync_fd, &vm, sizeof(vm));
usr.sbin/vmd/vionet.c
151
if (sz != sizeof(vm)) {
usr.sbin/vmd/vionet.c
156
current_vm = &vm;
usr.sbin/vmd/vionet.c
157
setproctitle("%s/vionet%d", vm.vm_params.vmc_name, vionet->idx);
usr.sbin/vmd/vionet.c
158
log_procinit("vm/%s/vionet%d", vm.vm_params.vmc_name, vionet->idx);
usr.sbin/vmd/vionet.c
161
ret = remap_guest_mem(&vm, fd_vmm);
usr.sbin/vmd/vionet.c
240
vm.vm_params.vmc_name);
usr.sbin/vmd/vioscsi.c
104
current_vm = &vm;
usr.sbin/vmd/vioscsi.c
106
setproctitle("%s/vioscsi", vm.vm_params.vmc_name);
usr.sbin/vmd/vioscsi.c
107
log_procinit("vm/%s/vioscsi", vm.vm_params.vmc_name);
usr.sbin/vmd/vioscsi.c
110
ret = remap_guest_mem(&vm, fd_vmm);
usr.sbin/vmd/vioscsi.c
158
vm.vm_params.vmc_name);
usr.sbin/vmd/vioscsi.c
65
struct vmd_vm vm;
usr.sbin/vmd/vioscsi.c
97
memset(&vm, 0, sizeof(vm));
usr.sbin/vmd/vioscsi.c
98
sz = atomicio(read, dev.sync_fd, &vm, sizeof(vm));
usr.sbin/vmd/vioscsi.c
99
if (sz != sizeof(vm)) {
usr.sbin/vmd/virtio.c
1004
virtio_dev_init(vm, &viornd, id, VIORND_QUEUE_SIZE_DEFAULT,
usr.sbin/vmd/virtio.c
1032
virtio_dev_init(vm, dev, id, VIONET_QUEUE_SIZE_DEFAULT,
usr.sbin/vmd/virtio.c
1053
dev->vmm_id = vm->vm_vmmid;
usr.sbin/vmd/virtio.c
1068
__func__, vm->vm_params.vmc_name, i,
usr.sbin/vmd/virtio.c
1098
virtio_dev_init(vm, dev, id, VIOBLK_QUEUE_SIZE_DEFAULT,
usr.sbin/vmd/virtio.c
1120
dev->vmm_id = vm->vm_vmmid;
usr.sbin/vmd/virtio.c
1152
virtio_dev_init(vm, dev, id, VIOSCSI_QUEUE_SIZE_DEFAULT,
usr.sbin/vmd/virtio.c
1166
dev->vmm_id = vm->vm_vmmid;
usr.sbin/vmd/virtio.c
1178
if (virtio_dev_launch(vm, dev) != 0) {
usr.sbin/vmd/virtio.c
1192
virtio_dev_init(vm, dev, id, 0, 0,
usr.sbin/vmd/virtio.c
1224
vionet_set_hostmac(struct vmd_vm *vm, unsigned int idx, uint8_t *addr)
usr.sbin/vmd/virtio.c
1226
struct vmop_create_params *vmc = &vm->vm_params;
usr.sbin/vmd/virtio.c
1258
virtio_shutdown(struct vmd_vm *vm)
usr.sbin/vmd/virtio.c
1306
void virtio_broadcast_imsg(struct vmd_vm *vm, uint16_t type, void *data,
usr.sbin/vmd/virtio.c
1324
virtio_stop(struct vmd_vm *vm)
usr.sbin/vmd/virtio.c
1326
return virtio_broadcast_imsg(vm, IMSG_VMDOP_PAUSE_VM, NULL, 0);
usr.sbin/vmd/virtio.c
1330
virtio_start(struct vmd_vm *vm)
usr.sbin/vmd/virtio.c
1332
return virtio_broadcast_imsg(vm, IMSG_VMDOP_UNPAUSE_VM, NULL, 0);
usr.sbin/vmd/virtio.c
1339
virtio_dev_init(struct vmd_vm *vm, struct virtio_dev *dev, uint8_t pci_id,
usr.sbin/vmd/virtio.c
1358
dev->vm_id = vm->vm_vmid;
usr.sbin/vmd/virtio.c
1359
dev->vmm_id = vm->vm_vmmid;
usr.sbin/vmd/virtio.c
1459
virtio_dev_launch(struct vmd_vm *vm, struct virtio_dev *dev)
usr.sbin/vmd/virtio.c
1472
log_debug("%s: launching vionet%d", vm->vm_params.vmc_name,
usr.sbin/vmd/virtio.c
1476
log_debug("%s: launching vioblk%d", vm->vm_params.vmc_name,
usr.sbin/vmd/virtio.c
1480
log_debug("%s: launching vioscsi", vm->vm_params.vmc_name);
usr.sbin/vmd/virtio.c
1539
vm->vm_params.vmc_name);
usr.sbin/vmd/virtio.c
1540
sz = atomicio(vwrite, sync_fds[0], vm, sizeof(*vm));
usr.sbin/vmd/virtio.c
1541
if (sz != sizeof(*vm)) {
usr.sbin/vmd/virtio.c
1591
close_fd(vm->vm_tty);
usr.sbin/vmd/virtio.c
1592
vm->vm_tty = -1;
usr.sbin/vmd/virtio.c
1594
if (vm->vm_cdrom != -1 && dev->dev_type != VMD_DEVTYPE_SCSI) {
usr.sbin/vmd/virtio.c
1595
close_fd(vm->vm_cdrom);
usr.sbin/vmd/virtio.c
1596
vm->vm_cdrom = -1;
usr.sbin/vmd/virtio.c
1613
vm->vm_params.vmc_name);
usr.sbin/vmd/virtio.c
986
virtio_init(struct vmd_vm *vm, int child_cdrom,
usr.sbin/vmd/virtio.c
989
struct vmop_create_params *vmc = &vm->vm_params;
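
The sending half of that handshake is visible in the virtio_dev_launch() fragments above: after forking a device process, the parent writes the whole structure over the synchronization socketpair and treats a short write as a launch failure. A sketch, with the log message and cleanup label as assumptions:

        sz = atomicio(vwrite, sync_fds[0], vm, sizeof(*vm));
        if (sz != sizeof(*vm)) {
                log_warnx("%s: failed to send vm to device process",
                    vm->vm_params.vmc_name);    /* assumed wording */
                goto err;                       /* hypothetical label */
        }
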
usr.sbin/vmd/vm.c
1099
remap_guest_mem(struct vmd_vm *vm, int vmm_fd)
usr.sbin/vmd/vm.c
1104
if (vm == NULL)
usr.sbin/vmd/vm.c
1109
vsp.vsp_nmemranges = vm->vm_params.vmc_nmemranges;
usr.sbin/vmd/vm.c
1110
vsp.vsp_vm_id = vm->vm_vmmid;
usr.sbin/vmd/vm.c
1111
memcpy(&vsp.vsp_memranges, &vm->vm_params.vmc_memranges,
usr.sbin/vmd/vm.c
112
memset(&vm, 0, sizeof(vm));
usr.sbin/vmd/vm.c
1120
vm->vm_params.vmc_memranges[i].vmr_va = vsp.vsp_va[i];
usr.sbin/vmd/vm.c
113
sz = atomicio(read, fd, &vm, sizeof(vm));
usr.sbin/vmd/vm.c
114
if (sz != sizeof(vm)) {
usr.sbin/vmd/vm.c
120
setproctitle("%s", vm.vm_params.vmc_name);
usr.sbin/vmd/vm.c
121
log_procinit("vm/%s", vm.vm_params.vmc_name);
usr.sbin/vmd/vm.c
135
if (vm.vm_kernel == -1) {
usr.sbin/vmd/vm.c
140
if (vm.vm_params.vmc_sev && env->vmd_psp_fd < 0) {
usr.sbin/vmd/vm.c
145
ret = start_vm(&vm, fd);
usr.sbin/vmd/vm.c
172
start_vm(struct vmd_vm *vm, int fd)
usr.sbin/vmd/vm.c
182
create_memory_map(vm);
usr.sbin/vmd/vm.c
185
ret = vmm_create_vm(vm);
usr.sbin/vmd/vm.c
199
vm->vm_vmmid = 0;
usr.sbin/vmd/vm.c
200
atomicio(vwrite, fd, &vm->vm_vmmid, sizeof(vm->vm_vmmid));
usr.sbin/vmd/vm.c
205
ret = sev_init(vm);
usr.sbin/vmd/vm.c
214
current_vm = vm;
usr.sbin/vmd/vm.c
215
con_fd = vm->vm_tty;
usr.sbin/vmd/vm.c
225
if (atomicio(vwrite, fd, &vm->vm_vmmid, sizeof(vm->vm_vmmid)) !=
usr.sbin/vmd/vm.c
226
sizeof(vm->vm_vmmid)) {
usr.sbin/vmd/vm.c
232
if (load_firmware(vm, &vrs))
usr.sbin/vmd/vm.c
235
if (vm->vm_kernel != -1)
usr.sbin/vmd/vm.c
236
close_fd(vm->vm_kernel);
usr.sbin/vmd/vm.c
266
if (vmm_pipe(vm, fd, vm_dispatch_vmm) == -1)
usr.sbin/vmd/vm.c
273
nicfds[i] = vm->vm_ifs[i].vif_fd;
usr.sbin/vmd/vm.c
274
ret = init_emulated_hw(vm, vm->vm_cdrom, vm->vm_disks, nicfds);
usr.sbin/vmd/vm.c
276
virtio_shutdown(vm);
usr.sbin/vmd/vm.c
287
ret = run_vm(vm, &vrs);
usr.sbin/vmd/vm.c
290
if (sev_shutdown(vm))
usr.sbin/vmd/vm.c
294
virtio_shutdown(vm);
usr.sbin/vmd/vm.c
307
struct vmd_vm *vm = arg;
usr.sbin/vmd/vm.c
310
struct imsgev *iev = &vm->vm_iev;
usr.sbin/vmd/vm.c
344
vm->vm_params.vmc_params.vcp_name);
usr.sbin/vmd/vm.c
351
virtio_broadcast_imsg(vm, IMSG_CTL_VERBOSE, &verbose,
usr.sbin/vmd/vm.c
364
vmr.vmr_id = vm->vm_vmid;
usr.sbin/vmd/vm.c
365
pause_vm(vm);
usr.sbin/vmd/vm.c
366
imsg_compose_event(&vm->vm_iev,
usr.sbin/vmd/vm.c
372
vmr.vmr_id = vm->vm_vmid;
usr.sbin/vmd/vm.c
373
unpause_vm(vm);
usr.sbin/vmd/vm.c
374
imsg_compose_event(&vm->vm_iev,
usr.sbin/vmd/vm.c
381
vm->vm_params.vmc_name,
usr.sbin/vmd/vm.c
384
vionet_set_hostmac(vm, var.var_nic_idx, var.var_addr);
usr.sbin/vmd/vm.c
388
type, vm->vm_params.vmc_name);
usr.sbin/vmd/vm.c
425
pause_vm(struct vmd_vm *vm)
usr.sbin/vmd/vm.c
431
if (vm->vm_state & VM_STATE_PAUSED) {
usr.sbin/vmd/vm.c
438
for (n = 0; n < vm->vm_params.vmc_ncpus; n++) {
usr.sbin/vmd/vm.c
453
pause_vm_md(vm);
usr.sbin/vmd/vm.c
457
unpause_vm(struct vmd_vm *vm)
usr.sbin/vmd/vm.c
463
if (!(vm->vm_state & VM_STATE_PAUSED)) {
usr.sbin/vmd/vm.c
470
for (n = 0; n < vm->vm_params.vmc_ncpus; n++) {
usr.sbin/vmd/vm.c
479
unpause_vm_md(vm);
usr.sbin/vmd/vm.c
531
vmm_create_vm(struct vmd_vm *vm)
usr.sbin/vmd/vm.c
534
struct vmop_create_params *vmc = &vm->vm_params;
usr.sbin/vmd/vm.c
563
vm->vm_vmmid = vcp.vcp_id;
usr.sbin/vmd/vm.c
565
vm->vm_sev_asid[i] = vcp.vcp_asid[i];
usr.sbin/vmd/vm.c
568
vm->vm_poscbit = vcp.vcp_poscbit;
usr.sbin/vmd/vm.c
588
run_vm(struct vmd_vm *vm, struct vcpu_reg_state *vrs)
usr.sbin/vmd/vm.c
599
vmc = &vm->vm_params;
usr.sbin/vmd/vm.c
644
vrp[i]->vrp_vm_id = vm->vm_vmmid;
usr.sbin/vmd/vm.c
647
if (vcpu_reset(vm->vm_vmmid, i, vrs)) {
usr.sbin/vmd/vm.c
652
if (sev_activate(vm, i)) {
usr.sbin/vmd/vm.c
657
if (sev_encrypt_memory(vm)) {
usr.sbin/vmd/vm.c
662
if (sev_encrypt_state(vm, i)) {
usr.sbin/vmd/vm.c
667
if (sev_launch_finalize(vm)) {
usr.sbin/vmd/vm.c
85
struct vmd_vm vm;
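
The vm.c fragments around remap_guest_mem() show the device-process side of guest memory sharing: copy the VM's memory ranges into a vm_sharemem_params request, hand it to vmm(4), and record the virtual addresses that come back. A sketch under stated assumptions; the VMM_IOC_SHAREMEM ioctl name and the return conventions are inferred, only the field accesses come from the fragments above:

        int
        remap_guest_mem(struct vmd_vm *vm, int vmm_fd)
        {
                struct vm_sharemem_params vsp;
                size_t i;

                if (vm == NULL)
                        return (1);

                memset(&vsp, 0, sizeof(vsp));
                vsp.vsp_nmemranges = vm->vm_params.vmc_nmemranges;
                vsp.vsp_vm_id = vm->vm_vmmid;
                memcpy(&vsp.vsp_memranges, &vm->vm_params.vmc_memranges,
                    sizeof(vsp.vsp_memranges));

                /* Assumed ioctl: ask vmm(4) to map the ranges for us. */
                if (ioctl(vmm_fd, VMM_IOC_SHAREMEM, &vsp) == -1)
                        return (1);

                /* Record where each guest range landed in our space. */
                for (i = 0; i < vsp.vsp_nmemranges; i++)
                        vm->vm_params.vmc_memranges[i].vmr_va = vsp.vsp_va[i];

                return (0);
        }
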
usr.sbin/vmd/vmd.c
1000
return (vm);
usr.sbin/vmd/vmd.c
1009
struct vmd_vm *vm;
usr.sbin/vmd/vmd.c
1011
TAILQ_FOREACH(vm, env->vmd_vms, vm_entry) {
usr.sbin/vmd/vmd.c
1012
if (vm->vm_pid == pid)
usr.sbin/vmd/vmd.c
1013
return (vm);
usr.sbin/vmd/vmd.c
1020
vm_stop(struct vmd_vm *vm, int keeptty, const char *caller)
usr.sbin/vmd/vmd.c
1025
if (vm == NULL)
usr.sbin/vmd/vmd.c
1030
vm->vm_vmid, keeptty ? ", keeping tty open" : "");
usr.sbin/vmd/vmd.c
1032
vm->vm_state &= ~(VM_STATE_RUNNING | VM_STATE_SHUTDOWN);
usr.sbin/vmd/vmd.c
1034
if (vm->vm_iev.ibuf.fd != -1) {
usr.sbin/vmd/vmd.c
1035
event_del(&vm->vm_iev.ev);
usr.sbin/vmd/vmd.c
1036
close(vm->vm_iev.ibuf.fd);
usr.sbin/vmd/vmd.c
1040
if (vm->vm_disks[i][j] != -1) {
usr.sbin/vmd/vmd.c
1041
close(vm->vm_disks[i][j]);
usr.sbin/vmd/vmd.c
1042
vm->vm_disks[i][j] = -1;
usr.sbin/vmd/vmd.c
1047
if (vm->vm_ifs[i].vif_fd != -1) {
usr.sbin/vmd/vmd.c
1048
close(vm->vm_ifs[i].vif_fd);
usr.sbin/vmd/vmd.c
1049
vm->vm_ifs[i].vif_fd = -1;
usr.sbin/vmd/vmd.c
1051
free(vm->vm_ifs[i].vif_name);
usr.sbin/vmd/vmd.c
1052
free(vm->vm_ifs[i].vif_switch);
usr.sbin/vmd/vmd.c
1053
free(vm->vm_ifs[i].vif_group);
usr.sbin/vmd/vmd.c
1054
vm->vm_ifs[i].vif_name = NULL;
usr.sbin/vmd/vmd.c
1055
vm->vm_ifs[i].vif_switch = NULL;
usr.sbin/vmd/vmd.c
1056
vm->vm_ifs[i].vif_group = NULL;
usr.sbin/vmd/vmd.c
1058
if (vm->vm_kernel != -1) {
usr.sbin/vmd/vmd.c
1059
close(vm->vm_kernel);
usr.sbin/vmd/vmd.c
1060
vm->vm_kernel = -1;
usr.sbin/vmd/vmd.c
1062
if (vm->vm_cdrom != -1) {
usr.sbin/vmd/vmd.c
1063
close(vm->vm_cdrom);
usr.sbin/vmd/vmd.c
1064
vm->vm_cdrom = -1;
usr.sbin/vmd/vmd.c
1067
vm_closetty(vm);
usr.sbin/vmd/vmd.c
1068
vm->vm_uid = 0;
usr.sbin/vmd/vmd.c
1073
vm_remove(struct vmd_vm *vm, const char *caller)
usr.sbin/vmd/vmd.c
1077
if (vm == NULL)
usr.sbin/vmd/vmd.c
1082
vm->vm_vmid);
usr.sbin/vmd/vmd.c
1084
TAILQ_REMOVE(env->vmd_vms, vm, vm_entry);
usr.sbin/vmd/vmd.c
1086
vm_stop(vm, 0, caller);
usr.sbin/vmd/vmd.c
1087
if (vm->vm_kernel_path != NULL && !vm->vm_from_config)
usr.sbin/vmd/vmd.c
1088
free(vm->vm_kernel_path);
usr.sbin/vmd/vmd.c
1089
free(vm);
usr.sbin/vmd/vmd.c
1127
struct vmd_vm *vm = NULL, *vm_parent = NULL;
usr.sbin/vmd/vmd.c
113
if (vm_register(ps, &vmc, &vm, 0, vmc.vmc_owner.uid)) {
usr.sbin/vmd/vmd.c
1144
if ((vm = vm_getbyname(vmc->vmc_name)) != NULL ||
usr.sbin/vmd/vmd.c
1145
(vm = vm_getbyvmid(vmc->vmc_id)) != NULL) {
usr.sbin/vmd/vmd.c
1146
if (vm_checkperm(vm, &vm->vm_params.vmc_owner,
usr.sbin/vmd/vmd.c
1151
vm->vm_kernel = vmc->vmc_kernel;
usr.sbin/vmd/vmd.c
1152
*ret_vm = vm;
usr.sbin/vmd/vmd.c
117
if (vm == NULL) {
usr.sbin/vmd/vmd.c
1205
if ((vm = calloc(1, sizeof(*vm))) == NULL)
usr.sbin/vmd/vmd.c
1208
memcpy(&vm->vm_params, vmc, sizeof(vm->vm_params));
usr.sbin/vmd/vmd.c
1209
vmc = &vm->vm_params;
usr.sbin/vmd/vmd.c
1210
vm->vm_pid = -1;
usr.sbin/vmd/vmd.c
1211
vm->vm_tty = -1;
usr.sbin/vmd/vmd.c
1212
vm->vm_kernel = -1;
usr.sbin/vmd/vmd.c
1213
vm->vm_state &= ~VM_STATE_PAUSED;
usr.sbin/vmd/vmd.c
1216
vm->vm_kernel = vmc->vmc_kernel;
usr.sbin/vmd/vmd.c
1220
vm->vm_disks[i][j] = -1;
usr.sbin/vmd/vmd.c
1222
vm->vm_ifs[i].vif_fd = -1;
usr.sbin/vmd/vmd.c
1246
vm->vm_cdrom = -1;
usr.sbin/vmd/vmd.c
1247
vm->vm_iev.ibuf.fd = -1;
usr.sbin/vmd/vmd.c
125
if (vm->vm_state & VM_STATE_RUNNING) {
usr.sbin/vmd/vmd.c
1254
vm->vm_vmid = id;
usr.sbin/vmd/vmd.c
1258
vm->vm_vmid = nid;
usr.sbin/vmd/vmd.c
1260
log_debug("%s: registering vm %d", __func__, vm->vm_vmid);
usr.sbin/vmd/vmd.c
1261
TAILQ_INSERT_TAIL(env->vmd_vms, vm, vm_entry);
usr.sbin/vmd/vmd.c
1263
*ret_vm = vm;
usr.sbin/vmd/vmd.c
141
res = config_setvm(ps, vm, peer_id,
usr.sbin/vmd/vmd.c
142
vm->vm_params.vmc_owner.uid);
usr.sbin/vmd/vmd.c
1464
vm_checkperm(struct vmd_vm *vm, struct vmop_owner *vmo, uid_t uid)
usr.sbin/vmd/vmd.c
1478
if (vm == NULL) {
usr.sbin/vmd/vmd.c
1486
if (((vm->vm_state & VM_STATE_RUNNING) && vm->vm_uid == uid) ||
usr.sbin/vmd/vmd.c
1487
(!(vm->vm_state & VM_STATE_RUNNING) && vmo->uid == uid))
usr.sbin/vmd/vmd.c
154
if ((vm = vm_getbyname(vid.vid_name)) == NULL) {
usr.sbin/vmd/vmd.c
158
vm_id = vm->vm_vmid;
usr.sbin/vmd/vmd.c
159
} else if ((vm = vm_getbyvmid(vm_id)) == NULL) {
usr.sbin/vmd/vmd.c
1603
vm_opentty(struct vmd_vm *vm)
usr.sbin/vmd/vmd.c
1615
if (fdopenpty(env->vmd_ptmfd, &vm->vm_tty, &tty_slave, vm->vm_ttyname,
usr.sbin/vmd/vmd.c
1625
if (ioctl(vm->vm_tty, TIOCUCNTL, &on) == -1) {
usr.sbin/vmd/vmd.c
1627
vm->vm_ttyname);
usr.sbin/vmd/vmd.c
1631
uid = vm->vm_uid;
usr.sbin/vmd/vmd.c
1632
gid = vm->vm_params.vmc_owner.gid;
usr.sbin/vmd/vmd.c
1634
if (vm->vm_params.vmc_owner.gid != -1) {
usr.sbin/vmd/vmd.c
1645
vm->vm_params.vmc_name, vm->vm_ttyname, uid, gid, mode);
usr.sbin/vmd/vmd.c
165
if ((vm->vm_state & VM_STATE_SHUTDOWN) &&
usr.sbin/vmd/vmd.c
1651
if (fstat(vm->vm_tty, &st) == -1) {
usr.sbin/vmd/vmd.c
1652
log_warn("fstat failed for %s", vm->vm_ttyname);
usr.sbin/vmd/vmd.c
1657
if (chown(vm->vm_ttyname, uid, gid) == -1) {
usr.sbin/vmd/vmd.c
1659
vm->vm_ttyname, uid, gid, getuid());
usr.sbin/vmd/vmd.c
1669
if (chmod(vm->vm_ttyname, mode) == -1) {
usr.sbin/vmd/vmd.c
1671
vm->vm_ttyname, mode, getuid());
usr.sbin/vmd/vmd.c
1682
vm_closetty(vm);
usr.sbin/vmd/vmd.c
1687
vm_closetty(struct vmd_vm *vm)
usr.sbin/vmd/vmd.c
1689
if (vm->vm_tty != -1) {
usr.sbin/vmd/vmd.c
169
} else if (!(vm->vm_state & VM_STATE_RUNNING)) {
usr.sbin/vmd/vmd.c
1691
if (fchown(vm->vm_tty, 0, 0) == -1)
usr.sbin/vmd/vmd.c
1692
log_warn("chown %s 0 0 failed", vm->vm_ttyname);
usr.sbin/vmd/vmd.c
1693
if (fchmod(vm->vm_tty, 0666) == -1)
usr.sbin/vmd/vmd.c
1694
log_warn("chmod %s 0666 failed", vm->vm_ttyname);
usr.sbin/vmd/vmd.c
1695
close(vm->vm_tty);
usr.sbin/vmd/vmd.c
1696
vm->vm_tty = -1;
usr.sbin/vmd/vmd.c
1698
memset(&vm->vm_ttyname, 0, sizeof(vm->vm_ttyname));
usr.sbin/vmd/vmd.c
172
} else if (vm_checkperm(vm, &vm->vm_params.vmc_owner,
usr.sbin/vmd/vmd.c
1772
vm_terminate(struct vmd_vm *vm, const char *caller)
usr.sbin/vmd/vmd.c
1774
if (vm->vm_from_config)
usr.sbin/vmd/vmd.c
1775
vm_stop(vm, 0, caller);
usr.sbin/vmd/vmd.c
1778
vm_remove(vm, caller);
usr.sbin/vmd/vmd.c
223
if ((vm = vm_getbyname(vid.vid_name)) == NULL) {
usr.sbin/vmd/vmd.c
230
vid.vid_id = vm->vm_vmid;
usr.sbin/vmd/vmd.c
232
} else if ((vm = vm_getbyid(vid.vid_id)) == NULL) {
usr.sbin/vmd/vmd.c
239
if (vm_checkperm(vm, &vm->vm_params.vmc_owner,
usr.sbin/vmd/vmd.c
247
proc_compose_imsg(ps, PROC_VMM, type, vm->vm_peerid, -1,
usr.sbin/vmd/vmd.c
286
struct vmd_vm *vm = NULL;
usr.sbin/vmd/vmd.c
296
if ((vm = vm_getbyvmid(vmr.vmr_id)) == NULL)
usr.sbin/vmd/vmd.c
298
proc_compose_imsg(ps, PROC_CONTROL, type, vm->vm_peerid, -1,
usr.sbin/vmd/vmd.c
301
vm->vm_params.vmc_name, vm->vm_vmid);
usr.sbin/vmd/vmd.c
302
vm->vm_state |= VM_STATE_PAUSED;
usr.sbin/vmd/vmd.c
306
if ((vm = vm_getbyvmid(vmr.vmr_id)) == NULL)
usr.sbin/vmd/vmd.c
308
proc_compose_imsg(ps, PROC_CONTROL, type, vm->vm_peerid, -1,
usr.sbin/vmd/vmd.c
311
vm->vm_params.vmc_name, vm->vm_vmid);
usr.sbin/vmd/vmd.c
312
vm->vm_state &= ~VM_STATE_PAUSED;
usr.sbin/vmd/vmd.c
316
if ((vm = vm_getbyvmid(vmr.vmr_id)) == NULL)
usr.sbin/vmd/vmd.c
318
vm->vm_pid = vmr.vmr_pid;
usr.sbin/vmd/vmd.c
319
vm->vm_vmmid = vmr.vmr_id;
usr.sbin/vmd/vmd.c
326
if (vm->vm_peerid != (uint32_t)-1) {
usr.sbin/vmd/vmd.c
327
(void)strlcpy(vmr.vmr_ttyname, vm->vm_ttyname,
usr.sbin/vmd/vmd.c
330
vm->vm_peerid, -1, &vmr, sizeof(vmr)) == -1) {
usr.sbin/vmd/vmd.c
333
vm->vm_params.vmc_name);
usr.sbin/vmd/vmd.c
334
vm_terminate(vm, __func__);
usr.sbin/vmd/vmd.c
341
vm->vm_params.vmc_name);
usr.sbin/vmd/vmd.c
342
vm_terminate(vm, __func__);
usr.sbin/vmd/vmd.c
348
if (vm_priv_ifconfig(ps, vm) == -1) {
usr.sbin/vmd/vmd.c
350
vm->vm_params.vmc_name);
usr.sbin/vmd/vmd.c
351
vm_terminate(vm, __func__);
usr.sbin/vmd/vmd.c
356
vm->vm_params.vmc_name, vm->vm_vmid, vm->vm_ttyname);
usr.sbin/vmd/vmd.c
366
if ((vm = vm_getbyvmid(vmr.vmr_id)) == NULL)
usr.sbin/vmd/vmd.c
369
vm->vm_state |= VM_STATE_SHUTDOWN;
usr.sbin/vmd/vmd.c
376
if ((vm = vm_getbyvmid(vmr.vmr_id)) == NULL) {
usr.sbin/vmd/vmd.c
382
vm->vm_params.vmc_bootdevice) {
usr.sbin/vmd/vmd.c
383
vm_terminate(vm, __func__);
usr.sbin/vmd/vmd.c
386
vm_stop(vm, 1, __func__);
usr.sbin/vmd/vmd.c
387
config_setvm(ps, vm, (uint32_t)-1, vm->vm_uid);
usr.sbin/vmd/vmd.c
400
if ((vm = vm_getbyvmid(vir.vir_id)) != NULL) {
usr.sbin/vmd/vmd.c
402
if (vm->vm_ttyname[0] != '\0')
usr.sbin/vmd/vmd.c
403
strlcpy(vir.vir_ttyname, vm->vm_ttyname,
usr.sbin/vmd/vmd.c
406
__func__, vm->vm_vmid, vm->vm_state);
usr.sbin/vmd/vmd.c
407
vir.vir_state = vm->vm_state;
usr.sbin/vmd/vmd.c
409
vir.vir_uid = vm->vm_uid;
usr.sbin/vmd/vmd.c
410
vir.vir_gid = vm->vm_params.vmc_owner.gid;
usr.sbin/vmd/vmd.c
415
if (vm)
usr.sbin/vmd/vmd.c
416
vm_terminate(vm, __func__);
usr.sbin/vmd/vmd.c
426
TAILQ_FOREACH(vm, env->vmd_vms, vm_entry) {
usr.sbin/vmd/vmd.c
427
if (!(vm->vm_state & VM_STATE_RUNNING)) {
usr.sbin/vmd/vmd.c
429
vir.vir_id = vm->vm_vmid;
usr.sbin/vmd/vmd.c
430
strlcpy(vir.vir_name, vm->vm_params.vmc_name,
usr.sbin/vmd/vmd.c
433
vm->vm_params.vmc_memranges[0].vmr_size;
usr.sbin/vmd/vmd.c
434
vir.vir_ncpus = vm->vm_params.vmc_ncpus;
usr.sbin/vmd/vmd.c
436
vir.vir_uid = vm->vm_params.vmc_owner.uid;
usr.sbin/vmd/vmd.c
437
vir.vir_gid = vm->vm_params.vmc_owner.gid;
usr.sbin/vmd/vmd.c
439
__func__, vm->vm_vmid, vm->vm_state);
usr.sbin/vmd/vmd.c
440
vir.vir_state = vm->vm_state;
usr.sbin/vmd/vmd.c
448
vm_terminate(vm, __func__);
usr.sbin/vmd/vmd.c
746
struct vmd_vm *vm;
usr.sbin/vmd/vmd.c
750
TAILQ_FOREACH(vm, env->vmd_vms, vm_entry) {
usr.sbin/vmd/vmd.c
751
if (!(vm->vm_state & VM_STATE_WAITING)) {
usr.sbin/vmd/vmd.c
753
__func__, vm->vm_params.vmc_name);
usr.sbin/vmd/vmd.c
762
vm->vm_state &= ~VM_STATE_WAITING;
usr.sbin/vmd/vmd.c
763
config_setvm(&env->vmd_ps, vm, -1, vm->vm_params.vmc_owner.uid);
usr.sbin/vmd/vmd.c
853
struct vmd_vm *vm, *next_vm;
usr.sbin/vmd/vmd.c
879
TAILQ_FOREACH_SAFE(vm, env->vmd_vms, vm_entry,
usr.sbin/vmd/vmd.c
881
if (!(vm->vm_state & VM_STATE_RUNNING)) {
usr.sbin/vmd/vmd.c
884
vm_remove(vm, __func__);
usr.sbin/vmd/vmd.c
925
struct vmd_vm *vm, *vm_next;
usr.sbin/vmd/vmd.c
929
TAILQ_FOREACH_SAFE(vm, env->vmd_vms, vm_entry, vm_next) {
usr.sbin/vmd/vmd.c
930
vm_remove(vm, __func__);
usr.sbin/vmd/vmd.c
943
struct vmd_vm *vm;
usr.sbin/vmd/vmd.c
947
TAILQ_FOREACH(vm, env->vmd_vms, vm_entry) {
usr.sbin/vmd/vmd.c
948
if (vm->vm_vmid == vmid)
usr.sbin/vmd/vmd.c
949
return (vm);
usr.sbin/vmd/vmd.c
959
struct vmd_vm *vm;
usr.sbin/vmd/vmd.c
963
TAILQ_FOREACH(vm, env->vmd_vms, vm_entry) {
usr.sbin/vmd/vmd.c
964
if (vm->vm_vmmid == id)
usr.sbin/vmd/vmd.c
965
return (vm);
usr.sbin/vmd/vmd.c
973
vm_id2vmid(uint32_t id, struct vmd_vm *vm)
usr.sbin/vmd/vmd.c
975
if (vm == NULL && (vm = vm_getbyid(id)) == NULL)
usr.sbin/vmd/vmd.c
978
id, vm->vm_vmid);
usr.sbin/vmd/vmd.c
979
return (vm->vm_vmid);
usr.sbin/vmd/vmd.c
983
vm_vmid2id(uint32_t vmid, struct vmd_vm *vm)
usr.sbin/vmd/vmd.c
985
if (vm == NULL && (vm = vm_getbyvmid(vmid)) == NULL)
usr.sbin/vmd/vmd.c
987
DPRINTF("%s: vmid %u is vmm id %u", __func__, vmid, vm->vm_vmmid);
usr.sbin/vmd/vmd.c
988
return (vm->vm_vmmid);
usr.sbin/vmd/vmd.c
99
struct vmd_vm *vm = NULL;
usr.sbin/vmd/vmd.c
994
struct vmd_vm *vm;
usr.sbin/vmd/vmd.c
998
TAILQ_FOREACH(vm, env->vmd_vms, vm_entry) {
usr.sbin/vmd/vmd.c
999
if (strcmp(vm->vm_params.vmc_name, name) == 0)
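
All of the vm_getby*() matches above share one linear scan over env->vmd_vms; only the compared field changes (vmc_name, vm_vmid, vm_vmmid, vm_pid). A representative sketch of vm_getbyname(), assembled from the fragments; the NULL guard is an assumption:

        struct vmd_vm *
        vm_getbyname(const char *name)
        {
                struct vmd_vm *vm;

                if (name == NULL)       /* assumed guard */
                        return (NULL);
                TAILQ_FOREACH(vm, env->vmd_vms, vm_entry) {
                        if (strcmp(vm->vm_params.vmc_name, name) == 0)
                                return (vm);
                }
                return (NULL);
        }
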
usr.sbin/vmd/vmm.c
110
struct vmd_vm *vm = NULL;
usr.sbin/vmd/vmm.c
172
} else if ((vm = vm_getbyvmid(id)) != NULL) {
usr.sbin/vmd/vmm.c
174
vtp.vtp_vm_id = vm_vmid2id(vm->vm_vmid, vm);
usr.sbin/vmd/vmm.c
175
vm->vm_state |= VM_STATE_SHUTDOWN;
usr.sbin/vmd/vmm.c
178
} else if (!(vm->vm_state & VM_STATE_SHUTDOWN)) {
usr.sbin/vmd/vmm.c
189
vm->vm_state |= VM_STATE_SHUTDOWN;
usr.sbin/vmd/vmm.c
190
if (imsg_compose_event(&vm->vm_iev,
usr.sbin/vmd/vmm.c
202
if (vm_vmid2id(vm->vm_vmid, vm) == 0) {
usr.sbin/vmd/vmm.c
236
TAILQ_FOREACH(vm, env->vmd_vms, vm_entry) {
usr.sbin/vmd/vmm.c
237
imsg_compose_event(&vm->vm_iev, type, -1, pid, -1,
usr.sbin/vmd/vmm.c
244
if ((vm = vm_getbyvmid(id)) == NULL) {
usr.sbin/vmd/vmm.c
249
imsg_compose_event(&vm->vm_iev, type, -1, pid,
usr.sbin/vmd/vmm.c
255
if ((vm = vm_getbyvmid(id)) == NULL) {
usr.sbin/vmd/vmm.c
260
imsg_compose_event(&vm->vm_iev, type, -1, pid,
usr.sbin/vmd/vmm.c
265
if ((vm = vm_getbyvmid(var.var_vmid)) == NULL) {
usr.sbin/vmd/vmm.c
270
imsg_compose_event(&vm->vm_iev, type, -1, pid,
usr.sbin/vmd/vmm.c
296
if ((vm = vm_getbyvmid(vm_id)) != NULL) {
usr.sbin/vmd/vmm.c
299
vm_remove(vm, __func__);
usr.sbin/vmd/vmm.c
333
struct vmd_vm *vm;
usr.sbin/vmd/vmm.c
345
vm = vm_getbypid(pid);
usr.sbin/vmd/vmm.c
346
if (vm == NULL) {
usr.sbin/vmd/vmm.c
360
(vm->vm_state & VM_STATE_SHUTDOWN))
usr.sbin/vmd/vmm.c
364
vtp.vtp_vm_id = vm->vm_vmmid;
usr.sbin/vmd/vmm.c
369
vm->vm_params.vmc_name,
usr.sbin/vmd/vmm.c
370
vm->vm_vmid);
usr.sbin/vmd/vmm.c
374
vmr.vmr_id = vm_id2vmid(vm->vm_vmmid, vm);
usr.sbin/vmd/vmm.c
377
vm->vm_peerid, -1, &vmr, sizeof(vmr)) == -1)
usr.sbin/vmd/vmm.c
380
"parent", vm->vm_vmid);
usr.sbin/vmd/vmm.c
382
vm_remove(vm, __func__);
usr.sbin/vmd/vmm.c
401
struct vmd_vm *vm, *vm_next;
usr.sbin/vmd/vmm.c
403
TAILQ_FOREACH_SAFE(vm, env->vmd_vms, vm_entry, vm_next) {
usr.sbin/vmd/vmm.c
404
vtp.vtp_vm_id = vm_vmid2id(vm->vm_vmid, vm);
usr.sbin/vmd/vmm.c
408
vm_remove(vm, __func__);
usr.sbin/vmd/vmm.c
419
vmm_pipe(struct vmd_vm *vm, int fd, void (*cb)(int, short, void *))
usr.sbin/vmd/vmm.c
421
struct imsgev *iev = &vm->vm_iev;
usr.sbin/vmd/vmm.c
440
iev->data = vm;
usr.sbin/vmd/vmm.c
454
struct vmd_vm *vm = arg;
usr.sbin/vmd/vmm.c
455
struct imsgev *iev = &vm->vm_iev;
usr.sbin/vmd/vmm.c
492
vm->vm_state |= VM_STATE_SHUTDOWN;
usr.sbin/vmd/vmm.c
495
vm->vm_state &= ~VM_STATE_SHUTDOWN;
usr.sbin/vmd/vmm.c
510
type, vm->vm_params.vmc_name);
usr.sbin/vmd/vmm.c
601
struct vmd_vm *vm;
usr.sbin/vmd/vmm.c
610
if ((vm = vm_getbyvmid(peer_id)) == NULL) {
usr.sbin/vmd/vmm.c
615
if ((vm->vm_tty = imsg_get_fd(imsg)) == -1) {
usr.sbin/vmd/vmm.c
634
vm->vm_pid = vm_pid;
usr.sbin/vmd/vmm.c
638
sz = atomicio(vwrite, fds[0], vm, sizeof(*vm));
usr.sbin/vmd/vmm.c
639
if (sz != sizeof(*vm)) {
usr.sbin/vmd/vmm.c
641
__func__, vm->vm_params.vmc_name);
usr.sbin/vmd/vmm.c
647
for (i = 0 ; i < vm->vm_params.vmc_ndisks; i++) {
usr.sbin/vmd/vmm.c
649
if (close_fd(vm->vm_disks[i][j]) == 0)
usr.sbin/vmd/vmm.c
650
vm->vm_disks[i][j] = -1;
usr.sbin/vmd/vmm.c
653
for (i = 0 ; i < vm->vm_params.vmc_nnics; i++) {
usr.sbin/vmd/vmm.c
654
if (close_fd(vm->vm_ifs[i].vif_fd) == 0)
usr.sbin/vmd/vmm.c
655
vm->vm_ifs[i].vif_fd = -1;
usr.sbin/vmd/vmm.c
657
if (close_fd(vm->vm_kernel) == 0)
usr.sbin/vmd/vmm.c
658
vm->vm_kernel = -1;
usr.sbin/vmd/vmm.c
659
if (close_fd(vm->vm_cdrom) == 0)
usr.sbin/vmd/vmm.c
660
vm->vm_cdrom = -1;
usr.sbin/vmd/vmm.c
661
if (close_fd(vm->vm_tty) == 0)
usr.sbin/vmd/vmm.c
662
vm->vm_tty = -1;
usr.sbin/vmd/vmm.c
673
__func__, vm->vm_params.vmc_name);
usr.sbin/vmd/vmm.c
679
sz = atomicio(read, fds[0], &vm->vm_vmmid,
usr.sbin/vmd/vmm.c
680
sizeof(vm->vm_vmmid));
usr.sbin/vmd/vmm.c
681
if (sz != sizeof(vm->vm_vmmid)) {
usr.sbin/vmd/vmm.c
683
__func__, vm->vm_params.vmc_name);
usr.sbin/vmd/vmm.c
690
if (vm->vm_vmmid == 0)
usr.sbin/vmd/vmm.c
693
*id = vm->vm_vmmid;
usr.sbin/vmd/vmm.c
694
*pid = vm->vm_pid;
usr.sbin/vmd/vmm.c
697
if (vmm_pipe(vm, fds[0], vmm_dispatch_vm) == -1)
usr.sbin/vmd/vmm.c
760
if (!vm->vm_from_config)
usr.sbin/vmd/vmm.c
761
vm_remove(vm, __func__);
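
The vmm_pipe() fragments show how a VM's control channel is wired up: the socketpair fd is made nonblocking, wrapped in the vm's struct imsgev, and the dispatch callback receives the vm itself as its argument. A sketch in the usual imsgev style; everything beyond the lines listed above (fcntl, imsg_init, imsg_event_add) is assumed:

        int
        vmm_pipe(struct vmd_vm *vm, int fd, void (*cb)(int, short, void *))
        {
                struct imsgev *iev = &vm->vm_iev;

                if (fcntl(fd, F_SETFL, O_NONBLOCK) == -1) {
                        log_warn("failed to set nonblocking mode on vm pipe");
                        return (-1);
                }

                imsg_init(&iev->ibuf, fd);
                iev->handler = cb;
                iev->data = vm;         /* callback receives the vm */
                imsg_event_add(iev);

                return (0);
        }
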
usr.sbin/vmd/x86_vm.c
1124
intr_pending(struct vmd_vm *vm)
usr.sbin/vmd/x86_vm.c
1131
intr_ack(struct vmd_vm *vm)
usr.sbin/vmd/x86_vm.c
1138
intr_toggle_el(struct vmd_vm *vm, int irq, int val)
usr.sbin/vmd/x86_vm.c
153
create_memory_map(struct vmd_vm *vm)
usr.sbin/vmd/x86_vm.c
155
struct vmop_create_params *vmc = &vm->vm_params;
usr.sbin/vmd/x86_vm.c
238
load_firmware(struct vmd_vm *vm, struct vcpu_reg_state *vrs)
usr.sbin/vmd/x86_vm.c
251
if ((fp = gzdopen(vm->vm_kernel, "r")) == NULL)
usr.sbin/vmd/x86_vm.c
255
ret = loadfile_elf(fp, vm, vrs, vm->vm_params.vmc_bootdevice);
usr.sbin/vmd/x86_vm.c
261
if (ret && errno == ENOEXEC && vm->vm_kernel != -1 &&
usr.sbin/vmd/x86_vm.c
262
gzdirect(fp) && (ret = fstat(vm->vm_kernel, &sb)) == 0)
usr.sbin/vmd/x86_vm.c
350
init_emulated_hw(struct vmd_vm *vm, int child_cdrom,
usr.sbin/vmd/x86_vm.c
353
struct vmop_create_params *vmc = &vm->vm_params;
usr.sbin/vmd/x86_vm.c
371
i8253_init(vm->vm_vmmid);
usr.sbin/vmd/x86_vm.c
379
mc146818_init(vm->vm_vmmid, memlo, memhi);
usr.sbin/vmd/x86_vm.c
393
ns8250_init(con_fd, vm->vm_vmmid);
usr.sbin/vmd/x86_vm.c
426
pause_vm_md(struct vmd_vm *vm)
usr.sbin/vmd/x86_vm.c
431
virtio_stop(vm);
usr.sbin/vmd/x86_vm.c
435
unpause_vm_md(struct vmd_vm *vm)
usr.sbin/vmd/x86_vm.c
440
virtio_start(vm);
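
Reading the x86_vm.c tail together with the virtio.c fragments: the machine-dependent pause hooks delegate to virtio_stop()/virtio_start(), which broadcast IMSG_VMDOP_PAUSE_VM and IMSG_VMDOP_UNPAUSE_VM to every device process via virtio_broadcast_imsg(). A sketch of that chain; the comments are mine, not source:

        void
        pause_vm_md(struct vmd_vm *vm)
        {
                /* MD timer/device state is handled here in the real code. */
                virtio_stop(vm);        /* broadcasts IMSG_VMDOP_PAUSE_VM */
        }

        void
        unpause_vm_md(struct vmd_vm *vm)
        {
                virtio_start(vm);       /* broadcasts IMSG_VMDOP_UNPAUSE_VM */
        }
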