Symbol: mm
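One entry per match below: file, line number within that file, matching source line. The same two letters name several unrelated things across the tree: a scratch map coordinate in hack(6), an array of mmap descriptors in the arc4random entropy fallbacks, the /dev/mem character-device driver in the kernel configuration tables, a cursor over the EFI memory map in the boot loaders, a DMA allocation record in mfi(4), a register-offset prefix and two allocator handles in the DRM code, Linux's struct mm_struct, and the per-queue-type mqd_manager in amdkfd. Short sketches of each pattern follow the corresponding groups of entries.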
games/hack/hack.makemon.c
152
coord mm;
games/hack/hack.makemon.c
154
mm.x = x;
games/hack/hack.makemon.c
155
mm.y = y;
games/hack/hack.makemon.c
157
mm = enexto(mm.x, mm.y);
games/hack/hack.makemon.c
158
(void) makemon(ptr, mm.x, mm.y);
games/hack/hack.mklev.c
776
coord mm;
games/hack/hack.mklev.c
777
mx = mm.x;
games/hack/hack.mklev.c
778
my = mm.y;
games/hack/hack.mklev.c
799
coord mm;
games/hack/hack.mklev.c
800
mm = mazexy();
games/hack/hack.mklev.c
801
mx = mm.x;
games/hack/hack.mklev.c
802
my = mm.y;
games/hack/hack.mkmaze.c
107
mm = mazexy();
games/hack/hack.mkmaze.c
108
zx = mm.x;
games/hack/hack.mkmaze.c
109
zy = mm.y;
games/hack/hack.mkmaze.c
127
mm = mazexy();
games/hack/hack.mkmaze.c
128
(void) mkobj_at(rn2(2) ? GEM_SYM : 0, mm.x, mm.y);
games/hack/hack.mkmaze.c
131
mm = mazexy();
games/hack/hack.mkmaze.c
132
(void) mkobj_at(ROCK_SYM, mm.x, mm.y);
games/hack/hack.mkmaze.c
134
mm = mazexy();
games/hack/hack.mkmaze.c
135
(void) makemon(PM_MINOTAUR, mm.x, mm.y);
games/hack/hack.mkmaze.c
137
mm = mazexy();
games/hack/hack.mkmaze.c
138
(void) makemon((struct permonst *) 0, mm.x, mm.y);
games/hack/hack.mkmaze.c
141
mm = mazexy();
games/hack/hack.mkmaze.c
142
mkgold(0L,mm.x,mm.y);
games/hack/hack.mkmaze.c
146
mm = mazexy();
games/hack/hack.mkmaze.c
147
levl[(int)(xupstair = mm.x)][(int)(yupstair = mm.y)].scrsym = '<';
games/hack/hack.mkmaze.c
197
coord mm;
games/hack/hack.mkmaze.c
199
mm.x = 3 + 2*rn2(COLNO/2 - 2);
games/hack/hack.mkmaze.c
200
mm.y = 3 + 2*rn2(ROWNO/2 - 2);
games/hack/hack.mkmaze.c
201
return mm;
games/hack/hack.mkmaze.c
80
coord mm;
games/hack/hack.mon.c
897
coord mm;
games/hack/hack.mon.c
898
mm = enexto(u.ux, u.uy);
games/hack/hack.mon.c
899
mtmp->mx = mm.x;
games/hack/hack.mon.c
900
mtmp->my = mm.y;
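In the hack sources above, mm is a throwaway coord: seed it with a desired position, let enexto() shift it to a nearby free cell (or take a random odd-aligned maze cell from mazexy(), defined at hack.mkmaze.c:197), then place a monster or object there. A minimal sketch of the placement idiom, with the coord type and signatures inferred from the call sites:

    /* Sketch only: `coord', enexto() and makemon() as the call sites
     * above suggest; hack's real xchar is a signed char typedef. */
    typedef signed char xchar;
    typedef struct { xchar x, y; } coord;

    coord enexto(xchar, xchar);             /* nearest unoccupied cell */
    struct monst *makemon(struct permonst *, int, int);

    void
    place_monster_near(struct permonst *ptr, xchar x, xchar y)
    {
        coord mm;

        mm.x = x;
        mm.y = y;
        mm = enexto(mm.x, mm.y);            /* nudge off occupied ground */
        (void) makemon(ptr, mm.x, mm.y);
    }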
lib/libcrypto/arc4random/getentropy_aix.c
297
} mm[] = {
lib/libcrypto/arc4random/getentropy_aix.c
306
for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
lib/libcrypto/arc4random/getentropy_aix.c
307
HX(mm[m].p = mmap(NULL,
lib/libcrypto/arc4random/getentropy_aix.c
308
mm[m].npg * pgs,
lib/libcrypto/arc4random/getentropy_aix.c
311
(off_t)0), mm[m].p);
lib/libcrypto/arc4random/getentropy_aix.c
312
if (mm[m].p != MAP_FAILED) {
lib/libcrypto/arc4random/getentropy_aix.c
316
p = mm[m].p;
lib/libcrypto/arc4random/getentropy_aix.c
318
(mm[m].npg * pgs - 1);
lib/libcrypto/arc4random/getentropy_aix.c
320
cnt += (int)((long)(mm[m].p)
lib/libcrypto/arc4random/getentropy_aix.c
341
for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
lib/libcrypto/arc4random/getentropy_aix.c
342
if (mm[m].p != MAP_FAILED)
lib/libcrypto/arc4random/getentropy_aix.c
343
munmap(mm[m].p, mm[m].npg * pgs);
lib/libcrypto/arc4random/getentropy_aix.c
344
mm[m].p = MAP_FAILED;
lib/libcrypto/arc4random/getentropy_hpux.c
291
} mm[] = {
lib/libcrypto/arc4random/getentropy_hpux.c
300
for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
lib/libcrypto/arc4random/getentropy_hpux.c
301
HX(mm[m].p = mmap(NULL,
lib/libcrypto/arc4random/getentropy_hpux.c
302
mm[m].npg * pgs,
lib/libcrypto/arc4random/getentropy_hpux.c
305
(off_t)0), mm[m].p);
lib/libcrypto/arc4random/getentropy_hpux.c
306
if (mm[m].p != MAP_FAILED) {
lib/libcrypto/arc4random/getentropy_hpux.c
310
p = mm[m].p;
lib/libcrypto/arc4random/getentropy_hpux.c
312
(mm[m].npg * pgs - 1);
lib/libcrypto/arc4random/getentropy_hpux.c
314
cnt += (int)((long)(mm[m].p)
lib/libcrypto/arc4random/getentropy_hpux.c
335
for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
lib/libcrypto/arc4random/getentropy_hpux.c
336
if (mm[m].p != MAP_FAILED)
lib/libcrypto/arc4random/getentropy_hpux.c
337
munmap(mm[m].p, mm[m].npg * pgs);
lib/libcrypto/arc4random/getentropy_hpux.c
338
mm[m].p = MAP_FAILED;
lib/libcrypto/arc4random/getentropy_linux.c
397
} mm[] = {
lib/libcrypto/arc4random/getentropy_linux.c
406
for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
lib/libcrypto/arc4random/getentropy_linux.c
407
HX(mm[m].p = mmap(NULL,
lib/libcrypto/arc4random/getentropy_linux.c
408
mm[m].npg * pgs,
lib/libcrypto/arc4random/getentropy_linux.c
411
(off_t)0), mm[m].p);
lib/libcrypto/arc4random/getentropy_linux.c
412
if (mm[m].p != MAP_FAILED) {
lib/libcrypto/arc4random/getentropy_linux.c
416
p = mm[m].p;
lib/libcrypto/arc4random/getentropy_linux.c
418
(mm[m].npg * pgs - 1);
lib/libcrypto/arc4random/getentropy_linux.c
420
cnt += (int)((long)(mm[m].p)
lib/libcrypto/arc4random/getentropy_linux.c
441
for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
lib/libcrypto/arc4random/getentropy_linux.c
442
if (mm[m].p != MAP_FAILED)
lib/libcrypto/arc4random/getentropy_linux.c
443
munmap(mm[m].p, mm[m].npg * pgs);
lib/libcrypto/arc4random/getentropy_linux.c
444
mm[m].p = MAP_FAILED;
lib/libcrypto/arc4random/getentropy_osx.c
311
} mm[] = {
lib/libcrypto/arc4random/getentropy_osx.c
320
for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
lib/libcrypto/arc4random/getentropy_osx.c
321
HX(mm[m].p = mmap(NULL,
lib/libcrypto/arc4random/getentropy_osx.c
322
mm[m].npg * pgs,
lib/libcrypto/arc4random/getentropy_osx.c
325
(off_t)0), mm[m].p);
lib/libcrypto/arc4random/getentropy_osx.c
326
if (mm[m].p != MAP_FAILED) {
lib/libcrypto/arc4random/getentropy_osx.c
330
p = mm[m].p;
lib/libcrypto/arc4random/getentropy_osx.c
332
(mm[m].npg * pgs - 1);
lib/libcrypto/arc4random/getentropy_osx.c
334
cnt += (int)((long)(mm[m].p)
lib/libcrypto/arc4random/getentropy_osx.c
351
for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
lib/libcrypto/arc4random/getentropy_osx.c
352
if (mm[m].p != MAP_FAILED)
lib/libcrypto/arc4random/getentropy_osx.c
353
munmap(mm[m].p, mm[m].npg * pgs);
lib/libcrypto/arc4random/getentropy_osx.c
354
mm[m].p = MAP_FAILED;
lib/libcrypto/arc4random/getentropy_solaris.c
317
} mm[] = {
lib/libcrypto/arc4random/getentropy_solaris.c
326
for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
lib/libcrypto/arc4random/getentropy_solaris.c
327
HX(mm[m].p = mmap(NULL,
lib/libcrypto/arc4random/getentropy_solaris.c
328
mm[m].npg * pgs,
lib/libcrypto/arc4random/getentropy_solaris.c
331
(off_t)0), mm[m].p);
lib/libcrypto/arc4random/getentropy_solaris.c
332
if (mm[m].p != MAP_FAILED) {
lib/libcrypto/arc4random/getentropy_solaris.c
336
p = mm[m].p;
lib/libcrypto/arc4random/getentropy_solaris.c
338
(mm[m].npg * pgs - 1);
lib/libcrypto/arc4random/getentropy_solaris.c
340
cnt += (int)((long)(mm[m].p)
lib/libcrypto/arc4random/getentropy_solaris.c
361
for (m = 0; m < sizeof mm/sizeof(mm[0]); m++) {
lib/libcrypto/arc4random/getentropy_solaris.c
362
if (mm[m].p != MAP_FAILED)
lib/libcrypto/arc4random/getentropy_solaris.c
363
munmap(mm[m].p, mm[m].npg * pgs);
lib/libcrypto/arc4random/getentropy_solaris.c
364
mm[m].p = MAP_FAILED;
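In the five getentropy_*.c fallbacks above, mm[] is an array of { pointer, page count } descriptors. The code mmaps anonymous regions of varying sizes, feeds the returned (ASLR-influenced) addresses into the entropy hash through the HX macro, touches the mappings so they are realized, and finally unmaps everything. A compilable sketch of the idea, with a hypothetical hash_bytes() standing in for the files' real SHA-512 accumulation:

    #include <sys/types.h>
    #include <sys/mman.h>
    #include <stddef.h>
    #include <unistd.h>

    void hash_bytes(const void *, size_t);          /* hypothetical sink */

    static void
    gather_mmap_jitter(void)
    {
        struct { void *p; size_t npg; } mm[] = {
            { MAP_FAILED, 1 }, { MAP_FAILED, 2 },
            { MAP_FAILED, 4 }, { MAP_FAILED, 8 },   /* varied sizes */
        };
        size_t m, pgs = (size_t)sysconf(_SC_PAGESIZE);

        for (m = 0; m < sizeof mm / sizeof mm[0]; m++) {
            mm[m].p = mmap(NULL, mm[m].npg * pgs, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON, -1, (off_t)0);
            if (mm[m].p != MAP_FAILED) {
                /* the address itself carries layout entropy */
                hash_bytes(&mm[m].p, sizeof mm[m].p);
                /* touch the last byte so the mapping is realized */
                ((volatile char *)mm[m].p)[mm[m].npg * pgs - 1] = 1;
            }
        }
        for (m = 0; m < sizeof mm / sizeof mm[0]; m++) {
            if (mm[m].p != MAP_FAILED)
                munmap(mm[m].p, mm[m].npg * pgs);
            mm[m].p = MAP_FAILED;
        }
    }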
sys/arch/alpha/alpha/conf.c
135
cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
sys/arch/alpha/alpha/conf.c
80
cdev_decl(mm);
sys/arch/alpha/alpha/mem.c
62
cdev_decl(mm);
sys/arch/amd64/amd64/conf.c
112
cdev_decl(mm);
sys/arch/amd64/amd64/conf.c
179
cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
sys/arch/amd64/include/conf.h
37
cdev_decl(mm);
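In the kernel configuration files (here and in the conf.c/conf.h entries further down), mm is not a variable: it is the conventional name of the memory special-file driver behind /dev/null, /dev/mem and /dev/kmem. cdev_decl(mm) pastes the name onto each entry point to declare mmopen(), mmread() and friends, and cdev_mm_init(1,mm) fills the character-device switch slot (major 2 on most ports, 3 on loongson, octeon and sparc64) with those functions. Roughly, and only as an illustration of the token pasting (the real macros in sys/conf.h declare more entry points):

    /* Illustrative expansion, not the exact sys/conf.h definition. */
    #define cdev_decl(n) \
        int n##open(dev_t, int, int, struct proc *); \
        int n##close(dev_t, int, int, struct proc *); \
        int n##read(dev_t, struct uio *, int); \
        int n##write(dev_t, struct uio *, int); \
        int n##ioctl(dev_t, u_long, caddr_t, int, struct proc *)

    cdev_decl(mm);      /* declares mmopen, mmclose, mmread, ... */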
sys/arch/amd64/stand/efiboot/efiboot.c
340
EFI_MEMORY_DESCRIPTOR *mm;
sys/arch/amd64/stand/efiboot/efiboot.c
349
mm = alloc(siz);
sys/arch/amd64/stand/efiboot/efiboot.c
350
status = BS->GetMemoryMap(&siz, mm, &mapkey, &mmsiz, &mmver);
sys/arch/amd64/stand/efiboot/efiboot.c
359
bios_efiinfo.mmap_start = (uintptr_t)mm;
sys/arch/amd64/stand/efiboot/efiboot.c
372
EFI_MEMORY_DESCRIPTOR *mm;
sys/arch/amd64/stand/efiboot/efiboot.c
377
for (i = 0, mm = (EFI_MEMORY_DESCRIPTOR *)bios_efiinfo.mmap_start;
sys/arch/amd64/stand/efiboot/efiboot.c
379
i++, mm = NextMemoryDescriptor(mm, bios_efiinfo.mmap_desc_size)) {
sys/arch/amd64/stand/efiboot/efiboot.c
381
bm0.addr = mm->PhysicalStart;
sys/arch/amd64/stand/efiboot/efiboot.c
382
bm0.size = mm->NumberOfPages * EFI_PAGE_SIZE;
sys/arch/amd64/stand/efiboot/efiboot.c
383
if (mm->Type == EfiReservedMemoryType ||
sys/arch/amd64/stand/efiboot/efiboot.c
384
mm->Type == EfiUnusableMemory ||
sys/arch/amd64/stand/efiboot/efiboot.c
385
mm->Type == EfiRuntimeServicesCode ||
sys/arch/amd64/stand/efiboot/efiboot.c
386
mm->Type == EfiRuntimeServicesData)
sys/arch/amd64/stand/efiboot/efiboot.c
388
else if (mm->Type == EfiLoaderCode ||
sys/arch/amd64/stand/efiboot/efiboot.c
389
mm->Type == EfiLoaderData ||
sys/arch/amd64/stand/efiboot/efiboot.c
390
mm->Type == EfiBootServicesCode ||
sys/arch/amd64/stand/efiboot/efiboot.c
391
mm->Type == EfiBootServicesData ||
sys/arch/amd64/stand/efiboot/efiboot.c
392
mm->Type == EfiConventionalMemory)
sys/arch/amd64/stand/efiboot/efiboot.c
394
else if (mm->Type == EfiACPIReclaimMemory)
sys/arch/amd64/stand/efiboot/efiboot.c
396
else if (mm->Type == EfiACPIMemoryNVS)
sys/arch/arm/arm/conf.c
270
cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
sys/arch/arm/include/conf.h
51
cdev_decl(mm);
sys/arch/arm64/arm64/conf.c
129
cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
sys/arch/arm64/arm64/conf.c
77
cdev_decl(mm);
sys/arch/arm64/include/conf.h
40
cdev_decl(mm);
sys/arch/arm64/stand/efiboot/efiboot.c
1037
EFI_MEMORY_DESCRIPTOR *mm;
sys/arch/arm64/stand/efiboot/efiboot.c
1046
mm = alloc(siz);
sys/arch/arm64/stand/efiboot/efiboot.c
1047
status = BS->GetMemoryMap(&siz, mm, &mapkey, &mmsiz, &mmver);
sys/arch/arm64/stand/efiboot/efiboot.c
1051
mmap = mm;
sys/arch/arm64/stand/efiboot/efiboot.c
1067
EFI_MEMORY_DESCRIPTOR *mm;
sys/arch/arm64/stand/efiboot/efiboot.c
1075
for (i = 0, mm = mmap; i < mmap_ndesc;
sys/arch/arm64/stand/efiboot/efiboot.c
1076
i++, mm = NextMemoryDescriptor(mm, mmap_descsiz)) {
sys/arch/arm64/stand/efiboot/efiboot.c
1077
if (mm->Type != EfiConventionalMemory)
sys/arch/arm64/stand/efiboot/efiboot.c
1080
if (mm->NumberOfPages < pages)
sys/arch/arm64/stand/efiboot/efiboot.c
1083
for (j = 0; j < mm->NumberOfPages; j++) {
sys/arch/arm64/stand/efiboot/efiboot.c
1086
if (mm->NumberOfPages - j < pages)
sys/arch/arm64/stand/efiboot/efiboot.c
1089
paddr = mm->PhysicalStart + (j * EFI_PAGE_SIZE);
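The arm64, armv7 and riscv64 loaders reuse the same cursor to hunt for a run of allocatable pages: skip descriptors that are not EfiConventionalMemory or are too small, then probe page-aligned candidates inside the region. A sketch of that search, with page_is_usable() as a hypothetical stand-in for the loaders' actual acceptance test:

    int page_is_usable(uint64_t, uint64_t);         /* hypothetical */

    /* Find `pages' consecutive usable EFI pages; 0 if none found. */
    static uint64_t
    find_free_pages(EFI_MEMORY_DESCRIPTOR *map, int ndesc, UINTN descsiz,
        uint64_t pages)
    {
        EFI_MEMORY_DESCRIPTOR *mm;
        uint64_t j, paddr;
        int i;

        for (i = 0, mm = map; i < ndesc;
            i++, mm = NextMemoryDescriptor(mm, descsiz)) {
            if (mm->Type != EfiConventionalMemory)
                continue;
            if (mm->NumberOfPages < pages)
                continue;
            for (j = 0; j + pages <= mm->NumberOfPages; j++) {
                paddr = mm->PhysicalStart + (j * EFI_PAGE_SIZE);
                if (page_is_usable(paddr, pages))
                    return paddr;
            }
        }
        return 0;
    }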
sys/arch/armv7/stand/efiboot/efiboot.c
910
EFI_MEMORY_DESCRIPTOR *mm;
sys/arch/armv7/stand/efiboot/efiboot.c
919
mm = alloc(siz);
sys/arch/armv7/stand/efiboot/efiboot.c
920
status = BS->GetMemoryMap(&siz, mm, &mapkey, &mmsiz, &mmver);
sys/arch/armv7/stand/efiboot/efiboot.c
924
mmap = mm;
sys/arch/armv7/stand/efiboot/efiboot.c
935
EFI_MEMORY_DESCRIPTOR *mm;
sys/arch/armv7/stand/efiboot/efiboot.c
943
for (i = 0, mm = mmap; i < mmap_ndesc;
sys/arch/armv7/stand/efiboot/efiboot.c
944
i++, mm = NextMemoryDescriptor(mm, mmap_descsiz)) {
sys/arch/armv7/stand/efiboot/efiboot.c
945
if (mm->Type != EfiConventionalMemory)
sys/arch/armv7/stand/efiboot/efiboot.c
948
if (mm->NumberOfPages < pages)
sys/arch/armv7/stand/efiboot/efiboot.c
951
for (j = 0; j < mm->NumberOfPages; j++) {
sys/arch/armv7/stand/efiboot/efiboot.c
954
if (mm->NumberOfPages - j < pages)
sys/arch/armv7/stand/efiboot/efiboot.c
957
paddr = mm->PhysicalStart + (j * EFI_PAGE_SIZE);
sys/arch/hppa/hppa/conf.c
128
cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
sys/arch/hppa/include/conf.h
33
cdev_decl(mm);
sys/arch/i386/i386/conf.c
101
cdev_decl(mm);
sys/arch/i386/i386/conf.c
166
cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
sys/arch/i386/include/conf.h
37
cdev_decl(mm);
sys/arch/landisk/include/conf.h
49
cdev_decl(mm);
sys/arch/landisk/landisk/conf.c
250
cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
sys/arch/loongson/loongson/conf.c
138
cdev_mm_init(1,mm), /* 3: /dev/{null,mem,kmem,...} */
sys/arch/loongson/loongson/conf.c
90
cdev_decl(mm);
sys/arch/luna88k/include/conf.h
34
cdev_decl(mm);
sys/arch/luna88k/luna88k/conf.c
105
cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
sys/arch/macppc/include/conf.h
37
cdev_decl(mm);
sys/arch/macppc/macppc/conf.c
132
cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
sys/arch/mips64/mips64/mem.c
71
cdev_decl(mm);
sys/arch/octeon/octeon/conf.c
100
cdev_decl(mm);
sys/arch/octeon/octeon/conf.c
154
cdev_mm_init(1,mm), /* 3: /dev/{null,mem,kmem,...} */
sys/arch/powerpc64/include/conf.h
5
cdev_decl(mm);
sys/arch/powerpc64/powerpc64/conf.c
112
cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
sys/arch/riscv64/include/conf.h
39
cdev_decl(mm);
sys/arch/riscv64/riscv64/conf.c
125
cdev_mm_init(1,mm), /* 2: /dev/{null,mem,kmem,...} */
sys/arch/riscv64/riscv64/conf.c
75
cdev_decl(mm);
sys/arch/riscv64/stand/efiboot/efiboot.c
915
EFI_MEMORY_DESCRIPTOR *mm;
sys/arch/riscv64/stand/efiboot/efiboot.c
924
mm = alloc(siz);
sys/arch/riscv64/stand/efiboot/efiboot.c
925
status = BS->GetMemoryMap(&siz, mm, &mapkey, &mmsiz, &mmver);
sys/arch/riscv64/stand/efiboot/efiboot.c
929
mmap = mm;
sys/arch/riscv64/stand/efiboot/efiboot.c
944
EFI_MEMORY_DESCRIPTOR *mm;
sys/arch/riscv64/stand/efiboot/efiboot.c
952
for (i = 0, mm = mmap; i < mmap_ndesc;
sys/arch/riscv64/stand/efiboot/efiboot.c
953
i++, mm = NextMemoryDescriptor(mm, mmap_descsiz)) {
sys/arch/riscv64/stand/efiboot/efiboot.c
954
if (mm->Type != EfiConventionalMemory)
sys/arch/riscv64/stand/efiboot/efiboot.c
957
if (mm->NumberOfPages < pages)
sys/arch/riscv64/stand/efiboot/efiboot.c
960
for (j = 0; j < mm->NumberOfPages; j++) {
sys/arch/riscv64/stand/efiboot/efiboot.c
963
if (mm->NumberOfPages - j < pages)
sys/arch/riscv64/stand/efiboot/efiboot.c
966
paddr = mm->PhysicalStart + (j * EFI_PAGE_SIZE);
sys/arch/sh/sh/mem.c
100
cdev_decl(mm);
sys/arch/sparc64/include/conf.h
35
cdev_decl(mm);
sys/arch/sparc64/sparc64/conf.c
155
cdev_mm_init(1,mm), /* 3: /dev/{null,mem,kmem,...} */
sys/dev/ic/mfi.c
334
struct mfi_mem *mm;
sys/dev/ic/mfi.c
340
mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
sys/dev/ic/mfi.c
341
if (mm == NULL)
sys/dev/ic/mfi.c
344
mm->am_size = size;
sys/dev/ic/mfi.c
347
BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
sys/dev/ic/mfi.c
350
if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
sys/dev/ic/mfi.c
354
if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
sys/dev/ic/mfi.c
358
if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
sys/dev/ic/mfi.c
363
mm->am_kva, mm->am_map->dm_segs[0].ds_addr, mm->am_map);
sys/dev/ic/mfi.c
365
return (mm);
sys/dev/ic/mfi.c
368
bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
sys/dev/ic/mfi.c
370
bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
sys/dev/ic/mfi.c
372
bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
sys/dev/ic/mfi.c
374
free(mm, M_DEVBUF, sizeof *mm);
sys/dev/ic/mfi.c
380
mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
sys/dev/ic/mfi.c
382
DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
sys/dev/ic/mfi.c
384
bus_dmamap_unload(sc->sc_dmat, mm->am_map);
sys/dev/ic/mfi.c
385
bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
sys/dev/ic/mfi.c
386
bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
sys/dev/ic/mfi.c
387
bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
sys/dev/ic/mfi.c
388
free(mm, M_DEVBUF, sizeof *mm);
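In mfi(4), mm is a struct mfi_mem recording one DMA-safe allocation. mfi_allocmem() runs the canonical four-step bus_dma(9) ladder (create a map, allocate physical segments, map them into kernel VA, load the map) and unwinds in reverse on failure; mfi_freemem() is that unwind in full. A skeleton of the ladder assembled from the lines above:

    /* Skeleton of mfi_allocmem()'s allocate/unwind sequence. */
    static struct mfi_mem *
    allocmem_sketch(struct mfi_softc *sc, size_t size)
    {
        struct mfi_mem *mm;
        int nsegs;

        mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (mm == NULL)
            return (NULL);
        mm->am_size = size;

        if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
            BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
            goto amfree;                    /* 1: map handle */
        if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg,
            1, &nsegs, BUS_DMA_NOWAIT) != 0)
            goto destroy;                   /* 2: physical segments */
        if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size,
            &mm->am_kva, BUS_DMA_NOWAIT) != 0)
            goto free;                      /* 3: kernel mapping */
        if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size,
            NULL, BUS_DMA_NOWAIT) != 0)
            goto unmap;                     /* 4: program the map */
        return (mm);

    unmap:
        bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
    free:
        bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
    destroy:
        bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
    amfree:
        free(mm, M_DEVBUF, sizeof *mm);
        return (NULL);
    }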
sys/dev/pci/drm/amd/amdgpu/amdgpu.h
1543
WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
sys/dev/pci/drm/amd/amdgpu/amdgpu.h
1546
WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
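In amdgpu.h (and in soc15_common.h further down) mm is a token, not an object: the historical prefix of register-offset constants such as mmGRBM_STATUS. mm##reg pastes that prefix onto the macro argument, so callers name registers without it:

    /* Illustrative: mm##reg builds the offset-constant name. */
    #define mmGRBM_STATUS 0x2004            /* hypothetical offset value */
    #define RD_REG(reg)   RREG32(mm##reg)   /* schematic reader macro */
    /* RD_REG(GRBM_STATUS) expands to RREG32(mmGRBM_STATUS). */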
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
190
struct mm_struct *mm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
198
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
208
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
284
if ((mmptr) == current->mm) { \
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
407
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
408
int kgd2kfd_resume_mm(struct mm_struct *mm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
409
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd.h
97
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
124
uint32_t __user *wptr, struct mm_struct *mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
164
if (read_user_wptr(mm, wptr64, data64)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
24
uint32_t __user *wptr, struct mm_struct *mm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
130
if (!kgd2kfd_schedule_evict_and_restore_process(fence->mm, f))
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
157
mmdrop(fence->mm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
171
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
177
else if (fence->mm == mm && !fence->svm_bo)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
64
struct mm_struct *mm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
74
mmgrab(mm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
75
fence->mm = mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
100
if (read_user_wptr(mm, wptr64, data64)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
287
uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
60
uint32_t __user *wptr, struct mm_struct *mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
211
uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
374
uint32_t __user *wptr, struct mm_struct *mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
414
if (read_user_wptr(mm, wptr64, data64)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
182
uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
360
uint32_t __user *wptr, struct mm_struct *mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
400
if (read_user_wptr(mm, wptr64, data64)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
167
struct mm_struct *mm, uint32_t inst)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
345
uint32_t __user *wptr, struct mm_struct *mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c
385
if (read_user_wptr(mm, wptr64, data64)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
162
uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
191
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
240
uint32_t __user *wptr, struct mm_struct *mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
271
if (read_user_wptr(mm, wptr, data))
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
157
uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
215
valid_wptr = read_user_wptr(mm, wptr, wptr_val);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
264
uint32_t __user *wptr, struct mm_struct *mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
294
if (read_user_wptr(mm, wptr, data))
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
225
uint32_t wptr_mask, struct mm_struct *mm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
385
uint32_t __user *wptr, struct mm_struct *mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
425
if (read_user_wptr(mm, wptr64, data64)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
34
struct mm_struct *mm, uint32_t inst);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
1409
current->mm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2500
r = kgd2kfd_quiesce_mm(mni->mm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2523
struct mm_struct *mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2788
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2801
mm = get_task_mm(usertask);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2802
if (!mm) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2809
if (update_invalid_user_pages(process_info, mm))
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2835
if (kgd2kfd_resume_mm(mm)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2853
kfd_smi_event_queue_restore_rescheduled(mm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
2855
mmput(mm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
3062
process_info->eviction_fence->mm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_bo_list.c
109
if (usermm != current->mm) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_cs.c
938
if (usermm && usermm != current->mm) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
322
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
327
mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gem.c
328
if (mm && mm != current->mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_gtt_mgr.c
136
r = drm_mm_insert_node_in_range(&mgr->mm, &node->mm_nodes[0],
sys/dev/pci/drm/amd/amdgpu/amdgpu_gtt_mgr.c
198
drm_mm_for_each_node(mm_node, &mgr->mm) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_gtt_mgr.c
255
drm_mm_print(&mgr->mm, printer);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gtt_mgr.c
288
drm_mm_init(&mgr->mm, start, size);
sys/dev/pci/drm/amd/amdgpu/amdgpu_gtt_mgr.c
317
drm_mm_takedown(&mgr->mm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_hmm.c
135
r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
sys/dev/pci/drm/amd/amdgpu/amdgpu_hmm.c
139
r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
sys/dev/pci/drm/amd/amdgpu/amdgpu_hmm.c
148
bo->notifier.mm = NULL;
sys/dev/pci/drm/amd/amdgpu/amdgpu_hmm.c
162
if (!bo->notifier.mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_hmm.c
165
bo->notifier.mm = NULL;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
1327
return gtt->usertask->mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
1474
if (amdkfd_fence_check_mm(f, current->mm) &&
sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
718
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
725
mm = bo->notifier.mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
726
if (unlikely(!mm)) {
sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
731
if (!mmget_not_zero(mm)) /* Happens during process shutdown */
sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
734
mmap_read_lock(mm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
735
vma = vma_lookup(mm, start);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
750
mmap_read_unlock(mm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.c
754
mmput(mm);
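From the amdkfd headers onward, mm usually names Linux's struct mm_struct, a process address space (the drm_mm/drm_buddy allocator handles in the *_mgr files are the exception, sketched after the next group). The amdgpu_ttm.c lines above show the lifetime discipline: pin a stored pointer with mmget_not_zero(), which fails once the process is exiting; take mmap_read_lock() before vma_lookup(); and release in strict reverse order. Condensed:

    #include <linux/mm.h>
    #include <linux/sched/mm.h>

    /* Sketch of the pinning pattern in amdgpu_ttm.c above. */
    static bool
    user_range_is_mapped(struct mm_struct *mm, unsigned long start)
    {
        struct vm_area_struct *vma;
        bool mapped;

        if (unlikely(!mm))
            return false;
        if (!mmget_not_zero(mm))        /* process is shutting down */
            return false;

        mmap_read_lock(mm);
        vma = vma_lookup(mm, start);    /* vma valid only while locked */
        mapped = (vma != NULL);
        mmap_read_unlock(mm);

        mmput(mm);
        return mapped;
    }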
sys/dev/pci/drm/amd/amdgpu/amdgpu_ttm.h
50
struct drm_mm mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vm.c
2611
if (current->group_leader->mm != current->mm)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
306
struct drm_buddy *mm = &mgr->mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
312
if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
313
rsv->size, mm->chunk_size, &rsv->allocated,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
457
struct drm_buddy *mm = &mgr->mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
508
if (fpfn || lpfn != mgr->mm.size)
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
541
BUG_ON(min_block_size < mm->chunk_size);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
543
r = drm_buddy_alloc_blocks(mm, fpfn,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
589
drm_buddy_block_trim(mm, &trim_start,
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
627
drm_buddy_free_list(mm, &vres->blocks, 0);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
650
struct drm_buddy *mm = &mgr->mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
662
drm_buddy_free_list(mm, &vres->blocks, vres->flags);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
812
struct drm_buddy *mm = &mgr->mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
815
drm_buddy_reset_clear(mm, false);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
897
struct drm_buddy *mm = &mgr->mm;
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
907
drm_buddy_print(mm, printer);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
950
err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
985
drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.c
989
drm_buddy_fini(&mgr->mm);
sys/dev/pci/drm/amd/amdgpu/amdgpu_vram_mgr.h
31
struct drm_buddy mm;
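In the GTT and VRAM managers, mm is an allocator handle rather than an address space: struct drm_mm (a range allocator) for GTT and struct drm_buddy (a buddy allocator) for VRAM. Both follow the init / insert / remove / takedown shape seen above; for drm_mm:

    #include <drm/drm_mm.h>

    /* Sketch of the drm_mm lifecycle used by amdgpu_gtt_mgr.c. */
    static void
    range_allocator_demo(u64 size, u64 npages)
    {
        struct drm_mm mm;
        struct drm_mm_node node = {};

        drm_mm_init(&mm, 0, size);              /* manage [0, size) */
        if (drm_mm_insert_node(&mm, &node, npages) == 0) {
            /* node.start .. node.start + node.size is now ours */
            drm_mm_remove_node(&node);
        }
        drm_mm_takedown(&mm);       /* all nodes must be removed first */
    }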
sys/dev/pci/drm/amd/amdgpu/soc15_common.h
185
__WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
sys/dev/pci/drm/amd/amdgpu/soc15_common.h
186
(__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
sys/dev/pci/drm/amd/amdgpu/soc15_common.h
51
__WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
sys/dev/pci/drm/amd/amdgpu/soc15_common.h
53
adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
sys/dev/pci/drm/amd/amdkfd/kfd_chardev.c
1064
svm_range_list_lock_and_flush_work(&p->svms, current->mm);
sys/dev/pci/drm/amd/amdkfd/kfd_chardev.c
1066
mmap_write_unlock(current->mm);
sys/dev/pci/drm/amd/amdkfd/kfd_chardev.c
2907
struct mm_struct *mm = NULL;
sys/dev/pci/drm/amd/amdkfd/kfd_chardev.c
2931
mm = get_task_mm(thread);
sys/dev/pci/drm/amd/amdkfd/kfd_chardev.c
2932
if (!mm) {
sys/dev/pci/drm/amd/amdkfd/kfd_chardev.c
3108
if (mm)
sys/dev/pci/drm/amd/amdkfd/kfd_chardev.c
3109
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_device.c
1162
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
sys/dev/pci/drm/amd/amdkfd/kfd_device.c
1171
p = kfd_lookup_process_by_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_device.c
1182
int kgd2kfd_resume_mm(struct mm_struct *mm)
sys/dev/pci/drm/amd/amdkfd/kfd_device.c
1191
p = kfd_lookup_process_by_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_device.c
1209
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_device.c
1222
p = kfd_lookup_process_by_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
1038
if (WARN(q->process->mm != current->mm,
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
1044
&q->properties, current->mm);
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
1253
struct mm_struct *mm = NULL;
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
1291
mm = get_task_mm(pdd->process->lead_thread);
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
1292
if (!mm) {
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
1314
q->queue, &q->properties, mm);
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
1325
if (mm)
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
1326
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
3248
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
3258
mm = get_task_mm(p->lead_thread);
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
3260
if (!mm)
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
3263
kthread_use_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
3289
kthread_unuse_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
3290
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
698
if (WARN(q->process->mm != current->mm,
sys/dev/pci/drm/amd/amdkfd/kfd_device_queue_manager.c
703
q->queue, &q->properties, current->mm);
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
1007
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
490
struct mm_struct *mm, uint32_t trigger)
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
537
vma = vma_lookup(mm, addr);
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
789
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
829
vma = vma_lookup(mm, addr);
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
886
struct mm_struct *mm, uint32_t trigger)
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
898
r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
907
return svm_migrate_ram_to_vram(prange, best_loc, start, last, mm, trigger);
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
913
struct mm_struct *mm, uint32_t trigger)
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
917
mm, trigger);
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
921
mm, trigger);
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
942
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
950
if (!mmget_not_zero(svm_bo->eviction_fence->mm)) {
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
955
mm = svm_bo->eviction_fence->mm;
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
956
if (mm != vmf->vma->vm_mm)
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.c
959
p = kfd_lookup_process_by_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.h
45
struct mm_struct *mm, uint32_t trigger);
sys/dev/pci/drm/amd/amdkfd/kfd_migrate.h
47
int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
102
struct amdgpu_cu_info *cu_info = &mm->dev->adev->gfx.cu_info;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
103
struct amdgpu_gfx_config *gfx_info = &mm->dev->adev->gfx.config;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
105
bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
109
int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
110
int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
112
cu_active_per_node = cu_info->number / mm->dev->kfd->num_nodes;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
121
dev_err(mm->dev->adev->dev,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
127
dev_err(mm->dev->adev->dev,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
133
cu_bitmap_sh_mul = (KFD_GC_VERSION(mm->dev) >= IP_VERSION(11, 0, 0) &&
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
134
KFD_GC_VERSION(mm->dev) < IP_VERSION(13, 0, 0)) ? 2 : 1;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
208
int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
212
return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
216
int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
220
return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
224
void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
228
amdgpu_amdkfd_free_gtt_mem(mm->dev->adev, &mqd_mem_obj->gtt_mem);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
231
kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
235
bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
239
return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
243
int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
247
return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
256
int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
261
return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
264
bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
268
return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
290
uint64_t kfd_mqd_stride(struct mqd_manager *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
293
return mm->mqd_size;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
91
void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.c
98
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
105
void (*get_checkpoint_info)(struct mqd_manager *mm, void *mqd, uint32_t *ctl_stack_size);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
107
void (*checkpoint_mqd)(struct mqd_manager *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
112
void (*restore_mqd)(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
122
bool (*check_preemption_failed)(struct mqd_manager *mm, void *mqd);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
123
uint64_t (*mqd_stride)(struct mqd_manager *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
161
void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
164
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
168
int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
172
int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
176
void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
179
bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
183
int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
187
int kfd_destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
191
bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
199
uint64_t kfd_mqd_stride(struct mqd_manager *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
74
void (*init_mqd)(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
78
int (*load_mqd)(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
83
void (*update_mqd)(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
87
int (*destroy_mqd)(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
92
void (*free_mqd)(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
95
bool (*is_occupied)(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager.h
99
int (*get_wave_state)(struct mqd_manager *mm, void *mqd,
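Throughout the kfd_mqd_manager files below, mm is a struct mqd_manager *, the per-queue-type ops table declared just above, so every operation takes the manager as its first argument and reaches the hardware through mm->dev, giving each ASIC generation its own vtable. A schematic caller; the mqd_mgrs[] lookup is a plausible sketch, not the exact code:

    struct mqd_manager *mm = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];

    mm->init_mqd(mm, &q->mqd, mqd_mem_obj, &gart_addr, &q->properties);
    mm->load_mqd(mm, q->mqd, pipe_id, queue_id, &q->properties,
        current->mm);   /* note the other mm: the caller's mm_struct */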
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
139
mm->update_mqd(mm, m, q, NULL);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
142
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
156
mm->update_mqd(mm, m, q, NULL);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
159
static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
167
return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
172
static void __update_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
203
update_cu_mask(mm, mqd, minfo);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
209
static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
213
return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
216
static void update_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
220
__update_mqd(mm, mqd, q, minfo, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
223
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
251
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
260
static void restore_mqd(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
286
static void checkpoint_mqd_sdma(struct mqd_manager *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
298
static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
328
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
332
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
335
static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
45
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
54
mqd_symmetrically_map_cu_mask(mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_cik.c
88
static void init_mqd(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
131
if (mm->dev->kfd->cwsr_enabled) {
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
147
mm->update_mqd(mm, m, q, NULL);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
150
static int load_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
158
r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
164
static void update_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
220
if (mm->dev->kfd->cwsr_enabled)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
223
update_cu_mask(mm, mqd, minfo);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
229
static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
233
return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
236
static int get_wave_state(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
274
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
283
static void restore_mqd(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
310
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
316
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
324
static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
337
err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
344
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
358
mm->update_mqd(mm, m, q, NULL);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
363
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
390
static void checkpoint_mqd_sdma(struct mqd_manager *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
402
static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
45
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
54
mqd_symmetrically_map_cu_mask(mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v10.c
88
static void init_mqd(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
123
static void init_mqd(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
135
if (mm->dev->kfd->shared_resources.enable_mes)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
177
if (amdgpu_amdkfd_have_atomics_support(mm->dev->adev))
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
185
if (mm->dev->kfd->cwsr_enabled) {
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
201
mm->update_mqd(mm, m, q, NULL);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
204
static int load_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
212
r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
218
static void update_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
273
if (mm->dev->kfd->cwsr_enabled)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
276
update_cu_mask(mm, mqd, minfo);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
282
static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
286
return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
289
static int get_wave_state(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
326
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
335
static void restore_mqd(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
363
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
369
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
377
static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
390
err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
397
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
406
if (mm->dev->kfd->shared_resources.enable_mes)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
416
mm->update_mqd(mm, m, q, NULL);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
421
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
44
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v11.c
73
mqd_symmetrically_map_cu_mask(mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
140
if (amdgpu_amdkfd_have_atomics_support(mm->dev->adev))
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
148
if (mm->dev->kfd->cwsr_enabled) {
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
164
mm->update_mqd(mm, m, q, NULL);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
167
static int load_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
175
r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
181
static void update_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
236
if (mm->dev->kfd->cwsr_enabled)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
239
update_cu_mask(mm, mqd, minfo);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
245
static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
249
return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
252
static int get_wave_state(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
289
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
295
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
303
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
317
mm->update_mqd(mm, m, q, NULL);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
322
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
44
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
53
mqd_symmetrically_map_cu_mask(mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v12.c
98
static void init_mqd(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
160
static void init_mqd(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
212
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
228
update_mqd(mm, m, q, NULL);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
231
static int load_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
238
return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
243
static void update_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
304
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
307
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) &&
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
308
KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4) &&
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
309
KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0))
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
310
update_cu_mask(mm, mqd, minfo, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
313
if (minfo && KFD_GC_VERSION(mm->dev) >= IP_VERSION(9, 4, 2)) {
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
326
static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
333
return kfd_check_hiq_mqd_doorbell_id(mm->dev, doorbell_id, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
336
static int get_wave_state(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
37
static void update_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
372
static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
376
*ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
379
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
391
static void checkpoint_mqd_v9_4_3(struct mqd_manager *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
400
for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
403
checkpoint_mqd(mm, m,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
409
static void restore_mqd(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
41
static uint64_t mqd_stride_v9(struct mqd_manager *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
44
if (mm->dev->kfd->cwsr_enabled &&
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
441
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
447
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
455
static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
467
err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
474
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
488
mm->update_mqd(mm, m, q, NULL);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
49
return mm->mqd_size;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
493
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
524
static void checkpoint_mqd_sdma(struct mqd_manager *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
536
static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
560
static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
571
for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
572
kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
574
init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
579
if (amdgpu_sriov_multi_vf_mode(mm->dev->adev))
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
582
m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
594
static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
598
uint32_t xcc_mask = mm->dev->xcc_mask;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
601
uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
605
err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
618
static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
62
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
622
uint32_t xcc_mask = mm->dev->xcc_mask;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
624
uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
634
err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, xcc_id);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
645
static bool check_preemption_failed_v9_4_3(struct mqd_manager *mm, void *mqd)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
647
uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
648
uint32_t xcc_mask = mm->dev->xcc_mask;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
655
ret |= kfd_check_hiq_mqd_doorbell_id(mm->dev,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
675
static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
684
uint64_t offset = mm->mqd_stride(mm, q);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
685
uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
688
for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
691
init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
692
if (amdgpu_sriov_multi_vf_mode(mm->dev->adev))
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
701
if (mm->dev->kfd->cwsr_enabled &&
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
71
mqd_symmetrically_map_cu_mask(mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
717
NUM_XCC(mm->dev->xcc_mask);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
743
static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
748
uint64_t size = mm->mqd_stride(mm, q);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
750
for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
752
update_mqd(mm, m, q, minfo);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
754
if (amdgpu_sriov_multi_vf_mode(mm->dev->adev))
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
757
update_cu_mask(mm, m, minfo, xcc);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
779
static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
791
uint64_t offset = mm->mqd_stride(mm, qp);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
793
mm->dev->dqm->current_logical_xcc_start++;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
795
num_xcc = NUM_XCC(mm->dev->xcc_mask);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
80
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3) &&
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
807
restore_mqd(mm, (void **)&m,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
81
KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 4) &&
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
816
static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
82
KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 5, 0)) {
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
820
uint32_t xcc_mask = mm->dev->xcc_mask;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
831
err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
844
static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
850
uint32_t xcc_mask = mm->dev->xcc_mask;
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
853
uint64_t mqd_stride_size = mm->mqd_stride(mm, p);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
857
err = mm->dev->kfd2kgd->hqd_load(
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
858
mm->dev->adev, xcc_mqd, pipe_id, queue_id,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
871
static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
880
uint64_t mqd_stride_size = mm->mqd_stride(mm, q);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
883
for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) {
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_v9.c
888
err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
138
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
154
mm->update_mqd(mm, m, q, NULL);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
157
static int load_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
165
return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
170
static void __update_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
229
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
234
update_cu_mask(mm, mqd, minfo);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
240
static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
244
return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
247
static void update_mqd(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
251
__update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
254
static int get_wave_state(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
277
static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
283
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
292
static void restore_mqd(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
319
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
325
init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
333
static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
337
__update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
340
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
354
mm->update_mqd(mm, m, q, NULL);
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
357
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
385
static void checkpoint_mqd_sdma(struct mqd_manager *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
397
static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
48
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
57
mqd_symmetrically_map_cu_mask(mm,
sys/dev/pci/drm/amd/amdkfd/kfd_mqd_manager_vi.c
91
static void init_mqd(struct mqd_manager *mm, void **mqd,
sys/dev/pci/drm/amd/amdkfd/kfd_priv.h
1055
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
sys/dev/pci/drm/amd/amdkfd/kfd_priv.h
907
void *mm;
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
105
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
1209
static struct mmu_notifier *kfd_process_alloc_notifier(struct mm_struct *mm)
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
1212
struct kfd_process *p = kfd_lookup_process_by_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
1238
p->mm = NULL;
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
1263
struct mm_struct *mm)
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
1272
if (WARN_ON(p->mm != mm))
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
1527
process->mm = thread->mm;
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
1562
(uintptr_t)process->mm);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
1574
mn = mmu_notifier_get(&kfd_process_mmu_notifier_ops, process->mm);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
185
mm = get_task_mm(pdd->process->lead_thread);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
186
if (!mm)
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
1864
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
1870
p = find_process_by_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
189
kthread_use_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
203
kthread_unuse_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
204
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
2224
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
2235
mm = get_task_mm(p->lead_thread);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
2237
if (!mm)
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
2240
kthread_use_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
2258
kthread_unuse_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
2259
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
833
if (!(thread->mm && mmget_not_zero(thread->mm)))
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
837
if (thread->group_leader->mm != thread->mm) {
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
838
mmput(thread->mm);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
909
mmput(thread->mm);
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
918
if (!thread->mm)
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
922
if (thread->group_leader->mm != thread->mm)
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
932
static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
937
kfd_processes, (uintptr_t)mm)
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
938
if (process->mm == mm)
sys/dev/pci/drm/amd/amdkfd/kfd_process.c
951
p = find_process_by_mm(thread->mm);
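
The kfd_process.c hits above (get_task_mm, kthread_use_mm, kthread_unuse_mm, mmput) follow one borrow-and-release pattern. A minimal sketch, assuming a caller that holds a task_struct for the process lead thread; the helper name is hypothetical:

#include <linux/errno.h>
#include <linux/kthread.h>	/* kthread_use_mm, kthread_unuse_mm */
#include <linux/sched/mm.h>	/* get_task_mm, mmput */

static int borrow_process_mm(struct task_struct *lead_thread)
{
	struct mm_struct *mm;

	mm = get_task_mm(lead_thread);	/* pins mm_users, or NULL */
	if (!mm)
		return -ESRCH;

	kthread_use_mm(mm);		/* adopt mm on this kernel thread */
	/* ... access user mappings here ... */
	kthread_unuse_mm(mm);

	mmput(mm);			/* drop the mm_users reference */
	return 0;
}
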
sys/dev/pci/drm/amd/amdkfd/kfd_smi_events.c
318
void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
sys/dev/pci/drm/amd/amdkfd/kfd_smi_events.c
323
p = kfd_lookup_process_by_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_smi_events.h
52
void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
110
svm_range_add_notifier_locked(struct mm_struct *mm, struct svm_range *prange)
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
115
mmu_interval_notifier_insert_locked(&prange->notifier, mm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1198
pchild->work_item.mm = NULL;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1633
static int svm_range_validate_and_map(struct mm_struct *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1729
vma = vma_lookup(mm, addr);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1833
struct mm_struct *mm)
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1837
mmap_write_lock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1841
mmap_write_unlock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1853
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1869
mm = get_task_mm(p->lead_thread);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1870
if (!mm) {
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1876
svm_range_list_lock_and_flush_work(svms, mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1895
r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1915
r = kgd2kfd_resume_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1927
mmap_write_unlock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1936
kfd_smi_event_queue_restore_rescheduled(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1938
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
1958
svm_range_evict(struct svm_range *prange, struct mm_struct *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2004
r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2259
svm_range_update_notifier_and_interval_tree(struct mm_struct *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2283
svm_range_add_notifier_locked(mm, prange);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2288
struct mm_struct *mm)
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2305
svm_range_update_notifier_and_interval_tree(mm, prange);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2310
svm_range_update_notifier_and_interval_tree(mm, prange);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2317
svm_range_add_notifier_locked(mm, prange);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2323
svm_range_add_notifier_locked(mm, prange);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2369
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2383
mm = prange->work_item.mm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2385
mmap_write_lock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2409
svm_range_handle_list_op(svms, pchild, mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2413
svm_range_handle_list_op(svms, prange, mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2415
mmap_write_unlock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2420
mmput_async(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2430
struct mm_struct *mm, enum svm_work_list_ops op)
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2436
WARN_ONCE(prange->work_item.mm != mm, "unmatch mm\n");
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2444
if (mmget_not_zero(mm)) {
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2445
prange->work_item.mm = mm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2498
svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2514
r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_SVM);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2519
p = kfd_lookup_process_by_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2578
svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2580
svm_range_add_list_work(svms, prange, mm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2636
svm_range_unmap_from_cpu(mni->mm, prange, start, last);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2639
svm_range_evict(prange, mni->mm, start, last, range->event);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2770
vma = vma_lookup(p->mm, addr << PAGE_SHIFT);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2868
struct mm_struct *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
2911
svm_range_add_notifier_locked(mm, prange);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3000
struct mm_struct *mm = NULL;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3056
mm = get_task_mm(p->lead_thread);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3057
if (!mm) {
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3063
mmap_read_lock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3072
mmap_write_downgrade(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3093
mmap_read_unlock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3094
mmap_write_lock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3098
prange = svm_range_create_unregistered_range(node, p, mm, addr);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3102
mmap_write_downgrade(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3108
mmap_write_downgrade(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3130
vma = vma_lookup(mm, addr << PAGE_SHIFT);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3166
mm, KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3174
r = svm_migrate_vram_to_ram(prange, mm, start, last,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3180
r = svm_migrate_vram_to_ram(prange, mm, start, last,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3192
r = svm_range_validate_and_map(mm, start, last, prange, gpuidx, false,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3206
mmap_read_unlock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3211
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3426
vma = vma_lookup(p->mm, start);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3544
svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3563
r = svm_migrate_vram_to_ram(prange, mm, prange->start, prange->last,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3570
mm, KFD_MIGRATE_TRIGGER_PREFETCH);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3596
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3601
if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3602
mm = svm_bo->eviction_fence->mm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3608
mmap_read_lock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3627
r = svm_migrate_vram_to_ram(prange, mm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3645
mmap_read_unlock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3646
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3658
svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3685
svm_range_list_lock_and_flush_work(svms, mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3690
mmap_write_unlock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3701
mmap_write_unlock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3707
svm_range_add_notifier_locked(mm, prange);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3724
mmap_write_downgrade(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3735
r = svm_range_trigger_migration(mm, prange, &migrated);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3754
r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3769
r = svm_range_validate_and_map(mm, prange->start, prange->last, prange,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3781
mmap_read_unlock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3792
svm_range_get_attr(struct kfd_process *p, struct mm_struct *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3827
mmap_read_lock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3829
mmap_read_unlock(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3978
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3985
mm = get_task_mm(p->lead_thread);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
3986
if (!mm) {
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
4048
ret = svm_range_set_attr(p, mm, criu_svm_md->data.start_addr,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
4066
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
410
if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
413
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
415
mm = svm_bo->eviction_fence->mm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
4191
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
4195
mm = get_task_mm(p->lead_thread);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
4196
if (!mm) {
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
420
p = kfd_lookup_process_by_mm(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
4243
ret = svm_range_get_attr(p, mm, svm_priv->start_addr,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
4269
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
427
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
4277
struct mm_struct *mm = current->mm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
4285
r = svm_range_set_attr(p, mm, start, size, nattrs, attrs);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
4288
r = svm_range_get_attr(p, mm, start, size, nattrs, attrs);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
563
struct mm_struct *mm;
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
579
mm = get_task_mm(p->lead_thread);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
580
if (!mm) {
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
588
mm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.c
590
mmput(mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.h
181
struct svm_range *prange, struct mm_struct *mm,
sys/dev/pci/drm/amd/amdkfd/kfd_svm.h
199
void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm);
sys/dev/pci/drm/amd/amdkfd/kfd_svm.h
65
struct mm_struct *mm;
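
Several kfd_svm.c hits above pair mmap_write_lock with mmap_write_downgrade before the long validate/map phase. A sketch of that locking shape only, with the work reduced to comments; this is an assumption about intent drawn from the call sites listed, not driver code:

#include <linux/mmap_lock.h>

static void update_then_validate(struct mm_struct *mm)
{
	mmap_write_lock(mm);
	/* ... split/insert ranges, add MMU notifiers ... */
	mmap_write_downgrade(mm);	/* now held for read only */
	/* ... fault pages and map them to the GPU ... */
	mmap_read_unlock(mm);		/* pairs with the downgraded lock */
}
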
sys/dev/pci/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
36
.reg_name = mm ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
40
.reg_name = mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
38
.reg_name = mm ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
42
.reg_name = mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c
55
.reg_name = mm ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
65
(MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
54
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
55
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
50
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
51
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
51
(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
41
(MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
60
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
61
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
65
.reg_name = mm ## block ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr_smu_msg.c
40
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c
39
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn301/dcn301_smu.c
40
(MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
59
(CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
100
.reg_name = mm ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
116
.reg_name = mm ## block ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr_smu_msg.c
38
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn35/dcn351_clk_mgr.c
108
.reg_name = mm ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
139
.reg_name = mm ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
58
.reg_name = mm ## block ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr_smu_msg.c
18
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/dce/dce_panel_cntl.h
33
.reg_name = mm ## block ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/dce/dce_panel_cntl.h
46
.reg_name = BASE(mm ## block ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/dce/dce_panel_cntl.h
47
mm ## block ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/dcn10/dcn10_dwb.h
35
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/dcn10/dcn10_dwb.h
36
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/dcn10/dcn10_dwb.h
39
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/dcn10/dcn10_dwb.h
40
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/dcn10/dcn10_dwb.h
44
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/dcn10/dcn10_dwb.h
45
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/dm_services.h
158
generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, \
sys/dev/pci/drm/amd/display/dc/dm_services.h
162
generic_reg_set_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
sys/dev/pci/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
46
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dce110/hw_factory_dce110.c
49
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
61
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
64
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/gpio/dce120/hw_factory_dce120.c
65
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
52
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
55
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/gpio/dce120/hw_translate_dce120.c
56
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dce60/hw_factory_dce60.c
42
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dce80/hw_factory_dce80.c
42
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
58
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
61
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/gpio/dcn10/hw_factory_dcn10.c
62
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
52
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
55
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/gpio/dcn10/hw_translate_dcn10.c
56
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
60
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
66
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/gpio/dcn20/hw_factory_dcn20.c
67
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn20/hw_translate_dcn20.c
56
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
58
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
64
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/gpio/dcn21/hw_factory_dcn21.c
65
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn21/hw_translate_dcn21.c
56
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
69
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
75
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/gpio/dcn30/hw_factory_dcn30.c
76
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/gpio/dcn30/hw_translate_dcn30.c
63
BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
85
.reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
86
mm ## block ## _ ## inst ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
72
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
73
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
169
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
170
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
172
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
173
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
122
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
123
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
179
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
180
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
183
BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
184
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
188
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
189
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
192
BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/irq/dcn30/irq_service_dcn30.c
193
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
171
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
172
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
175
BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/irq/dcn302/irq_service_dcn302.c
176
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
117
BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/irq/dcn303/irq_service_dcn303.c
118
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
39
.RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/mpc/dcn30/dcn30_mpc.h
40
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce100/dce100_resource.c
137
.reg_name = mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce100/dce100_resource.c
141
.reg_name = mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce100/dce100_resource.c
410
#define REG(reg) mm ## reg
sys/dev/pci/drm/amd/display/dc/resource/dce100/dce100_resource.c
493
.reg_name[id] = mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce110/dce110_resource.c
145
.reg_name = mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce110/dce110_resource.c
149
.reg_name = mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce110/dce110_resource.c
457
#define REG(reg) mm ## reg
sys/dev/pci/drm/amd/display/dc/resource/dce110/dce110_resource.c
541
.reg_name[id] = mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce112/dce112_resource.c
146
.reg_name = mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce112/dce112_resource.c
150
.reg_name = mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce112/dce112_resource.c
437
#define REG(reg) mm ## reg
sys/dev/pci/drm/amd/display/dc/resource/dce112/dce112_resource.c
522
.reg_name[id] = mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce120/dce120_resource.c
138
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dce120/dce120_resource.c
139
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce120/dce120_resource.c
142
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dce120/dce120_resource.c
143
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce120/dce120_resource.c
153
.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dce120/dce120_resource.c
154
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce120/dce120_resource.c
781
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dce120/dce120_resource.c
782
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce60/dce60_resource.c
154
.reg_name = mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce60/dce60_resource.c
158
.reg_name = mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce60/dce60_resource.c
443
#define REG(reg) mm ## reg
sys/dev/pci/drm/amd/display/dc/resource/dce60/dce60_resource.c
609
.reg_name[id] = mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce80/dce80_resource.c
153
.reg_name = mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce80/dce80_resource.c
157
.reg_name = mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dce80/dce80_resource.c
449
#define REG(reg) mm ## reg
sys/dev/pci/drm/amd/display/dc/resource/dce80/dce80_resource.c
615
.reg_name[id] = mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
113
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
114
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
117
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
118
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
122
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
123
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
126
.reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
127
mm ## reg_name ## 0 ## _ ## block ## id
sys/dev/pci/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
141
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
142
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
152
.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
153
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
129
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
130
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
133
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
134
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
137
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
138
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
146
.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
147
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
150
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
151
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
154
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
155
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
158
.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
159
mm ## reg_name ## _ ## block ## id
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
169
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
170
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
252
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
253
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
256
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
257
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
260
.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
261
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
264
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
265
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
271
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
272
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
275
.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
276
mm ## reg_name ## _ ## block ## id
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
286
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
287
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
102
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
103
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
106
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
107
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
110
.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
111
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
114
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
115
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
118
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
119
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
122
.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
123
mm ## reg_name ## _ ## block ## id
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
133
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
134
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
1343
(DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
118
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
119
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
122
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
123
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
126
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
127
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
130
.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
131
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
134
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
135
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
138
.RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
139
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
142
.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
143
mm ## block ## id ## _ ## temp_name
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
149
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
150
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
153
.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
154
mm ## reg_name ## _ ## block ## id
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
164
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
165
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
186
.reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
187
mm ## block ## _ ## inst ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
2258
(DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
117
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
118
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
121
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
122
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
125
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
126
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
129
.var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
130
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
133
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
134
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
137
.reg_name_pre ## _ ## reg_name_post[id] = BASE(mm ## reg_name_pre \
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
139
mm ## reg_name_pre ## id ## _ ## reg_name_post
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
142
.RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
143
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
146
.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
147
mm ## block ## id ## _ ## temp_name
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
153
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
154
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
157
.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
158
mm ## reg_name ## _ ## block ## id
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
190
.reg_name = CLK_BASE(mm ## block ## _ ## inst ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
191
mm ## block ## _ ## inst ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
921
(DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name)
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
168
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
169
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
177
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
183
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
186
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
189
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
190
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
193
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
194
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
197
.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
198
mm ## reg_name ## _ ## block ## id
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
201
.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
202
mm ## block ## id ## _ ## temp_name
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
208
.RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
209
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
164
.reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
165
mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
173
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
179
.reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
182
.reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
185
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
186
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
189
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
190
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
193
.reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
194
mm ## reg_name ## _ ## block ## id
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
197
.reg_name[id] = BASE(mm ## block ## id ## _ ## temp_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
198
mm ## block ## id ## _ ## temp_name
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
204
.RMU##_##reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
205
mm ## block ## id ## _ ## reg_name
sys/dev/pci/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
186
.reg_name = MMHUB_BASE(mm ## reg_name ## _BASE_IDX) + \
sys/dev/pci/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
187
mm ## reg_name
sys/dev/pci/drm/amd/display/dmub/src/dmub_reg.h
37
#define REG_OFFSET(reg_name) (BASE(mm##reg_name##_BASE_IDX) + mm##reg_name)
sys/dev/pci/drm/amd/include/cgs_common.h
131
cgs_write_register(device, mm##reg, (cgs_read_register(device, mm##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
sys/dev/pci/drm/amd/include/kgd_kfd_interface.h
237
struct mm_struct *mm, uint32_t inst);
sys/dev/pci/drm/amd/include/kgd_kfd_interface.h
246
uint32_t __user *wptr, struct mm_struct *mm);
sys/dev/pci/drm/amd/pm/powerplay/hwmgr/smu_helper.h
152
PHM_GET_FIELD(cgs_read_register(device, mm##reg), reg, field)
sys/dev/pci/drm/amd/pm/powerplay/hwmgr/smu_helper.h
163
cgs_write_register(device, mm##reg, PHM_SET_FIELD( \
sys/dev/pci/drm/amd/pm/powerplay/hwmgr/smu_helper.h
164
cgs_read_register(device, mm##reg), reg, field, fieldval))
sys/dev/pci/drm/amd/pm/powerplay/hwmgr/smu_helper.h
177
phm_wait_on_indirect_register(hwmgr, mm##port##_INDEX, index, value, mask)
sys/dev/pci/drm/amd/pm/powerplay/hwmgr/smu_helper.h
189
mm##port##_INDEX, index, value, mask)
sys/dev/pci/drm/amd/pm/powerplay/hwmgr/smu_helper.h
203
mm##port##_INDEX_11, index, value, mask)
sys/dev/pci/drm/amd/pm/powerplay/hwmgr/smu_helper.h
217
mm##port##_INDEX_11, index, value, mask)
sys/dev/pci/drm/amd/pm/powerplay/hwmgr/smu_helper.h
234
mm##reg, value, mask)
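
The display/dc and powerplay hits above all lean on the same token-pasting convention: register headers expose MMIO offsets under an "mm" prefix, and initializer macros splice that prefix onto a struct field name at expansion time. A compilable illustration with a made-up register and offset (the BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name variants listed above add a per-segment base the same way):

/* Hypothetical register header entry; "mm" prefix marks an MMIO offset. */
#define mmDENTIST_DISPCLK_CNTL	0x0124

/* Initializer macro in the style of the listings above. */
#define SR(reg_name)	.reg_name = mm ## reg_name

struct clk_regs {
	unsigned int DENTIST_DISPCLK_CNTL;
};

static const struct clk_regs regs = {
	SR(DENTIST_DISPCLK_CNTL)	/* => .DENTIST_DISPCLK_CNTL = 0x0124 */
};
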
sys/dev/pci/drm/dma-resv.c
793
struct mm_struct *mm = mm_alloc();
sys/dev/pci/drm/dma-resv.c
799
if (!mm)
sys/dev/pci/drm/dma-resv.c
805
mmap_read_lock(mm);
sys/dev/pci/drm/dma-resv.c
824
mmap_read_unlock(mm);
sys/dev/pci/drm/dma-resv.c
826
mmput(mm);
sys/dev/pci/drm/drm_buddy.c
1005
int drm_buddy_block_trim(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
102
static void rbtree_insert(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
1025
block_end = block_start + drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
1030
if (new_size > drm_buddy_block_size(mm, block))
sys/dev/pci/drm/drm_buddy.c
1033
if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
sys/dev/pci/drm/drm_buddy.c
1036
if (new_size == drm_buddy_block_size(mm, block))
sys/dev/pci/drm/drm_buddy.c
1046
if (!IS_ALIGNED(new_start, mm->chunk_size))
sys/dev/pci/drm/drm_buddy.c
1054
mark_free(mm, block);
sys/dev/pci/drm/drm_buddy.c
1055
mm->avail += drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
1057
mm->clear_avail += drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
1064
err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
sys/dev/pci/drm/drm_buddy.c
1066
mark_allocated(mm, block);
sys/dev/pci/drm/drm_buddy.c
1067
mm->avail -= drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
1069
mm->clear_avail -= drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
107
&mm->free_trees[tree][drm_buddy_block_order(block)],
sys/dev/pci/drm/drm_buddy.c
1079
__drm_buddy_alloc_blocks(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
1086
return __drm_buddy_alloc_range_bias(mm, start, end,
sys/dev/pci/drm/drm_buddy.c
1090
return alloc_from_freetree(mm, order, flags);
sys/dev/pci/drm/drm_buddy.c
111
static void rbtree_remove(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
1113
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
1126
if (size < mm->chunk_size)
sys/dev/pci/drm/drm_buddy.c
1129
if (min_block_size < mm->chunk_size)
sys/dev/pci/drm/drm_buddy.c
1135
if (!IS_ALIGNED(start | end | size, mm->chunk_size))
sys/dev/pci/drm/drm_buddy.c
1138
if (end > mm->size)
sys/dev/pci/drm/drm_buddy.c
1141
if (range_overflows(start, size, mm->size))
sys/dev/pci/drm/drm_buddy.c
1149
return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
sys/dev/pci/drm/drm_buddy.c
1164
pages = size >> ilog2(mm->chunk_size);
sys/dev/pci/drm/drm_buddy.c
1166
min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
sys/dev/pci/drm/drm_buddy.c
1168
if (order > mm->max_order || size > mm->size) {
sys/dev/pci/drm/drm_buddy.c
1171
return __alloc_contig_try_harder(mm, original_size,
sys/dev/pci/drm/drm_buddy.c
1179
BUG_ON(order > mm->max_order);
sys/dev/pci/drm/drm_buddy.c
1183
block = __drm_buddy_alloc_blocks(mm, start,
sys/dev/pci/drm/drm_buddy.c
119
root = &mm->free_trees[tree][order];
sys/dev/pci/drm/drm_buddy.c
1192
if (mm->clear_avail &&
sys/dev/pci/drm/drm_buddy.c
1193
!__force_merge(mm, start, end, min_order)) {
sys/dev/pci/drm/drm_buddy.c
1194
block = __drm_buddy_alloc_blocks(mm, start,
sys/dev/pci/drm/drm_buddy.c
1210
return __alloc_contig_try_harder(mm,
sys/dev/pci/drm/drm_buddy.c
1219
mark_allocated(mm, block);
sys/dev/pci/drm/drm_buddy.c
1220
mm->avail -= drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
1222
mm->clear_avail -= drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
1246
trim_size = drm_buddy_block_size(mm, block) -
sys/dev/pci/drm/drm_buddy.c
1250
drm_buddy_block_trim(mm,
sys/dev/pci/drm/drm_buddy.c
1263
drm_buddy_free_list_internal(mm, &allocated);
sys/dev/pci/drm/drm_buddy.c
1275
void drm_buddy_block_print(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
1280
u64 size = drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
1292
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
sys/dev/pci/drm/drm_buddy.c
1297
mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);
sys/dev/pci/drm/drm_buddy.c
1299
for (order = mm->max_order; order >= 0; order--) {
sys/dev/pci/drm/drm_buddy.c
1306
root = &mm->free_trees[tree][order];
sys/dev/pci/drm/drm_buddy.c
1316
free = count * (mm->chunk_size << order);
sys/dev/pci/drm/drm_buddy.c
135
static void mark_allocated(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
141
rbtree_remove(mm, block);
sys/dev/pci/drm/drm_buddy.c
144
static void mark_free(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
153
rbtree_insert(mm, block, tree);
sys/dev/pci/drm/drm_buddy.c
156
static void mark_split(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
162
rbtree_remove(mm, block);
sys/dev/pci/drm/drm_buddy.c
190
static unsigned int __drm_buddy_free(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
218
rbtree_remove(mm, buddy);
sys/dev/pci/drm/drm_buddy.c
220
mm->clear_avail -= drm_buddy_block_size(mm, buddy);
sys/dev/pci/drm/drm_buddy.c
222
drm_block_free(mm, block);
sys/dev/pci/drm/drm_buddy.c
223
drm_block_free(mm, buddy);
sys/dev/pci/drm/drm_buddy.c
229
mark_free(mm, block);
sys/dev/pci/drm/drm_buddy.c
234
static int __force_merge(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
245
if (min_order > mm->max_order)
sys/dev/pci/drm/drm_buddy.c
250
struct rb_node *iter = rb_last(&mm->free_trees[tree][i]);
sys/dev/pci/drm/drm_buddy.c
263
block_end = block_start + drm_buddy_block_size(mm, block) - 1;
sys/dev/pci/drm/drm_buddy.c
28
static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
282
rbtree_remove(mm, block);
sys/dev/pci/drm/drm_buddy.c
284
mm->clear_avail -= drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
286
order = __drm_buddy_free(mm, block, true);
sys/dev/pci/drm/drm_buddy.c
308
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
sys/dev/pci/drm/drm_buddy.c
324
mm->size = size;
sys/dev/pci/drm/drm_buddy.c
325
mm->avail = size;
sys/dev/pci/drm/drm_buddy.c
326
mm->clear_avail = 0;
sys/dev/pci/drm/drm_buddy.c
327
mm->chunk_size = chunk_size;
sys/dev/pci/drm/drm_buddy.c
328
mm->max_order = ilog2(size) - ilog2(chunk_size);
sys/dev/pci/drm/drm_buddy.c
330
BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);
sys/dev/pci/drm/drm_buddy.c
332
mm->free_trees = kmalloc_array(DRM_BUDDY_MAX_FREE_TREES,
sys/dev/pci/drm/drm_buddy.c
333
sizeof(*mm->free_trees),
sys/dev/pci/drm/drm_buddy.c
335
if (!mm->free_trees)
sys/dev/pci/drm/drm_buddy.c
339
mm->free_trees[i] = kmalloc_array(mm->max_order + 1,
sys/dev/pci/drm/drm_buddy.c
342
if (!mm->free_trees[i])
sys/dev/pci/drm/drm_buddy.c
345
for (j = 0; j <= mm->max_order; ++j)
sys/dev/pci/drm/drm_buddy.c
346
mm->free_trees[i][j] = RB_ROOT;
sys/dev/pci/drm/drm_buddy.c
349
mm->n_roots = hweight64(size);
sys/dev/pci/drm/drm_buddy.c
351
mm->roots = kmalloc_array(mm->n_roots,
sys/dev/pci/drm/drm_buddy.c
354
if (!mm->roots)
sys/dev/pci/drm/drm_buddy.c
369
root = drm_block_alloc(mm, NULL, order, offset);
sys/dev/pci/drm/drm_buddy.c
373
mark_free(mm, root);
sys/dev/pci/drm/drm_buddy.c
375
BUG_ON(root_count > mm->max_order);
sys/dev/pci/drm/drm_buddy.c
376
BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);
sys/dev/pci/drm/drm_buddy.c
378
mm->roots[root_count] = root;
sys/dev/pci/drm/drm_buddy.c
389
drm_block_free(mm, mm->roots[root_count]);
sys/dev/pci/drm/drm_buddy.c
390
kfree(mm->roots);
sys/dev/pci/drm/drm_buddy.c
393
kfree(mm->free_trees[i]);
sys/dev/pci/drm/drm_buddy.c
394
kfree(mm->free_trees);
sys/dev/pci/drm/drm_buddy.c
406
void drm_buddy_fini(struct drm_buddy *mm)
sys/dev/pci/drm/drm_buddy.c
412
size = mm->size;
sys/dev/pci/drm/drm_buddy.c
414
for (i = 0; i < mm->n_roots; ++i) {
sys/dev/pci/drm/drm_buddy.c
415
order = ilog2(size) - ilog2(mm->chunk_size);
sys/dev/pci/drm/drm_buddy.c
416
start = drm_buddy_block_offset(mm->roots[i]);
sys/dev/pci/drm/drm_buddy.c
417
__force_merge(mm, start, start + size, order);
sys/dev/pci/drm/drm_buddy.c
419
if (WARN_ON(!drm_buddy_block_is_free(mm->roots[i])))
sys/dev/pci/drm/drm_buddy.c
422
drm_block_free(mm, mm->roots[i]);
sys/dev/pci/drm/drm_buddy.c
424
root_size = mm->chunk_size << order;
sys/dev/pci/drm/drm_buddy.c
428
WARN_ON(mm->avail != mm->size);
sys/dev/pci/drm/drm_buddy.c
431
kfree(mm->free_trees[i]);
sys/dev/pci/drm/drm_buddy.c
432
kfree(mm->free_trees);
sys/dev/pci/drm/drm_buddy.c
433
kfree(mm->roots);
sys/dev/pci/drm/drm_buddy.c
437
static int split_block(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
446
block->left = drm_block_alloc(mm, block, block_order, offset);
sys/dev/pci/drm/drm_buddy.c
450
block->right = drm_block_alloc(mm, block, block_order,
sys/dev/pci/drm/drm_buddy.c
451
offset + (mm->chunk_size << block_order));
sys/dev/pci/drm/drm_buddy.c
453
drm_block_free(mm, block->left);
sys/dev/pci/drm/drm_buddy.c
457
mark_split(mm, block);
sys/dev/pci/drm/drm_buddy.c
465
mark_free(mm, block->left);
sys/dev/pci/drm/drm_buddy.c
466
mark_free(mm, block->right);
sys/dev/pci/drm/drm_buddy.c
497
void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
sys/dev/pci/drm/drm_buddy.c
504
size = mm->size;
sys/dev/pci/drm/drm_buddy.c
505
for (i = 0; i < mm->n_roots; ++i) {
sys/dev/pci/drm/drm_buddy.c
506
order = ilog2(size) - ilog2(mm->chunk_size);
sys/dev/pci/drm/drm_buddy.c
507
start = drm_buddy_block_offset(mm->roots[i]);
sys/dev/pci/drm/drm_buddy.c
508
__force_merge(mm, start, start + size, order);
sys/dev/pci/drm/drm_buddy.c
510
root_size = mm->chunk_size << order;
sys/dev/pci/drm/drm_buddy.c
517
for (i = 0; i <= mm->max_order; ++i) {
sys/dev/pci/drm/drm_buddy.c
518
struct rb_root *root = &mm->free_trees[src_tree][i];
sys/dev/pci/drm/drm_buddy.c
522
rbtree_remove(mm, block);
sys/dev/pci/drm/drm_buddy.c
525
mm->clear_avail += drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
528
mm->clear_avail -= drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
531
rbtree_insert(mm, block, dst_tree);
sys/dev/pci/drm/drm_buddy.c
543
void drm_buddy_free_block(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
547
mm->avail += drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
549
mm->clear_avail += drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
55
static void drm_block_free(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
551
__drm_buddy_free(mm, block, false);
sys/dev/pci/drm/drm_buddy.c
555
static void __drm_buddy_free_list(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
569
drm_buddy_free_block(mm, block);
sys/dev/pci/drm/drm_buddy.c
575
static void drm_buddy_free_list_internal(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
583
__drm_buddy_free_list(mm, objects, false, false);
sys/dev/pci/drm/drm_buddy.c
593
void drm_buddy_free_list(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
599
__drm_buddy_free_list(mm, objects, mark_clear, !mark_clear);
sys/dev/pci/drm/drm_buddy.c
611
__alloc_range_bias(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
617
u64 req_size = mm->chunk_size << order;
sys/dev/pci/drm/drm_buddy.c
626
for (i = 0; i < mm->n_roots; ++i)
sys/dev/pci/drm/drm_buddy.c
627
list_add_tail(&mm->roots[i]->tmp_link, &dfs);
sys/dev/pci/drm/drm_buddy.c
645
block_end = block_start + drm_buddy_block_size(mm, block) - 1;
sys/dev/pci/drm/drm_buddy.c
677
err = split_block(mm, block);
sys/dev/pci/drm/drm_buddy.c
698
__drm_buddy_free(mm, block, false);
sys/dev/pci/drm/drm_buddy.c
703
__drm_buddy_alloc_range_bias(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
711
block = __alloc_range_bias(mm, start, end, order,
sys/dev/pci/drm/drm_buddy.c
714
return __alloc_range_bias(mm, start, end, order,
sys/dev/pci/drm/drm_buddy.c
721
get_maxblock(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
729
for (i = order; i <= mm->max_order; ++i) {
sys/dev/pci/drm/drm_buddy.c
730
root = &mm->free_trees[tree][i];
sys/dev/pci/drm/drm_buddy.c
750
alloc_from_freetree(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
764
block = get_maxblock(mm, order, tree);
sys/dev/pci/drm/drm_buddy.c
769
for (tmp = order; tmp <= mm->max_order; ++tmp) {
sys/dev/pci/drm/drm_buddy.c
771
root = &mm->free_trees[tree][tmp];
sys/dev/pci/drm/drm_buddy.c
783
for (tmp = order; tmp <= mm->max_order; ++tmp) {
sys/dev/pci/drm/drm_buddy.c
784
root = &mm->free_trees[tree][tmp];
sys/dev/pci/drm/drm_buddy.c
797
err = split_block(mm, block);
sys/dev/pci/drm/drm_buddy.c
808
__drm_buddy_free(mm, block, false);
sys/dev/pci/drm/drm_buddy.c
812
static int __alloc_range(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
840
block_end = block_start + drm_buddy_block_size(mm, block) - 1;
sys/dev/pci/drm/drm_buddy.c
852
mark_allocated(mm, block);
sys/dev/pci/drm/drm_buddy.c
853
total_allocated += drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
854
mm->avail -= drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
856
mm->clear_avail -= drm_buddy_block_size(mm, block);
sys/dev/pci/drm/drm_buddy.c
859
} else if (!mm->clear_avail) {
sys/dev/pci/drm/drm_buddy.c
866
err = split_block(mm, block);
sys/dev/pci/drm/drm_buddy.c
894
__drm_buddy_free(mm, block, false);
sys/dev/pci/drm/drm_buddy.c
901
drm_buddy_free_list_internal(mm, &allocated);
sys/dev/pci/drm/drm_buddy.c
907
static int __drm_buddy_alloc_range(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
916
for (i = 0; i < mm->n_roots; ++i)
sys/dev/pci/drm/drm_buddy.c
917
list_add_tail(&mm->roots[i]->tmp_link, &dfs);
sys/dev/pci/drm/drm_buddy.c
919
return __alloc_range(mm, &dfs, start, size,
sys/dev/pci/drm/drm_buddy.c
923
static int __alloc_contig_try_harder(struct drm_buddy *mm,
sys/dev/pci/drm/drm_buddy.c
937
pages = modify_size >> ilog2(mm->chunk_size);
sys/dev/pci/drm/drm_buddy.c
946
root = &mm->free_trees[tree][order];
sys/dev/pci/drm/drm_buddy.c
956
err = __drm_buddy_alloc_range(mm, rhs_offset, size,
sys/dev/pci/drm/drm_buddy.c
967
err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
sys/dev/pci/drm/drm_buddy.c
973
drm_buddy_free_list_internal(mm, blocks);
sys/dev/pci/drm/drm_buddy.c
977
drm_buddy_free_list_internal(mm, blocks);
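
A usage sketch for the drm_buddy entry points listed above (drm_buddy_init, drm_buddy_alloc_blocks, drm_buddy_free_list, drm_buddy_fini); the sizes and the zero flags values are illustrative, and the flags-taking free_list form is inferred from the __drm_buddy_free_list hit above:

#include <drm/drm_buddy.h>
#include <linux/sizes.h>

static int buddy_demo(void)
{
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	int err;

	err = drm_buddy_init(&mm, SZ_4G, SZ_4K);	/* size, chunk_size */
	if (err)
		return err;

	/* one 1 MiB allocation anywhere in [0, 4 GiB) */
	err = drm_buddy_alloc_blocks(&mm, 0, SZ_4G, SZ_1M, SZ_4K,
				     &blocks, 0);
	if (!err)
		drm_buddy_free_list(&mm, &blocks, 0);

	drm_buddy_fini(&mm);
	return err;
}
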
sys/dev/pci/drm/drm_gpusvm.c
1262
struct mm_struct *mm,
sys/dev/pci/drm/drm_gpusvm.c
1299
if (!mmget_not_zero(mm)) {
sys/dev/pci/drm/drm_gpusvm.c
1306
mmap_read_lock(mm);
sys/dev/pci/drm/drm_gpusvm.c
1308
mmap_read_unlock(mm);
sys/dev/pci/drm/drm_gpusvm.c
1320
mmput(mm);
sys/dev/pci/drm/drm_gpusvm.c
1471
return drm_gpusvm_get_pages(gpusvm, &range->pages, gpusvm->mm,
sys/dev/pci/drm/drm_gpusvm.c
1557
struct mm_struct *mm = gpusvm->mm;
sys/dev/pci/drm/drm_gpusvm.c
1559
if (!mmget_not_zero(mm))
sys/dev/pci/drm/drm_gpusvm.c
1574
mmap_read_lock(mm);
sys/dev/pci/drm/drm_gpusvm.c
1576
mmap_read_unlock(mm);
sys/dev/pci/drm/drm_gpusvm.c
1582
mmput(mm);
sys/dev/pci/drm/drm_gpusvm.c
385
struct mm_struct *mm,
sys/dev/pci/drm/drm_gpusvm.c
391
if (mm) {
sys/dev/pci/drm/drm_gpusvm.c
394
mmgrab(mm);
sys/dev/pci/drm/drm_gpusvm.c
403
gpusvm->mm = mm;
sys/dev/pci/drm/drm_gpusvm.c
502
if (gpusvm->mm)
sys/dev/pci/drm/drm_gpusvm.c
503
mmdrop(gpusvm->mm);
sys/dev/pci/drm/drm_gpusvm.c
709
mmap_assert_locked(gpusvm->mm);
sys/dev/pci/drm/drm_gpusvm.c
859
struct mm_struct *mm = gpusvm->mm;
sys/dev/pci/drm/drm_gpusvm.c
863
if (!mmget_not_zero(mm))
sys/dev/pci/drm/drm_gpusvm.c
866
mmap_read_lock(mm);
sys/dev/pci/drm/drm_gpusvm.c
868
vma = find_vma_intersection(mm, start, end);
sys/dev/pci/drm/drm_gpusvm.c
872
mmap_read_unlock(mm);
sys/dev/pci/drm/drm_gpusvm.c
873
mmput(mm);
sys/dev/pci/drm/drm_gpusvm.c
901
struct mm_struct *mm = gpusvm->mm;
sys/dev/pci/drm/drm_gpusvm.c
914
if (!mmget_not_zero(mm))
sys/dev/pci/drm/drm_gpusvm.c
926
mm,
sys/dev/pci/drm/drm_gpusvm.c
934
mmap_read_lock(mm);
sys/dev/pci/drm/drm_gpusvm.c
936
vas = vma_lookup(mm, fault_addr);
sys/dev/pci/drm/drm_gpusvm.c
980
mmap_read_unlock(mm);
sys/dev/pci/drm/drm_gpusvm.c
981
mmput(mm);
sys/dev/pci/drm/drm_gpusvm.c
986
mmap_read_unlock(mm);
sys/dev/pci/drm/drm_gpusvm.c
993
mmput(mm);
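
The drm_gpusvm.c hits above mix both mm reference counts: mmgrab/mmdrop keep the struct mm_struct allocation itself alive (mm_count) for the object's lifetime, while mmget_not_zero/mmput pin the address space (mm_users) only while it is actually walked. A condensed sketch with the gpusvm state reduced to a bare pointer:

#include <linux/mmap_lock.h>
#include <linux/sched/mm.h>	/* mmgrab, mmdrop, mmget_not_zero, mmput */

struct tiny_svm {
	struct mm_struct *mm;
};

static void tiny_svm_init(struct tiny_svm *svm, struct mm_struct *mm)
{
	mmgrab(mm);		/* struct stays valid even after exit_mmap() */
	svm->mm = mm;
}

static void tiny_svm_walk(struct tiny_svm *svm)
{
	if (!mmget_not_zero(svm->mm))
		return;		/* process already exited; nothing to walk */
	mmap_read_lock(svm->mm);
	/* ... find_vma_intersection(), fault pages ... */
	mmap_read_unlock(svm->mm);
	mmput(svm->mm);
}

static void tiny_svm_fini(struct tiny_svm *svm)
{
	mmdrop(svm->mm);	/* releases the mm_count reference */
}
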
sys/dev/pci/drm/drm_mm.c
1002
total_free += drm_mm_dump_hole(p, &mm->head_node);
sys/dev/pci/drm/drm_mm.c
1004
drm_mm_for_each_node(entry, mm) {
sys/dev/pci/drm/drm_mm.c
118
static void show_leaks(struct drm_mm *mm)
sys/dev/pci/drm/drm_mm.c
129
list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
sys/dev/pci/drm/drm_mm.c
149
static void show_leaks(struct drm_mm *mm) { }
sys/dev/pci/drm/drm_mm.c
184
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
sys/dev/pci/drm/drm_mm.c
186
return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
sys/dev/pci/drm/drm_mm.c
187
start, last) ?: (struct drm_mm_node *)&mm->head_node;
sys/dev/pci/drm/drm_mm.c
194
struct drm_mm *mm = hole_node->mm;
sys/dev/pci/drm/drm_mm.c
217
link = &mm->interval_tree.rb_root.rb_node;
sys/dev/pci/drm/drm_mm.c
236
rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
sys/dev/pci/drm/drm_mm.c
239
rb_insert_color_cached(&node->rb, &mm->interval_tree, leftmost);
sys/dev/pci/drm/drm_mm.c
288
struct drm_mm *mm = node->mm;
sys/dev/pci/drm/drm_mm.c
294
insert_hole_size(&mm->holes_size, node);
sys/dev/pci/drm/drm_mm.c
295
DRM_RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
sys/dev/pci/drm/drm_mm.c
297
list_add(&node->hole_stack, &mm->hole_stack);
sys/dev/pci/drm/drm_mm.c
305
rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
sys/dev/pci/drm/drm_mm.c
306
rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
sys/dev/pci/drm/drm_mm.c
327
static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
sys/dev/pci/drm/drm_mm.c
329
struct rb_node *rb = mm->holes_size.rb_root.rb_node;
sys/dev/pci/drm/drm_mm.c
347
static struct drm_mm_node *find_hole(struct drm_mm *mm, u64 addr)
sys/dev/pci/drm/drm_mm.c
349
struct rb_node *rb = mm->holes_addr.rb_node;
sys/dev/pci/drm/drm_mm.c
370
first_hole(struct drm_mm *mm,
sys/dev/pci/drm/drm_mm.c
377
return best_hole(mm, size);
sys/dev/pci/drm/drm_mm.c
380
return find_hole(mm, start);
sys/dev/pci/drm/drm_mm.c
383
return find_hole(mm, end);
sys/dev/pci/drm/drm_mm.c
386
return list_first_entry_or_null(&mm->hole_stack,
sys/dev/pci/drm/drm_mm.c
393
next_hole(struct drm_mm *mm,
sys/dev/pci/drm/drm_mm.c
410
return &node->hole_stack == &mm->hole_stack ? NULL : node;
sys/dev/pci/drm/drm_mm.c
428
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
sys/dev/pci/drm/drm_mm.c
440
hole = find_hole(mm, node->start);
sys/dev/pci/drm/drm_mm.c
447
if (mm->color_adjust)
sys/dev/pci/drm/drm_mm.c
448
mm->color_adjust(hole, node->color, &adj_start, &adj_end);
sys/dev/pci/drm/drm_mm.c
453
node->mm = mm;
sys/dev/pci/drm/drm_mm.c
492
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
sys/dev/pci/drm/drm_mm.c
508
if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
sys/dev/pci/drm/drm_mm.c
518
for (hole = first_hole(mm, range_start, range_end, size, mode);
sys/dev/pci/drm/drm_mm.c
520
hole = once ? NULL : next_hole(mm, hole, mode)) {
sys/dev/pci/drm/drm_mm.c
534
if (mm->color_adjust)
sys/dev/pci/drm/drm_mm.c
535
mm->color_adjust(hole, color, &col_start, &col_end);
sys/dev/pci/drm/drm_mm.c
568
node->mm = mm;
sys/dev/pci/drm/drm_mm.c
607
struct drm_mm *mm = node->mm;
sys/dev/pci/drm/drm_mm.c
618
drm_mm_interval_tree_remove(node, &mm->interval_tree);
sys/dev/pci/drm/drm_mm.c
640
struct drm_mm *mm = old->mm;
sys/dev/pci/drm/drm_mm.c
648
rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);
sys/dev/pci/drm/drm_mm.c
654
&mm->holes_size);
sys/dev/pci/drm/drm_mm.c
657
&mm->holes_addr);
sys/dev/pci/drm/drm_mm.c
715
struct drm_mm *mm,
sys/dev/pci/drm/drm_mm.c
725
DRM_MM_BUG_ON(mm->scan_active);
sys/dev/pci/drm/drm_mm.c
727
scan->mm = mm;
sys/dev/pci/drm/drm_mm.c
761
struct drm_mm *mm = scan->mm;
sys/dev/pci/drm/drm_mm.c
767
DRM_MM_BUG_ON(node->mm != mm);
sys/dev/pci/drm/drm_mm.c
771
mm->scan_active++;
sys/dev/pci/drm/drm_mm.c
787
if (mm->color_adjust)
sys/dev/pci/drm/drm_mm.c
788
mm->color_adjust(hole, scan->color, &col_start, &col_end);
sys/dev/pci/drm/drm_mm.c
854
DRM_MM_BUG_ON(node->mm != scan->mm);
sys/dev/pci/drm/drm_mm.c
858
DRM_MM_BUG_ON(!node->mm->scan_active);
sys/dev/pci/drm/drm_mm.c
859
node->mm->scan_active--;
sys/dev/pci/drm/drm_mm.c
892
struct drm_mm *mm = scan->mm;
sys/dev/pci/drm/drm_mm.c
896
DRM_MM_BUG_ON(list_empty(&mm->hole_stack));
sys/dev/pci/drm/drm_mm.c
898
if (!mm->color_adjust)
sys/dev/pci/drm/drm_mm.c
906
list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
sys/dev/pci/drm/drm_mm.c
916
DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
sys/dev/pci/drm/drm_mm.c
917
if (unlikely(&hole->hole_stack == &mm->hole_stack))
sys/dev/pci/drm/drm_mm.c
923
mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
sys/dev/pci/drm/drm_mm.c
941
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
sys/dev/pci/drm/drm_mm.c
945
mm->color_adjust = NULL;
sys/dev/pci/drm/drm_mm.c
947
INIT_LIST_HEAD(&mm->hole_stack);
sys/dev/pci/drm/drm_mm.c
948
mm->interval_tree = RB_ROOT_CACHED;
sys/dev/pci/drm/drm_mm.c
949
mm->holes_size = RB_ROOT_CACHED;
sys/dev/pci/drm/drm_mm.c
950
mm->holes_addr = RB_ROOT;
sys/dev/pci/drm/drm_mm.c
953
INIT_LIST_HEAD(&mm->head_node.node_list);
sys/dev/pci/drm/drm_mm.c
954
mm->head_node.flags = 0;
sys/dev/pci/drm/drm_mm.c
955
mm->head_node.mm = mm;
sys/dev/pci/drm/drm_mm.c
956
mm->head_node.start = start + size;
sys/dev/pci/drm/drm_mm.c
957
mm->head_node.size = -size;
sys/dev/pci/drm/drm_mm.c
958
add_hole(&mm->head_node);
sys/dev/pci/drm/drm_mm.c
960
mm->scan_active = 0;
sys/dev/pci/drm/drm_mm.c
971
void drm_mm_takedown(struct drm_mm *mm)
sys/dev/pci/drm/drm_mm.c
973
if (WARN(!drm_mm_clean(mm),
sys/dev/pci/drm/drm_mm.c
975
show_leaks(mm);
sys/dev/pci/drm/drm_mm.c
997
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
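
drm_mm is DRM's range allocator: drm_mm_init() seeds a single hole spanning [start, start + size) (the inverted head_node at drm_mm.c:956-958 above), inserts carve nodes out of holes, and drm_mm_takedown() warns about leaked nodes. A minimal sketch of the public cycle, assuming the upstream API:

    #include <drm/drm_mm.h>

    static int mm_demo(void)
    {
        struct drm_mm mm;
        struct drm_mm_node node = {};
        int err;

        drm_mm_init(&mm, 0, 1 << 20);   /* manage [0, 1 MiB) */

        /* Best-fit a 4 KiB, 4 KiB-aligned node anywhere in range. */
        err = drm_mm_insert_node_in_range(&mm, &node, 4096, 4096, 0,
                                          0, 1 << 20, DRM_MM_INSERT_BEST);
        if (!err)
            drm_mm_remove_node(&node);

        drm_mm_takedown(&mm);           /* warns if nodes leaked */
        return err;
    }
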
sys/dev/pci/drm/drm_pagemap.c
319
struct mm_struct *mm,
sys/dev/pci/drm/drm_pagemap.c
339
mmap_assert_locked(mm);
sys/dev/pci/drm/drm_pagemap.c
345
vas = vma_lookup(mm, start);
sys/dev/pci/drm/drm_pagemap.c
569
if (!mmget_not_zero(devmem_allocation->mm))
sys/dev/pci/drm/drm_pagemap.c
618
mmput_async(devmem_allocation->mm);
sys/dev/pci/drm/drm_pagemap.c
825
struct device *dev, struct mm_struct *mm,
sys/dev/pci/drm/drm_pagemap.c
832
devmem_allocation->mm = mm;
sys/dev/pci/drm/drm_pagemap.c
876
struct mm_struct *mm,
sys/dev/pci/drm/drm_pagemap.c
881
if (!mmget_not_zero(mm))
sys/dev/pci/drm/drm_pagemap.c
883
mmap_read_lock(mm);
sys/dev/pci/drm/drm_pagemap.c
884
err = dpagemap->ops->populate_mm(dpagemap, start, end, mm,
sys/dev/pci/drm/drm_pagemap.c
886
mmap_read_unlock(mm);
sys/dev/pci/drm/drm_pagemap.c
887
mmput(mm);
sys/dev/pci/drm/i915/display/intel_dpt.c
169
dpt->obj->mm.dirty = true;
sys/dev/pci/drm/i915/display/intel_fbdev_fb.c
79
struct intel_memory_region *mem = obj->mm.region;
sys/dev/pci/drm/i915/display/intel_overlay.c
1382
overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
sys/dev/pci/drm/i915/display/intel_plane_initial.c
157
mem == i915->mm.stolen_region &&
sys/dev/pci/drm/i915/gem/i915_gem_clflush.c
129
} else if (obj->mm.pages) {
sys/dev/pci/drm/i915/gem/i915_gem_clflush.c
23
drm_clflush_sg(obj->mm.pages);
sys/dev/pci/drm/i915/gem/i915_gem_create.c
140
if (obj->mm.n_placements > 1)
sys/dev/pci/drm/i915/gem/i915_gem_create.c
141
kfree(obj->mm.placements);
sys/dev/pci/drm/i915/gem/i915_gem_create.c
293
BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
sys/dev/pci/drm/i915/gem/i915_gem_create.c
295
if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
sys/dev/pci/drm/i915/gem/i915_gem_create.c
53
obj->mm.placements = &i915->mm.regions[mr->id];
sys/dev/pci/drm/i915/gem/i915_gem_create.c
54
obj->mm.n_placements = 1;
sys/dev/pci/drm/i915/gem/i915_gem_create.c
65
obj->mm.placements = arr;
sys/dev/pci/drm/i915/gem/i915_gem_create.c
66
obj->mm.n_placements = n_placements;
sys/dev/pci/drm/i915/gem/i915_gem_dmabuf.c
47
ret = sg_alloc_table(sgt, obj->mm.pages->orig_nents, GFP_KERNEL);
sys/dev/pci/drm/i915/gem/i915_gem_dmabuf.c
52
for_each_sg(obj->mm.pages->sgl, src, obj->mm.pages->orig_nents, i) {
sys/dev/pci/drm/i915/gem/i915_gem_domain.c
174
obj->mm.dirty = true;
sys/dev/pci/drm/i915/gem/i915_gem_domain.c
238
obj->mm.dirty = true;
sys/dev/pci/drm/i915/gem/i915_gem_domain.c
763
obj->mm.dirty = true;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1186
(struct drm_i915_gem_object *)cache->node.mm;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1206
i915_vma_unpin((struct i915_vma *)cache->node.mm);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1235
cache->node.mm = (void *)obj;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1241
if (!obj->mm.dirty)
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1300
(&ggtt->vm.mm, &cache->node,
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
1309
cache->node.mm = (void *)vma;
sys/dev/pci/drm/i915/gem/i915_gem_internal.c
126
obj->mm.dirty = false;
sys/dev/pci/drm/i915/gem/i915_gem_lmem.c
117
return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
sys/dev/pci/drm/i915/gem/i915_gem_lmem.c
23
offset -= obj->mm.region->region.start;
sys/dev/pci/drm/i915/gem/i915_gem_lmem.c
25
return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
sys/dev/pci/drm/i915/gem/i915_gem_lmem.c
44
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
sys/dev/pci/drm/i915/gem/i915_gem_lmem.c
81
return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM_0],
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
124
struct mm_struct *mm = current->mm;
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
127
if (mmap_write_lock_killable(mm)) {
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
131
vma = find_vma(mm, addr);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
137
mmap_write_unlock(mm);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
309
iomap = obj->mm.region->iomap.base;
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
310
iomap -= obj->mm.region->region.start;
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
317
obj->mm.pages->sgl, obj_offset, iomap);
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
321
obj->mm.dirty = true;
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
519
obj->mm.dirty = true;
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
668
pages = obj->mm.pages;
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
682
obj->mm.dirty = true;
sys/dev/pci/drm/i915/gem/i915_gem_mman.c
892
obj->mm.dirty = true;
sys/dev/pci/drm/i915/gem/i915_gem_object.c
1013
return obj->mm.unknown_state;
sys/dev/pci/drm/i915/gem/i915_gem_object.c
116
INIT_LIST_HEAD(&obj->mm.link);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
134
obj->mm.madv = I915_MADV_WILLNEED;
sys/dev/pci/drm/i915/gem/i915_gem_object.c
135
INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
136
rw_init(&obj->mm.get_page.lock, "mmget");
sys/dev/pci/drm/i915/gem/i915_gem_object.c
137
INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
138
rw_init(&obj->mm.get_dma_page.lock, "mmgetd");
sys/dev/pci/drm/i915/gem/i915_gem_object.c
152
mutex_destroy(&obj->mm.get_page.lock);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
153
mutex_destroy(&obj->mm.get_dma_page.lock);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
310
if (obj->mm.n_placements > 1)
sys/dev/pci/drm/i915/gem/i915_gem_object.c
311
kfree(obj->mm.placements);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
320
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
sys/dev/pci/drm/i915/gem/i915_gem_object.c
321
atomic_dec(&i915->mm.free_count);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
379
atomic_set(&obj->mm.pages_pin_count, 0);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
443
struct llist_node *freed = llist_del_all(&i915->mm.free_list);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
452
container_of(work, struct drm_i915_private, mm.free_work);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
472
atomic_inc(&i915->mm.free_count);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
485
if (llist_add(&obj->freed, &i915->mm.free_list))
sys/dev/pci/drm/i915/gem/i915_gem_object.c
486
queue_work(i915->wq, &i915->mm.free_work);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
536
src_map = io_mapping_map_wc(&obj->mm.region->iomap,
sys/dev/pci/drm/i915/gem/i915_gem_object.c
537
dma - obj->mm.region->region.start,
sys/dev/pci/drm/i915/gem/i915_gem_object.c
604
int pin_count = atomic_read(&obj->mm.pages_pin_count);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
634
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
639
return obj->mm.n_placements > 1;
sys/dev/pci/drm/i915/gem/i915_gem_object.c
701
unsigned int num_allowed = obj->mm.n_placements;
sys/dev/pci/drm/i915/gem/i915_gem_object.c
706
GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
708
mr = i915->mm.regions[id];
sys/dev/pci/drm/i915/gem/i915_gem_object.c
715
if (obj->mm.region == mr)
sys/dev/pci/drm/i915/gem/i915_gem_object.c
731
if (mr == obj->mm.placements[i])
sys/dev/pci/drm/i915/gem/i915_gem_object.c
806
GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
sys/dev/pci/drm/i915/gem/i915_gem_object.c
809
mr = i915->mm.regions[id];
sys/dev/pci/drm/i915/gem/i915_gem_object.c
816
if (GEM_WARN_ON(obj->mm.region != mr))
sys/dev/pci/drm/i915/gem/i915_gem_object.c
837
if (!obj->mm.n_placements) {
sys/dev/pci/drm/i915/gem/i915_gem_object.c
850
for (i = 0; i < obj->mm.n_placements; i++) {
sys/dev/pci/drm/i915/gem/i915_gem_object.c
851
if (obj->mm.placements[i]->type == type)
sys/dev/pci/drm/i915/gem/i915_gem_object.c
878
for (i = 0; i < obj->mm.n_placements; i++) {
sys/dev/pci/drm/i915/gem/i915_gem_object.c
880
if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
sys/dev/pci/drm/i915/gem/i915_gem_object.c
883
obj->mm.placements[i]->type == INTEL_MEMORY_LOCAL)
sys/dev/pci/drm/i915/gem/i915_gem_object.c
916
INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
sys/dev/pci/drm/i915/gem/i915_gem_object.h
427
return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_page, n, offset);
sys/dev/pci/drm/i915/gem/i915_gem_object.h
472
return __i915_gem_object_page_iter_get_sg(obj, &obj->mm.get_dma_page, n, offset);
sys/dev/pci/drm/i915/gem/i915_gem_object.h
650
if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
sys/dev/pci/drm/i915/gem/i915_gem_object.h
661
return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
sys/dev/pci/drm/i915/gem/i915_gem_object.h
669
atomic_inc(&obj->mm.pages_pin_count);
sys/dev/pci/drm/i915/gem/i915_gem_object.h
675
return atomic_read(&obj->mm.pages_pin_count);
sys/dev/pci/drm/i915/gem/i915_gem_object.h
684
atomic_dec(&obj->mm.pages_pin_count);
sys/dev/pci/drm/i915/gem/i915_gem_object.h
869
return obj->userptr.notifier.mm;
sys/dev/pci/drm/i915/gem/i915_gem_object_types.h
696
} mm;
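
The `} mm;` closing at i915_gem_object_types.h:696 is why so many hits in this list read obj->mm.*: drm_i915_gem_object embeds its page-state bookkeeping in a member named mm. A rough reconstruction of the fields this index actually touches (an illustrative subset; order and exact types are guesses, not the real definition):

    /* Sketch of the obj->mm substruct, reconstructed from the
     * accesses indexed here; omissions and layout are guesses. */
    struct {
        struct sg_table *pages;         /* backing store, ERR_PTR when purged */
        atomic_t pages_pin_count;
        void *mapping;                  /* kmap ptr with type in the low bits */
        unsigned int madv;              /* I915_MADV_WILLNEED/DONTNEED/PURGED */
        bool dirty;
        struct intel_memory_region *region;
        struct intel_memory_region **placements;
        int n_placements;
        struct list_head link;          /* on i915->mm.shrink/purge lists */
        atomic_t shrink_pin;
    } mm;
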
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
107
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
143
atomic_inc(&obj->mm.pages_pin_count);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
183
radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
184
radix_tree_delete(&obj->mm.get_page.radix, iter.index);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
185
radix_tree_for_each_slot(slot, &obj->mm.get_dma_page.radix, &iter, 0)
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
186
radix_tree_delete(&obj->mm.get_dma_page.radix, iter.index);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
203
if (!obj->mm.tlb[id])
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
206
intel_gt_invalidate_tlb_full(gt, obj->mm.tlb[id]);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
207
obj->mm.tlb[id] = 0;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
218
pages = fetch_and_zero(&obj->mm.pages);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
223
obj->mm.madv = I915_MADV_WILLNEED;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
228
if (obj->mm.mapping) {
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
229
unmap_object(obj, page_mask_bits(obj->mm.mapping));
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
230
obj->mm.mapping = NULL;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
234
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
305
if (n_pages == 1 && !PageHighMem(sg_page(obj->mm.pages->sgl)))
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
306
return page_address(sg_page(obj->mm.pages->sgl));
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
32
obj->mm.madv = I915_MADV_DONTNEED;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
323
for_each_sgt_page(page, iter, obj->mm.pages)
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
335
resource_size_t iomap = obj->mm.region->iomap.base -
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
336
obj->mm.region->region.start;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
353
for_each_sgt_daddr(addr, iter, obj->mm.pages)
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
390
for_each_sgt_page(page, iter, obj->mm.pages)
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
43
obj->mm.get_page.sg_pos = pages->sgl;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
44
obj->mm.get_page.sg_idx = 0;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
45
obj->mm.get_dma_page.sg_pos = pages->sgl;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
457
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
46
obj->mm.get_dma_page.sg_idx = 0;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
48
obj->mm.pages = pages;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
50
obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
508
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
51
GEM_BUG_ON(!obj->mm.page_sizes.phys);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
518
atomic_inc(&obj->mm.pages_pin_count);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
541
if (type != I915_MAP_WC && !obj->mm.n_placements) {
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
551
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
560
ptr = obj->mm.mapping = NULL;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
579
obj->mm.mapping = page_pack_bits(ptr, type);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
585
atomic_dec(&obj->mm.pages_pin_count);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
61
obj->mm.page_sizes.sg = 0;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
613
obj->mm.dirty = true;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
618
ptr = page_unpack_bits(obj->mm.mapping, &has_type);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
63
if (obj->mm.page_sizes.phys & ~0u << i)
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
631
GEM_BUG_ON(!obj->mm.mapping);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
639
unmap_object(obj, page_mask_bits(fetch_and_zero(&obj->mm.mapping)));
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
64
obj->mm.page_sizes.sg |= BIT(i);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
651
const bool dma = iter == &obj->mm.get_dma_page ||
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
66
GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
74
GEM_BUG_ON(!list_empty(&obj->mm.link));
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
75
atomic_inc(&obj->mm.shrink_pin);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
780
if (!obj->mm.dirty)
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
84
spin_lock_irqsave(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
86
i915->mm.shrink_count++;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
87
i915->mm.shrink_memory += obj->base.size;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
89
if (obj->mm.madv != I915_MADV_WILLNEED)
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
90
list = &i915->mm.purge_list;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
92
list = &i915->mm.shrink_list;
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
93
list_add_tail(&obj->mm.link, list);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
95
atomic_set(&obj->mm.shrink_pin, 0);
sys/dev/pci/drm/i915/gem/i915_gem_pages.c
96
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
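
i915_gem_pages.c:457/551/579 above show obj->mm.mapping doubling as both a pointer and a map-type tag via page_pack_bits()/page_unpack_bits(). A self-contained sketch of that low-bit packing trick, with hypothetical pack/unpack helpers rather than i915's actual macros:

    #include <assert.h>
    #include <stdint.h>

    /* Pack a small enum into the alignment bits of a pointer, as
     * i915 does for obj->mm.mapping; MASK must fit the alignment. */
    #define TYPE_MASK 0x3UL

    static inline void *pack_bits(void *ptr, unsigned long type)
    {
        assert(((uintptr_t)ptr & TYPE_MASK) == 0);
        return (void *)((uintptr_t)ptr | (type & TYPE_MASK));
    }

    static inline void *unpack_bits(void *packed, unsigned long *type)
    {
        *type = (uintptr_t)packed & TYPE_MASK;
        return (void *)((uintptr_t)packed & ~TYPE_MASK);
    }
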
sys/dev/pci/drm/i915/gem/i915_gem_phys.c
150
if (obj->mm.dirty) {
sys/dev/pci/drm/i915/gem/i915_gem_phys.c
178
if (obj->mm.madv == I915_MADV_WILLNEED)
sys/dev/pci/drm/i915/gem/i915_gem_phys.c
188
obj->mm.dirty = false;
sys/dev/pci/drm/i915/gem/i915_gem_phys.c
207
void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
sys/dev/pci/drm/i915/gem/i915_gem_phys.c
209
struct drm_dmamem *dmah = (void *)sg_page(obj->mm.pages->sgl);
sys/dev/pci/drm/i915/gem/i915_gem_phys.c
243
void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
sys/dev/pci/drm/i915/gem/i915_gem_phys.c
245
struct drm_dmamem *dmah = (void *)sg_page(obj->mm.pages->sgl);
sys/dev/pci/drm/i915/gem/i915_gem_phys.c
309
if (obj->mm.madv != I915_MADV_WILLNEED)
sys/dev/pci/drm/i915/gem/i915_gem_phys.c
315
if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
sys/dev/pci/drm/i915/gem/i915_gem_phys.c
318
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
sys/dev/pci/drm/i915/gem/i915_gem_pm.c
140
&i915->mm.shrink_list,
sys/dev/pci/drm/i915/gem/i915_gem_pm.c
141
&i915->mm.purge_list,
sys/dev/pci/drm/i915/gem/i915_gem_pm.c
175
spin_lock_irqsave(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_pm.c
177
list_for_each_entry(obj, *phase, mm.link) {
sys/dev/pci/drm/i915/gem/i915_gem_pm.c
183
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_pm.c
223
list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
sys/dev/pci/drm/i915/gem/i915_gem_region.c
16
obj->mm.region = mem;
sys/dev/pci/drm/i915/gem/i915_gem_region.c
174
mm.region_link);
sys/dev/pci/drm/i915/gem/i915_gem_region.c
178
list_move_tail(&obj->mm.region_link, &still_in_list);
sys/dev/pci/drm/i915/gem/i915_gem_region.c
19
list_add(&obj->mm.region_link, &mem->objects.list);
sys/dev/pci/drm/i915/gem/i915_gem_region.c
194
if (obj->mm.region == mr)
sys/dev/pci/drm/i915/gem/i915_gem_region.c
25
struct intel_memory_region *mem = obj->mm.region;
sys/dev/pci/drm/i915/gem/i915_gem_region.c
28
list_del(&obj->mm.region_link);
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
252
struct intel_memory_region *mem = obj->mm.region;
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
360
obj->mm.madv = __I915_MADV_PURGED;
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
361
obj->mm.pages = ERR_PTR(-EFAULT);
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
405
switch (obj->mm.madv) {
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
425
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
427
if (obj->mm.madv == I915_MADV_DONTNEED)
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
428
obj->mm.dirty = false;
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
459
obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED);
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
462
obj->mm.dirty, obj->mm.madv == I915_MADV_WILLNEED, obj);
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
465
obj->mm.dirty = false;
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
512
if (obj->mm.madv != I915_MADV_WILLNEED)
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
647
if (i915->mm.gemfs)
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
648
filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
sys/dev/pci/drm/i915/gem/i915_gem_shmem.c
742
return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
112
{ &i915->mm.purge_list, ~0u },
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
114
&i915->mm.shrink_list,
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
193
spin_lock_irqsave(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
197
mm.link))) {
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
198
list_move_tail(&obj->mm.link, &still_in_list);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
201
!is_vmalloc_addr(obj->mm.mapping))
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
214
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
238
spin_lock_irqsave(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
243
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
294
count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
295
num_objects = READ_ONCE(i915->mm.shrink_count);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
307
i915->mm.shrinker->batch =
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
308
max((i915->mm.shrinker->batch + avg) >> 1,
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
349
container_of(nb, struct drm_i915_private, mm.oom_notifier);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
36
return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
367
spin_lock_irqsave(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
368
list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
374
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
389
container_of(nb, struct drm_i915_private, mm.vmap_notifier);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
430
i915->mm.shrinker = shrinker_alloc(0, "drm-i915_gem");
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
431
if (!i915->mm.shrinker) {
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
434
i915->mm.shrinker->scan_objects = i915_gem_shrinker_scan;
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
435
i915->mm.shrinker->count_objects = i915_gem_shrinker_count;
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
436
i915->mm.shrinker->batch = 4096;
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
437
i915->mm.shrinker->private_data = i915;
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
439
shrinker_register(i915->mm.shrinker);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
443
i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
444
drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
446
i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
448
register_vmap_purge_notifier(&i915->mm.vmap_notifier));
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
456
unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
458
unregister_oom_notifier(&i915->mm.oom_notifier));
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
460
shrinker_free(i915->mm.shrinker);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
500
if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
503
spin_lock_irqsave(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
504
if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
505
!list_empty(&obj->mm.link)) {
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
506
list_del_init(&obj->mm.link);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
507
i915->mm.shrink_count--;
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
508
i915->mm.shrink_memory -= obj->base.size;
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
510
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
522
if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
525
spin_lock_irqsave(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
527
if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
528
GEM_BUG_ON(!list_empty(&obj->mm.link));
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
530
list_add_tail(&obj->mm.link, head);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
531
i915->mm.shrink_count++;
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
532
i915->mm.shrink_memory += obj->base.size;
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
535
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
551
&obj_to_i915(obj)->mm.shrink_list);
sys/dev/pci/drm/i915/gem/i915_gem_shrinker.c
567
&obj_to_i915(obj)->mm.purge_list);
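
The shrinker hits at i915_gem_shrinker.c:500-535 follow a two-level pattern: try a lockless atomic adjustment of obj->mm.shrink_pin first, and take i915->mm.obj_lock only when the count crosses zero and the object must move on or off the shrink lists. A minimal sketch of the pin side, with hypothetical parameter names:

    #include <linux/atomic.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    /* Pin: the fast path succeeds unless the count is currently 0;
     * the slow path takes the lock so the delist happens atomically
     * with the 0 -> 1 transition. */
    static void shrink_pin(atomic_t *pin, spinlock_t *lock,
                           struct list_head *link)
    {
        unsigned long flags;

        if (atomic_add_unless(pin, 1, 0))
            return;                     /* already pinned by someone */

        spin_lock_irqsave(lock, flags);
        if (!atomic_fetch_inc(pin) && !list_empty(link))
            list_del_init(link);        /* hide from the shrinker */
        spin_unlock_irqrestore(lock, flags);
    }
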
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
1069
return drm_mm_initialized(&i915->mm.stolen);
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
198
if (!drm_mm_initialized(&i915->mm.stolen))
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
201
drm_mm_takedown(&i915->mm.stolen);
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
45
if (!drm_mm_initialized(&i915->mm.stolen))
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
518
rw_init(&i915->mm.stolen_lock, "stln");
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
52
mutex_lock(&i915->mm.stolen_lock);
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
53
ret = drm_mm_insert_node_in_range(&i915->mm.stolen, node,
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
56
mutex_unlock(&i915->mm.stolen_lock);
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
562
drm_mm_init(&i915->mm.stolen, 0, i915->dsm.usable_size);
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
74
mutex_lock(&i915->mm.stolen_lock);
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
750
if (!drm_mm_initialized(&i915->mm.stolen))
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
76
mutex_unlock(&i915->mm.stolen_lock);
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
775
mutex_lock(&i915->mm.stolen_lock);
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
776
ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
777
mutex_unlock(&i915->mm.stolen_lock);
sys/dev/pci/drm/i915/gem/i915_gem_stolen.c
802
return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
sys/dev/pci/drm/i915/gem/i915_gem_tiling.c
279
obj->mm.madv == I915_MADV_WILLNEED &&
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
1002
obj->mm.ttm_shrinkable = shrinkable;
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
1014
} else if (obj->mm.madv != I915_MADV_WILLNEED) {
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
1085
if (obj->mm.madv != I915_MADV_WILLNEED) {
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
1114
for (i = 0; i < obj->mm.n_placements; i++) {
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
1115
struct intel_memory_region *mr = obj->mm.placements[i];
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
119
if (obj->mm.n_placements <= 1)
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
1258
if (obj->mm.madv != I915_MADV_WILLNEED) {
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
1289
for (i = 0; i < obj->mm.n_placements; i++) {
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
1290
struct intel_memory_region *mr = obj->mm.placements[i];
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
1472
if (obj->mm.ttm_shrinkable)
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
1517
obj->mm.region = mem;
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
1518
INIT_LIST_HEAD(&obj->mm.region_link);
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
1528
GEM_BUG_ON(page_size && obj->mm.n_placements);
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
161
unsigned int num_allowed = obj->mm.n_placements;
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
165
i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
166
obj->mm.region, &places[0], obj->bo_offset,
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
171
i915_ttm_place_from_region(obj->mm.placements[i],
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
189
struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM];
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
438
if (obj->mm.madv == __I915_MADV_PURGED)
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
468
obj->mm.madv = __I915_MADV_PURGED;
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
497
switch (obj->mm.madv) {
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
598
page_alignment = obj->mm.region->min_page_size;
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
609
rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
620
return intel_region_ttm_resource_to_rsgt(obj->mm.region, res,
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
629
WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
656
if (!ret && obj->mm.madv != I915_MADV_WILLNEED)
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
719
base = obj->mm.region->iomap.base - obj->mm.region->region.start;
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
733
resource_size_t iomap = obj->mm.region->iomap.base -
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
734
obj->mm.region->region.start;
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
855
GEM_BUG_ON(obj->mm.rsgt);
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
856
obj->mm.rsgt = rsgt;
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
874
GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
918
if (obj->mm.region != mr) {
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
944
if (obj->mm.rsgt)
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
945
i915_refct_sgt_put(fetch_and_zero(&obj->mm.rsgt));
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
992
if (shrinkable != obj->mm.ttm_shrinkable) {
sys/dev/pci/drm/i915/gem/i915_gem_ttm.c
994
if (obj->mm.madv == I915_MADV_WILLNEED)
sys/dev/pci/drm/i915/gem/i915_gem_ttm_move.c
134
if (intel_region_to_ttm_type(obj->mm.region) != mem_type) {
sys/dev/pci/drm/i915/gem/i915_gem_ttm_move.c
135
for (i = 0; i < obj->mm.n_placements; ++i) {
sys/dev/pci/drm/i915/gem/i915_gem_ttm_move.c
136
struct intel_memory_region *mr = obj->mm.placements[i];
sys/dev/pci/drm/i915/gem/i915_gem_ttm_move.c
139
mr != obj->mm.region) {
sys/dev/pci/drm/i915/gem/i915_gem_ttm_move.c
383
copy_work->obj->mm.unknown_state = true;
sys/dev/pci/drm/i915/gem/i915_gem_ttm_move.c
622
if (obj->mm.madv != I915_MADV_WILLNEED) {
sys/dev/pci/drm/i915/gem/i915_gem_ttm_pm.c
83
backup = i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
175
obj->mm.dirty = false;
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
178
if (obj->mm.dirty && trylock_page(page)) {
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
203
obj->mm.dirty = false;
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
240
if (obj->userptr.notifier.mm != current->mm)
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
351
if (!obj->userptr.notifier.mm)
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
355
obj->userptr.notifier.mm = NULL;
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
402
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
404
VMA_ITERATOR(vmi, mm, addr);
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
408
mmap_read_lock(mm);
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
419
mmap_read_unlock(mm);
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
516
ret = probe_range(current->mm, args->user_ptr, args->user_size);
sys/dev/pci/drm/i915/gem/i915_gem_userptr.c
73
return mmu_interval_notifier_insert(&obj->userptr.notifier, current->mm,
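
The userptr hit at i915_gem_userptr.c:73 registers an interval notifier on current->mm so CPU-side unmaps invalidate the GPU binding. A minimal sketch of registering one with the standard mmu_interval_notifier API; the ops table and callback names here are illustrative:

    #include <linux/mmu_notifier.h>

    static bool demo_invalidate(struct mmu_interval_notifier *mni,
                                const struct mmu_notifier_range *range,
                                unsigned long cur_seq)
    {
        mmu_interval_set_seq(mni, cur_seq);     /* mark our pages stale */
        return true;
    }

    static const struct mmu_interval_notifier_ops demo_ops = {
        .invalidate = demo_invalidate,
    };

    static int demo_register(struct mmu_interval_notifier *mni,
                             unsigned long addr, unsigned long len)
    {
        /* Ties [addr, addr + len) in current->mm to demo_ops. */
        return mmu_interval_notifier_insert(mni, current->mm,
                                            addr, len, &demo_ops);
    }
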
sys/dev/pci/drm/i915/gem/i915_gemfs.c
56
i915->mm.gemfs = gemfs;
sys/dev/pci/drm/i915/gem/i915_gemfs.c
71
kern_unmount(i915->mm.gemfs);
sys/dev/pci/drm/i915/gem/selftests/huge_gem_object.c
91
obj->mm.dirty = false;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1224
if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1254
max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1285
if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1319
return i915->mm.gemfs && has_transparent_hugepage();
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
142
GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1425
if (obj->mm.page_sizes.phys < min) {
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1525
obj->mm.page_sizes.sg = pages;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1585
if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) {
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1594
obj->mm.page_sizes.sg = I915_GTT_PAGE_SIZE_64K;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
161
obj->mm.dirty = false;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1660
mr = i915->mm.regions[INTEL_REGION_LMEM_0];
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1662
mr = i915->mm.regions[INTEL_REGION_SMEM];
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1718
addr = round_up(addr, obj->mm.region->min_page_size);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1764
struct vfsmount *gemfs = i915->mm.gemfs;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1788
i915->mm.gemfs = NULL;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1822
i915->mm.gemfs = gemfs;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1885
if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) {
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1935
if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
206
obj->mm.page_mask = page_mask;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
310
obj->mm.dirty = false;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
386
if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
388
vma->page_sizes.phys, obj->mm.page_sizes.phys);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
392
if (vma->page_sizes.sg != obj->mm.page_sizes.sg) {
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
394
vma->page_sizes.sg, obj->mm.page_sizes.sg);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
619
obj->mm.page_sizes.sg = page_size;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
81
unsigned int page_mask = obj->mm.page_mask;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
980
obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
561
err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1495
node = __drm_mm_interval_first(&vm->mm,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
278
if (obj->mm.region != i915->mm.regions[INTEL_REGION_SMEM]) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
326
struct intel_memory_region *smem = i915->mm.regions[INTEL_REGION_SMEM];
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
336
if (!i915->mm.regions[INTEL_REGION_LMEM_0])
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
339
regions[0] = i915->mm.regions[INTEL_REGION_LMEM_0];
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
340
regions[1] = i915->mm.regions[INTEL_REGION_SMEM];
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
96
struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM_0];
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
222
obj->mm.pages->sgl, obj->pat_index,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
50
struct intel_memory_region *src_mr = i915->mm.regions[src];
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
51
struct intel_memory_region *dst_mr = i915->mm.regions[dst];
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1102
obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1111
obj->mm.region->name, i * sizeof(x));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1120
obj->mm.region->name);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1126
obj->mm.region->name,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1134
obj->mm.region->name, i * sizeof(x));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1191
mmap_read_lock(current->mm);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1192
area = vma_lookup(current->mm, addr);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1193
mmap_read_unlock(current->mm);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1196
obj->mm.region->name);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1218
obj->mm.pages->sgl, obj->pat_index,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1255
if (!err && obj->mm.region != expected_mr) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1304
struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1456
obj->mm.region->name, repr_mmap_type(type));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1465
obj->mm.region->name, repr_mmap_type(type));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1472
obj->mm.region->name, repr_mmap_type(type));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1481
obj->mm.region->name, repr_mmap_type(type));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1487
obj->mm.region->name, repr_mmap_type(type),
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1573
pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1617
__func__, engine->name, obj->mm.region->name);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1706
return apply_to_page_range(current->mm, addr, len,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1712
return apply_to_page_range(current->mm, addr, len,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1759
pr_err("%s: was not present\n", obj->mm.region->name);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1789
pr_err("%s: was not absent\n", obj->mm.region->name);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
1847
if (!current->mm) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
604
i915->mm.regions[INTEL_REGION_SMEM];
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
659
struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
675
list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
697
err = drm_mm_reserve_node(mm, resv);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
704
GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
764
drm_mm_for_each_node_safe(hole, next, mm) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
827
obj->mm.region->name);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
863
obj->mm.region->name);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
921
pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
923
mmap_read_lock(current->mm);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
924
area = vma_lookup(current->mm, addr);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
925
mmap_read_unlock(current->mm);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
928
obj->mm.region->name);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
939
obj->mm.region->name, i * sizeof(x));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
946
obj->mm.region->name,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_mman.c
955
obj->mm.region->name, i * sizeof(x));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_phys.c
49
if (!atomic_read(&obj->mm.pages_pin_count)) {
sys/dev/pci/drm/i915/gt/gen6_ppgtt.c
364
obj->mm.pages = ZERO_SIZE_PTR;
sys/dev/pci/drm/i915/gt/intel_context.c
144
vma->obj->mm.dirty = true;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
67
ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
945
if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
sys/dev/pci/drm/i915/gt/intel_ggtt.c
946
drm_mm_insert_node_in_range(&ggtt->vm.mm,
sys/dev/pci/drm/i915/gt/intel_ggtt.c
973
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
sys/dev/pci/drm/i915/gt/intel_gt.c
109
GEM_BUG_ON(i915->mm.regions[id]);
sys/dev/pci/drm/i915/gt/intel_gt.c
110
i915->mm.regions[id] = mem;
sys/dev/pci/drm/i915/gt/intel_gtt.c
212
drm_mm_takedown(&vm->mm);
sys/dev/pci/drm/i915/gt/intel_gtt.c
301
drm_mm_init(&vm->mm, 0, vm->total);
sys/dev/pci/drm/i915/gt/intel_gtt.c
311
vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
sys/dev/pci/drm/i915/gt/intel_gtt.c
322
return page_unpack_bits(p->mm.mapping, &type);
sys/dev/pci/drm/i915/gt/intel_gtt.c
328
return sg_dma_address(p->mm.pages->sgl);
sys/dev/pci/drm/i915/gt/intel_gtt.c
334
return sg_page(p->mm.pages->sgl);
sys/dev/pci/drm/i915/gt/intel_gtt.c
391
if (obj->mm.page_sizes.sg < size)
sys/dev/pci/drm/i915/gt/intel_gtt.h
251
struct drm_mm mm;
sys/dev/pci/drm/i915/gt/intel_gtt.h
454
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
sys/dev/pci/drm/i915/gt/intel_gtt.h
463
return i915_is_ggtt(vm) && vm->mm.color_adjust;
sys/dev/pci/drm/i915/gt/intel_rc6.c
317
pctx = i915_gem_object_create_region_at(i915->mm.stolen_region,
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
69
return sg_page(obj->mm.pages->sgl);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
302
obj->mm.pages->sgl,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
353
obj->mm.pages->sgl,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
417
src->mm.pages->sgl, src->pat_index,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
419
dst->mm.pages->sgl, dst->pat_index,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
431
src->mm.pages->sgl, src->pat_index,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
433
dst->mm.pages->sgl, dst->pat_index,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
457
obj->mm.pages->sgl,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
470
obj->mm.pages->sgl,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
650
it = sg_sgt(obj->mm.pages->sgl);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
904
dst->mm.pages->sgl,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
995
src->mm.pages->sgl,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
999
dst->mm.pages->sgl,
sys/dev/pci/drm/i915/gt/selftest_reset.c
147
!__drm_mm_interval_first(&gt->i915->mm.stolen,
sys/dev/pci/drm/i915/gt/selftest_reset.c
98
if (!__drm_mm_interval_first(&gt->i915->mm.stolen,
sys/dev/pci/drm/i915/gt/selftest_timeline.c
32
return sg_page(obj->mm.pages->sgl);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
841
page_unmask_bits(tl->hwsp_ggtt->obj->mm.mapping));
sys/dev/pci/drm/i915/gt/selftest_tlb.c
151
ce->engine->name, va->obj->mm.region->name ?: "smem",
sys/dev/pci/drm/i915/gt/selftest_tlb.c
158
.bi.pages = vb->obj->mm.pages,
sys/dev/pci/drm/i915/gt/selftest_tlb.c
159
.bi.page_sizes = vb->obj->mm.page_sizes,
sys/dev/pci/drm/i915/gt/selftest_tlb.c
184
cs = page_mask_bits(batch->mm.mapping);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
201
struct intel_memory_region *mr = gt->i915->mm.regions[INTEL_REGION_LMEM_0];
sys/dev/pci/drm/i915/gt/selftest_tlb.c
27
memset64(page_mask_bits(vma->obj->mm.mapping) +
sys/dev/pci/drm/i915/gt/selftest_tlb.c
273
if ((A->mm.page_sizes.phys | B->mm.page_sizes.phys) & (A->base.size - 1))
sys/dev/pci/drm/i915/gt/selftest_tlb.c
95
ce->engine->name, va->obj->mm.region->name ?: "smem",
sys/dev/pci/drm/i915/gt/uc/intel_uc_fw.c
1054
vma_res->bi.pages = obj->mm.pages;
sys/dev/pci/drm/i915/gt/uc/intel_uc_fw.c
1339
struct intel_memory_region *mr = uc_fw->obj->mm.region;
sys/dev/pci/drm/i915/gt/uc/intel_uc_fw.c
1354
for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
sys/dev/pci/drm/i915/gt/uc/intel_uc_fw.c
1374
for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
sys/dev/pci/drm/i915/gvt/cmd_parser.c
1767
static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
sys/dev/pci/drm/i915/gvt/cmd_parser.c
1775
gpa = intel_vgpu_gma_to_gpa(mm, gma);
sys/dev/pci/drm/i915/gvt/cmd_parser.c
1824
struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
sys/dev/pci/drm/i915/gvt/cmd_parser.c
1845
if (copy_gma_to_hva(s->vgpu, mm,
sys/dev/pci/drm/i915/gvt/cmd_parser.c
1907
struct intel_vgpu_mm *mm = (s->buf_addr_type == GTT_BUFFER) ?
sys/dev/pci/drm/i915/gvt/cmd_parser.c
1955
ret = copy_gma_to_hva(s->vgpu, mm,
sys/dev/pci/drm/i915/gvt/cmd_parser.c
865
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/cmd_parser.c
873
mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
sys/dev/pci/drm/i915/gvt/cmd_parser.c
874
if (!mm) {
sys/dev/pci/drm/i915/gvt/cmd_parser.c
878
intel_vgpu_mm_get(mm);
sys/dev/pci/drm/i915/gvt/cmd_parser.c
879
list_add_tail(&mm->ppgtt_mm.link,
sys/dev/pci/drm/i915/gvt/cmd_parser.c
881
*cmd_ptr(s, 2) = upper_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
sys/dev/pci/drm/i915/gvt/cmd_parser.c
882
*cmd_ptr(s, 4) = lower_32_bits(mm->ppgtt_mm.shadow_pdps[0]);
sys/dev/pci/drm/i915/gvt/gtt.c
1690
static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
sys/dev/pci/drm/i915/gvt/gtt.c
1692
struct intel_vgpu *vgpu = mm->vgpu;
sys/dev/pci/drm/i915/gvt/gtt.c
1699
if (!mm->ppgtt_mm.shadowed)
sys/dev/pci/drm/i915/gvt/gtt.c
1702
for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
sys/dev/pci/drm/i915/gvt/gtt.c
1703
ppgtt_get_shadow_root_entry(mm, &se, index);
sys/dev/pci/drm/i915/gvt/gtt.c
1710
ppgtt_set_shadow_root_entry(mm, &se, index);
sys/dev/pci/drm/i915/gvt/gtt.c
1716
mm->ppgtt_mm.shadowed = false;
sys/dev/pci/drm/i915/gvt/gtt.c
1720
static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
sys/dev/pci/drm/i915/gvt/gtt.c
1722
struct intel_vgpu *vgpu = mm->vgpu;
sys/dev/pci/drm/i915/gvt/gtt.c
1730
if (mm->ppgtt_mm.shadowed)
sys/dev/pci/drm/i915/gvt/gtt.c
1736
mm->ppgtt_mm.shadowed = true;
sys/dev/pci/drm/i915/gvt/gtt.c
1738
for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
sys/dev/pci/drm/i915/gvt/gtt.c
1739
ppgtt_get_guest_root_entry(mm, &ge, index);
sys/dev/pci/drm/i915/gvt/gtt.c
1754
ppgtt_set_shadow_root_entry(mm, &se, index);
sys/dev/pci/drm/i915/gvt/gtt.c
1762
invalidate_ppgtt_mm(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
1768
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/gtt.c
1770
mm = kzalloc(sizeof(*mm), GFP_KERNEL);
sys/dev/pci/drm/i915/gvt/gtt.c
1771
if (!mm)
sys/dev/pci/drm/i915/gvt/gtt.c
1774
mm->vgpu = vgpu;
sys/dev/pci/drm/i915/gvt/gtt.c
1775
kref_init(&mm->ref);
sys/dev/pci/drm/i915/gvt/gtt.c
1776
atomic_set(&mm->pincount, 0);
sys/dev/pci/drm/i915/gvt/gtt.c
1778
return mm;
sys/dev/pci/drm/i915/gvt/gtt.c
1781
static void vgpu_free_mm(struct intel_vgpu_mm *mm)
sys/dev/pci/drm/i915/gvt/gtt.c
1783
kfree(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
1801
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/gtt.c
1804
mm = vgpu_alloc_mm(vgpu);
sys/dev/pci/drm/i915/gvt/gtt.c
1805
if (!mm)
sys/dev/pci/drm/i915/gvt/gtt.c
1808
mm->type = INTEL_GVT_MM_PPGTT;
sys/dev/pci/drm/i915/gvt/gtt.c
1812
mm->ppgtt_mm.root_entry_type = root_entry_type;
sys/dev/pci/drm/i915/gvt/gtt.c
1814
INIT_LIST_HEAD(&mm->ppgtt_mm.list);
sys/dev/pci/drm/i915/gvt/gtt.c
1815
INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
sys/dev/pci/drm/i915/gvt/gtt.c
1816
INIT_LIST_HEAD(&mm->ppgtt_mm.link);
sys/dev/pci/drm/i915/gvt/gtt.c
1819
mm->ppgtt_mm.guest_pdps[0] = pdps[0];
sys/dev/pci/drm/i915/gvt/gtt.c
1821
memcpy(mm->ppgtt_mm.guest_pdps, pdps,
sys/dev/pci/drm/i915/gvt/gtt.c
1822
sizeof(mm->ppgtt_mm.guest_pdps));
sys/dev/pci/drm/i915/gvt/gtt.c
1824
ret = shadow_ppgtt_mm(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
1827
vgpu_free_mm(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
1831
list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
sys/dev/pci/drm/i915/gvt/gtt.c
1834
list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
sys/dev/pci/drm/i915/gvt/gtt.c
1837
return mm;
sys/dev/pci/drm/i915/gvt/gtt.c
1842
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/gtt.c
1845
mm = vgpu_alloc_mm(vgpu);
sys/dev/pci/drm/i915/gvt/gtt.c
1846
if (!mm)
sys/dev/pci/drm/i915/gvt/gtt.c
1849
mm->type = INTEL_GVT_MM_GGTT;
sys/dev/pci/drm/i915/gvt/gtt.c
1852
mm->ggtt_mm.virtual_ggtt =
sys/dev/pci/drm/i915/gvt/gtt.c
1855
if (!mm->ggtt_mm.virtual_ggtt) {
sys/dev/pci/drm/i915/gvt/gtt.c
1856
vgpu_free_mm(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
1860
mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
sys/dev/pci/drm/i915/gvt/gtt.c
1861
if (!mm->ggtt_mm.host_ggtt_aperture) {
sys/dev/pci/drm/i915/gvt/gtt.c
1862
vfree(mm->ggtt_mm.virtual_ggtt);
sys/dev/pci/drm/i915/gvt/gtt.c
1863
vgpu_free_mm(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
1867
mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
sys/dev/pci/drm/i915/gvt/gtt.c
1868
if (!mm->ggtt_mm.host_ggtt_hidden) {
sys/dev/pci/drm/i915/gvt/gtt.c
1869
vfree(mm->ggtt_mm.host_ggtt_aperture);
sys/dev/pci/drm/i915/gvt/gtt.c
1870
vfree(mm->ggtt_mm.virtual_ggtt);
sys/dev/pci/drm/i915/gvt/gtt.c
1871
vgpu_free_mm(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
1875
return mm;
sys/dev/pci/drm/i915/gvt/gtt.c
1887
struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
sys/dev/pci/drm/i915/gvt/gtt.c
1889
if (GEM_WARN_ON(atomic_read(&mm->pincount)))
sys/dev/pci/drm/i915/gvt/gtt.c
1892
if (mm->type == INTEL_GVT_MM_PPGTT) {
sys/dev/pci/drm/i915/gvt/gtt.c
1893
list_del(&mm->ppgtt_mm.list);
sys/dev/pci/drm/i915/gvt/gtt.c
1895
mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
sys/dev/pci/drm/i915/gvt/gtt.c
1896
list_del(&mm->ppgtt_mm.lru_list);
sys/dev/pci/drm/i915/gvt/gtt.c
1897
mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
sys/dev/pci/drm/i915/gvt/gtt.c
1899
invalidate_ppgtt_mm(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
1901
vfree(mm->ggtt_mm.virtual_ggtt);
sys/dev/pci/drm/i915/gvt/gtt.c
1902
vfree(mm->ggtt_mm.host_ggtt_aperture);
sys/dev/pci/drm/i915/gvt/gtt.c
1903
vfree(mm->ggtt_mm.host_ggtt_hidden);
sys/dev/pci/drm/i915/gvt/gtt.c
1906
vgpu_free_mm(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
1915
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
sys/dev/pci/drm/i915/gvt/gtt.c
1917
atomic_dec_if_positive(&mm->pincount);
sys/dev/pci/drm/i915/gvt/gtt.c
1931
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
sys/dev/pci/drm/i915/gvt/gtt.c
1935
atomic_inc(&mm->pincount);
sys/dev/pci/drm/i915/gvt/gtt.c
1937
if (mm->type == INTEL_GVT_MM_PPGTT) {
sys/dev/pci/drm/i915/gvt/gtt.c
1938
ret = shadow_ppgtt_mm(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
1942
mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
sys/dev/pci/drm/i915/gvt/gtt.c
1943
list_move_tail(&mm->ppgtt_mm.lru_list,
sys/dev/pci/drm/i915/gvt/gtt.c
1944
&mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
sys/dev/pci/drm/i915/gvt/gtt.c
1945
mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
sys/dev/pci/drm/i915/gvt/gtt.c
1953
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/gtt.c
1959
mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
sys/dev/pci/drm/i915/gvt/gtt.c
1961
if (atomic_read(&mm->pincount))
sys/dev/pci/drm/i915/gvt/gtt.c
1964
list_del_init(&mm->ppgtt_mm.lru_list);
sys/dev/pci/drm/i915/gvt/gtt.c
1966
invalidate_ppgtt_mm(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
1976
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
sys/dev/pci/drm/i915/gvt/gtt.c
1979
struct intel_vgpu *vgpu = mm->vgpu;
sys/dev/pci/drm/i915/gvt/gtt.c
2005
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
sys/dev/pci/drm/i915/gvt/gtt.c
2007
struct intel_vgpu *vgpu = mm->vgpu;
sys/dev/pci/drm/i915/gvt/gtt.c
2017
GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
sys/dev/pci/drm/i915/gvt/gtt.c
2018
mm->type != INTEL_GVT_MM_PPGTT);
sys/dev/pci/drm/i915/gvt/gtt.c
2020
if (mm->type == INTEL_GVT_MM_GGTT) {
sys/dev/pci/drm/i915/gvt/gtt.c
2024
ggtt_get_guest_entry(mm, &e,
sys/dev/pci/drm/i915/gvt/gtt.c
2032
switch (mm->ppgtt_mm.root_entry_type) {
sys/dev/pci/drm/i915/gvt/gtt.c
2034
ppgtt_get_shadow_root_entry(mm, &e, 0);
sys/dev/pci/drm/i915/gvt/gtt.c
2043
ppgtt_get_shadow_root_entry(mm, &e,
sys/dev/pci/drm/i915/gvt/gtt.c
2056
ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
sys/dev/pci/drm/i915/gvt/gtt.c
2070
mm->ppgtt_mm.root_entry_type, gma, gpa);
sys/dev/pci/drm/i915/gvt/gtt.c
2075
gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
sys/dev/pci/drm/i915/gvt/gtt.c
2426
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/gtt.c
2429
mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
sys/dev/pci/drm/i915/gvt/gtt.c
2430
intel_vgpu_destroy_mm(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
2541
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/gtt.c
2545
mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
sys/dev/pci/drm/i915/gvt/gtt.c
2547
switch (mm->ppgtt_mm.root_entry_type) {
sys/dev/pci/drm/i915/gvt/gtt.c
2549
if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
sys/dev/pci/drm/i915/gvt/gtt.c
2550
return mm;
sys/dev/pci/drm/i915/gvt/gtt.c
2553
if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
sys/dev/pci/drm/i915/gvt/gtt.c
2554
sizeof(mm->ppgtt_mm.guest_pdps)))
sys/dev/pci/drm/i915/gvt/gtt.c
2555
return mm;
sys/dev/pci/drm/i915/gvt/gtt.c
2578
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/gtt.c
2580
mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
sys/dev/pci/drm/i915/gvt/gtt.c
2581
if (mm) {
sys/dev/pci/drm/i915/gvt/gtt.c
2582
intel_vgpu_mm_get(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
2584
mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
sys/dev/pci/drm/i915/gvt/gtt.c
2585
if (IS_ERR(mm))
sys/dev/pci/drm/i915/gvt/gtt.c
2588
return mm;
sys/dev/pci/drm/i915/gvt/gtt.c
2603
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/gtt.c
2605
mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
sys/dev/pci/drm/i915/gvt/gtt.c
2606
if (!mm) {
sys/dev/pci/drm/i915/gvt/gtt.c
2610
intel_vgpu_mm_put(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
2699
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/gtt.c
2702
mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
sys/dev/pci/drm/i915/gvt/gtt.c
2703
if (mm->type == INTEL_GVT_MM_PPGTT) {
sys/dev/pci/drm/i915/gvt/gtt.c
2705
list_del_init(&mm->ppgtt_mm.lru_list);
sys/dev/pci/drm/i915/gvt/gtt.c
2707
if (mm->ppgtt_mm.shadowed)
sys/dev/pci/drm/i915/gvt/gtt.c
2708
invalidate_ppgtt_mm(mm);
sys/dev/pci/drm/i915/gvt/gtt.c
2768
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/gtt.c
2775
mm = vgpu->gtt.ggtt_mm;
sys/dev/pci/drm/i915/gvt/gtt.c
2780
pte = mm->ggtt_mm.host_ggtt_aperture[idx];
sys/dev/pci/drm/i915/gvt/gtt.c
2788
pte = mm->ggtt_mm.host_ggtt_hidden[idx];
sys/dev/pci/drm/i915/gvt/gtt.c
486
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
sys/dev/pci/drm/i915/gvt/gtt.c
490
const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
sys/dev/pci/drm/i915/gvt/gtt.c
492
GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
sys/dev/pci/drm/i915/gvt/gtt.c
494
entry->type = mm->ppgtt_mm.root_entry_type;
sys/dev/pci/drm/i915/gvt/gtt.c
495
pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
sys/dev/pci/drm/i915/gvt/gtt.c
496
mm->ppgtt_mm.shadow_pdps,
sys/dev/pci/drm/i915/gvt/gtt.c
497
entry, index, false, 0, mm->vgpu);
sys/dev/pci/drm/i915/gvt/gtt.c
501
static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
sys/dev/pci/drm/i915/gvt/gtt.c
504
_ppgtt_get_root_entry(mm, entry, index, true);
sys/dev/pci/drm/i915/gvt/gtt.c
507
static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
sys/dev/pci/drm/i915/gvt/gtt.c
510
_ppgtt_get_root_entry(mm, entry, index, false);
sys/dev/pci/drm/i915/gvt/gtt.c
513
static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
sys/dev/pci/drm/i915/gvt/gtt.c
517
const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
sys/dev/pci/drm/i915/gvt/gtt.c
519
pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
sys/dev/pci/drm/i915/gvt/gtt.c
520
mm->ppgtt_mm.shadow_pdps,
sys/dev/pci/drm/i915/gvt/gtt.c
521
entry, index, false, 0, mm->vgpu);
sys/dev/pci/drm/i915/gvt/gtt.c
524
static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
sys/dev/pci/drm/i915/gvt/gtt.c
527
_ppgtt_set_root_entry(mm, entry, index, false);
sys/dev/pci/drm/i915/gvt/gtt.c
530
static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
sys/dev/pci/drm/i915/gvt/gtt.c
533
const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
sys/dev/pci/drm/i915/gvt/gtt.c
535
GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
sys/dev/pci/drm/i915/gvt/gtt.c
538
pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
sys/dev/pci/drm/i915/gvt/gtt.c
539
false, 0, mm->vgpu);
sys/dev/pci/drm/i915/gvt/gtt.c
542
static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
sys/dev/pci/drm/i915/gvt/gtt.c
545
const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
sys/dev/pci/drm/i915/gvt/gtt.c
547
GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
sys/dev/pci/drm/i915/gvt/gtt.c
549
pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
sys/dev/pci/drm/i915/gvt/gtt.c
550
false, 0, mm->vgpu);
sys/dev/pci/drm/i915/gvt/gtt.c
553
static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
sys/dev/pci/drm/i915/gvt/gtt.c
556
const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
sys/dev/pci/drm/i915/gvt/gtt.c
558
GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
sys/dev/pci/drm/i915/gvt/gtt.c
560
pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
sys/dev/pci/drm/i915/gvt/gtt.c
563
static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
sys/dev/pci/drm/i915/gvt/gtt.c
566
const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
sys/dev/pci/drm/i915/gvt/gtt.c
569
GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
sys/dev/pci/drm/i915/gvt/gtt.c
571
if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
sys/dev/pci/drm/i915/gvt/gtt.c
572
offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
sys/dev/pci/drm/i915/gvt/gtt.c
573
mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;
sys/dev/pci/drm/i915/gvt/gtt.c
574
} else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
sys/dev/pci/drm/i915/gvt/gtt.c
575
offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
sys/dev/pci/drm/i915/gvt/gtt.c
576
mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;
sys/dev/pci/drm/i915/gvt/gtt.c
579
pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
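The gtt.c hits above (1976-2070) trace a multi-level page-table walk: intel_vgpu_gma_to_gpa() slices the guest memory address into per-level indices and descends via ppgtt_get_next_level_entry(). A minimal standalone sketch of that shape, with a hypothetical lookup() callback standing in for the guest/shadow entry readers:

	#include <stdint.h>

	/* Hypothetical 4-level, 9-bits-per-level walk: take a slice of the
	 * address at each level, descend, then splice the low 12 bits of
	 * page offset back into the leaf entry. */
	static uint64_t
	walk_demo(uint64_t (*lookup)(int level, uint64_t index), uint64_t gma)
	{
		uint64_t entry = 0;
		int level;

		for (level = 3; level >= 0; level--)
			entry = lookup(level, (gma >> (12 + 9 * level)) & 0x1ff);

		return (entry & ~0xfffULL) | (gma & 0xfffULL);
	}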
sys/dev/pci/drm/i915/gvt/gtt.h
185
static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
sys/dev/pci/drm/i915/gvt/gtt.h
187
kref_get(&mm->ref);
sys/dev/pci/drm/i915/gvt/gtt.h
192
static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
sys/dev/pci/drm/i915/gvt/gtt.h
194
kref_put(&mm->ref, _intel_vgpu_mm_release);
sys/dev/pci/drm/i915/gvt/gtt.h
197
static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
sys/dev/pci/drm/i915/gvt/gtt.h
199
intel_vgpu_mm_put(mm);
sys/dev/pci/drm/i915/gvt/gtt.h
269
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);
sys/dev/pci/drm/i915/gvt/gtt.h
271
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
sys/dev/pci/drm/i915/gvt/gtt.h
273
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
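The gtt.h inlines above wrap a plain Linux kref: intel_vgpu_mm_get() takes a reference and intel_vgpu_mm_put() drops one, invoking _intel_vgpu_mm_release when the count hits zero. A minimal sketch of the same pattern around a hypothetical struct foo (kref_init() is assumed to have set the count to 1 at allocation):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct foo {
		struct kref ref;
	};

	static void foo_release(struct kref *ref)
	{
		kfree(container_of(ref, struct foo, ref));
	}

	static inline void foo_get(struct foo *f)
	{
		kref_get(&f->ref);		/* caller must already hold a ref */
	}

	static inline void foo_put(struct foo *f)
	{
		kref_put(&f->ref, foo_release);	/* frees on the final put */
	}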
sys/dev/pci/drm/i915/gvt/handlers.c
1493
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/handlers.c
1503
mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
sys/dev/pci/drm/i915/gvt/handlers.c
1504
return PTR_ERR_OR_ZERO(mm);
sys/dev/pci/drm/i915/gvt/scheduler.c
1527
struct intel_vgpu_mm *m, *mm;
sys/dev/pci/drm/i915/gvt/scheduler.c
1528
list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
sys/dev/pci/drm/i915/gvt/scheduler.c
1584
struct intel_vgpu_mm *mm;
sys/dev/pci/drm/i915/gvt/scheduler.c
1603
mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
sys/dev/pci/drm/i915/gvt/scheduler.c
1604
if (IS_ERR(mm))
sys/dev/pci/drm/i915/gvt/scheduler.c
1605
return PTR_ERR(mm);
sys/dev/pci/drm/i915/gvt/scheduler.c
1607
workload->shadow_mm = mm;
sys/dev/pci/drm/i915/gvt/scheduler.c
428
struct scatterlist *sg = pd->pt.base->mm.pages->sgl;
sys/dev/pci/drm/i915/gvt/scheduler.c
437
struct intel_vgpu_mm *mm = workload->shadow_mm;
sys/dev/pci/drm/i915/gvt/scheduler.c
441
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
sys/dev/pci/drm/i915/gvt/scheduler.c
442
set_dma_address(ppgtt->pd, mm->ppgtt_mm.shadow_pdps[0]);
sys/dev/pci/drm/i915/gvt/scheduler.c
453
set_dma_address(pd, mm->ppgtt_mm.shadow_pdps[i]);
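scheduler.c 1527-1528 above iterate workload->lri_shadow_mm with list_for_each_entry_safe() because entries may be unlinked mid-walk. A generic sketch of that idiom with a hypothetical struct item:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct item {
		struct list_head link;
	};

	static void drain(struct list_head *head)
	{
		struct item *it, *tmp;

		/* the _safe variant caches ->next first, so the current
		 * entry may be unlinked and freed inside the body */
		list_for_each_entry_safe(it, tmp, head, link) {
			list_del(&it->link);
			kfree(it);
		}
	}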
sys/dev/pci/drm/i915/i915_cmd_parser.c
1556
cmd = page_mask_bits(shadow->obj->mm.mapping);
sys/dev/pci/drm/i915/i915_debugfs.c
189
obj->mm.dirty ? " dirty" : "",
sys/dev/pci/drm/i915/i915_debugfs.c
190
obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : "");
sys/dev/pci/drm/i915/i915_debugfs.c
277
i915->mm.shrink_count,
sys/dev/pci/drm/i915/i915_debugfs.c
278
atomic_read(&i915->mm.free_count),
sys/dev/pci/drm/i915/i915_debugfs.c
279
i915->mm.shrink_memory);
sys/dev/pci/drm/i915/i915_debugfs.c
97
return obj->mm.mapping ? 'M' : ' ';
sys/dev/pci/drm/i915/i915_drm_client.c
52
const enum intel_region_id id = obj->mm.region ?
sys/dev/pci/drm/i915/i915_drm_client.c
53
obj->mm.region->id : INTEL_REGION_SMEM;
sys/dev/pci/drm/i915/i915_drm_client.c
68
obj->mm.madv == I915_MADV_DONTNEED)
sys/dev/pci/drm/i915/i915_drv.h
336
struct i915_gem_mm mm;
sys/dev/pci/drm/i915/i915_gem.c
1055
if (obj->mm.madv == I915_MADV_WILLNEED) {
sys/dev/pci/drm/i915/i915_gem.c
1067
if (obj->mm.madv != __I915_MADV_PURGED) {
sys/dev/pci/drm/i915/i915_gem.c
1068
obj->mm.madv = args->madv;
sys/dev/pci/drm/i915/i915_gem.c
1077
spin_lock_irqsave(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/i915_gem.c
1078
if (!list_empty(&obj->mm.link)) {
sys/dev/pci/drm/i915/i915_gem.c
1081
if (obj->mm.madv != I915_MADV_WILLNEED)
sys/dev/pci/drm/i915/i915_gem.c
1082
list = &i915->mm.purge_list;
sys/dev/pci/drm/i915/i915_gem.c
1084
list = &i915->mm.shrink_list;
sys/dev/pci/drm/i915/i915_gem.c
1085
list_move_tail(&obj->mm.link, list);
sys/dev/pci/drm/i915/i915_gem.c
1088
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
sys/dev/pci/drm/i915/i915_gem.c
1092
if (obj->mm.madv == I915_MADV_DONTNEED &&
sys/dev/pci/drm/i915/i915_gem.c
1096
args->retained = obj->mm.madv != __I915_MADV_PURGED;
sys/dev/pci/drm/i915/i915_gem.c
1112
while (atomic_read(&i915->mm.free_count)) {
sys/dev/pci/drm/i915/i915_gem.c
1113
flush_work(&i915->mm.free_work);
sys/dev/pci/drm/i915/i915_gem.c
1287
mtx_init(&i915->mm.obj_lock, IPL_TTY);
sys/dev/pci/drm/i915/i915_gem.c
1289
init_llist_head(&i915->mm.free_list);
sys/dev/pci/drm/i915/i915_gem.c
1291
INIT_LIST_HEAD(&i915->mm.purge_list);
sys/dev/pci/drm/i915/i915_gem.c
1292
INIT_LIST_HEAD(&i915->mm.shrink_list);
sys/dev/pci/drm/i915/i915_gem.c
1306
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
sys/dev/pci/drm/i915/i915_gem.c
1307
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
sys/dev/pci/drm/i915/i915_gem.c
1308
drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
sys/dev/pci/drm/i915/i915_gem.c
70
err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
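i915_gem.c 1077-1088 above requeue an object between i915->mm.purge_list and i915->mm.shrink_list under mm.obj_lock with interrupts saved. A generic sketch of that move as a hypothetical helper, with one lock guarding both lists:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static void requeue(spinlock_t *lock, struct list_head *entry,
			    struct list_head *dst)
	{
		unsigned long flags;

		spin_lock_irqsave(lock, flags);
		if (!list_empty(entry))		/* still on some list? */
			list_move_tail(entry, dst);
		spin_unlock_irqrestore(lock, flags);
	}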
sys/dev/pci/drm/i915/i915_gem_evict.c
183
drm_mm_scan_init_with_range(&scan, &vm->mm,
sys/dev/pci/drm/i915/i915_gem_evict.c
372
drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
sys/dev/pci/drm/i915/i915_gem_gtt.c
121
err = drm_mm_reserve_node(&vm->mm, node);
sys/dev/pci/drm/i915/i915_gem_gtt.c
130
err = drm_mm_reserve_node(&vm->mm, node);
sys/dev/pci/drm/i915/i915_gem_gtt.c
239
err = drm_mm_insert_node_in_range(&vm->mm, node,
sys/dev/pci/drm/i915/i915_gem_gtt.c
246
err = drm_mm_insert_node_in_range(&vm->mm, node,
sys/dev/pci/drm/i915/i915_gem_gtt.c
295
return drm_mm_insert_node_in_range(&vm->mm, node,
sys/dev/pci/drm/i915/i915_gem_gtt.c
47
GEM_BUG_ON(obj->mm.pages == pages);
sys/dev/pci/drm/i915/i915_mm.c
108
r.mm = vma->vm_mm;
sys/dev/pci/drm/i915/i915_mm.c
113
err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
sys/dev/pci/drm/i915/i915_mm.c
140
.mm = vma->vm_mm,
sys/dev/pci/drm/i915/i915_mm.c
161
err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
sys/dev/pci/drm/i915/i915_mm.c
33
struct mm_struct *mm;
sys/dev/pci/drm/i915/i915_mm.c
61
set_pte_at(r->mm, addr, pte,
sys/dev/pci/drm/i915/i915_mm.c
81
set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
sys/dev/pci/drm/i915/i915_scatterlist.c
170
struct drm_buddy *mm = bman_res->mm;
sys/dev/pci/drm/i915/i915_scatterlist.c
205
block_size = min_t(u64, size, drm_buddy_block_size(mm, block));
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
112
(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
129
drm_buddy_free_list(mm, &bman_res->blocks, 0);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
144
drm_buddy_free_list(&bman->mm, &bman_res->blocks, 0);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
159
struct drm_buddy *mm = &bman->mm;
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
181
(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
19
struct drm_buddy mm;
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
197
struct drm_buddy *mm = &bman->mm;
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
214
(drm_buddy_block_size(mm, block) >> PAGE_SHIFT);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
239
drm_buddy_print(&bman->mm, printer);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
243
drm_buddy_block_print(&bman->mm, block, printer);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
296
err = drm_buddy_init(&bman->mm, size, chunk_size);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
310
ttm_resource_manager_init(man, bdev, bman->mm.size >> PAGE_SHIFT);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
336
struct drm_buddy *mm = &bman->mm;
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
348
drm_buddy_free_list(mm, &bman->reserved, 0);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
349
drm_buddy_fini(mm);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
374
struct drm_buddy *mm = &bman->mm;
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
382
ret = drm_buddy_alloc_blocks(mm, start,
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
384
size, mm->chunk_size,
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
41
struct drm_buddy *mm = &bman->mm;
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
427
*avail = bman->mm.avail >> PAGE_SHIFT;
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
57
bman_res->mm = mm;
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
75
GEM_BUG_ON(min_page_size < mm->chunk_size);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
83
n_pages = size >> ilog2(mm->chunk_size);
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.c
92
err = drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT,
sys/dev/pci/drm/i915/i915_ttm_buddy_manager.h
36
struct drm_buddy *mm;
sys/dev/pci/drm/i915/i915_vma.c
1327
pages = vma->obj->mm.pages;
sys/dev/pci/drm/i915/i915_vma.c
1372
vma->page_sizes = vma->obj->mm.page_sizes;
sys/dev/pci/drm/i915/i915_vma.c
1410
if (vma->pages != vma->obj->mm.pages) {
sys/dev/pci/drm/i915/i915_vma.c
2057
obj->mm.dirty = true;
sys/dev/pci/drm/i915/i915_vma.c
2113
vma->obj->mm.tlb);
sys/dev/pci/drm/i915/i915_vma.c
2130
vma_invalidate_tlb(vma->vm, vma->obj->mm.tlb);
sys/dev/pci/drm/i915/i915_vma.c
2184
&vma->obj->mm.rsgt->table != vma->resource->bi.pages)
sys/dev/pci/drm/i915/i915_vma.c
2269
if (!obj->mm.rsgt)
sys/dev/pci/drm/i915/i915_vma.c
463
obj->mm.rsgt, i915_gem_object_is_readonly(obj),
sys/dev/pci/drm/i915/i915_vma.c
464
i915_gem_object_is_lmem(obj), obj->mm.region,
sys/dev/pci/drm/i915/intel_memory_region.c
348
for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
sys/dev/pci/drm/i915/intel_memory_region.c
369
i915->mm.stolen_region = mem;
sys/dev/pci/drm/i915/intel_memory_region.c
374
i915->mm.stolen_region = mem;
sys/dev/pci/drm/i915/intel_memory_region.c
390
i915->mm.regions[i] = mem;
sys/dev/pci/drm/i915/intel_memory_region.c
394
for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
sys/dev/pci/drm/i915/intel_memory_region.c
395
struct intel_memory_region *mem = i915->mm.regions[i];
sys/dev/pci/drm/i915/intel_memory_region.c
438
for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
sys/dev/pci/drm/i915/intel_memory_region.c
440
fetch_and_zero(&i915->mm.regions[i]);
sys/dev/pci/drm/i915/intel_memory_region.h
44
for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
sys/dev/pci/drm/i915/intel_memory_region.h
45
for_each_if((mr) = (i915)->mm.regions[id])
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
262
ggtt->vm.mm.color_adjust = mock_color_adjust;
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
328
ggtt->vm.mm.color_adjust = NULL;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
105
obj->mm.dirty = false;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1291
list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1292
drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1296
if (ggtt->vm.mm.color_adjust)
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1297
ggtt->vm.mm.color_adjust(node, 0,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1369
err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
1448
vma->pages = obj->mm.pages;
sys/dev/pci/drm/i915/selftests/i915_gem_gtt.c
357
mock_vma_res->bi.pages = obj->mm.pages;
sys/dev/pci/drm/i915/selftests/i915_vma.c
322
GEM_BUG_ON(!drm_mm_clean(&ggtt->vm.mm));
sys/dev/pci/drm/i915/selftests/i915_vma.c
667
if (vma->pages == obj->mm.pages) {
sys/dev/pci/drm/i915/selftests/i915_vma.c
773
if (vma->pages == vma->obj->mm.pages) {
sys/dev/pci/drm/i915/selftests/i915_vma.c
785
if (vma->pages != vma->obj->mm.pages) {
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
1065
obj->mm.pages->sgl,
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
146
for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
374
struct drm_buddy *mm;
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
397
res = to_ttm_buddy_resource(obj->mm.res);
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
398
mm = res->mm;
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
399
if (mm->size != size) {
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
401
__func__, mm->size, size);
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
407
if (mm->max_order != expected_order) {
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
409
__func__, mm->max_order, expected_order);
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
451
struct drm_buddy *mm;
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
485
res = to_ttm_buddy_resource(obj->mm.res);
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
487
mm = res->mm;
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
490
if (drm_buddy_block_size(mm, block) > size)
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
491
size = drm_buddy_block_size(mm, block);
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
500
for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
527
struct intel_memory_region *mr = obj->mm.region;
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
529
to_ttm_buddy_resource(obj->mm.res);
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
530
struct drm_buddy *mm = bman_res->mm;
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
537
u64 end = start + drm_buddy_block_size(mm, block);
sys/dev/pci/drm/i915/selftests/mock_region.c
18
i915_refct_sgt_put(obj->mm.rsgt);
sys/dev/pci/drm/i915/selftests/mock_region.c
19
obj->mm.rsgt = NULL;
sys/dev/pci/drm/i915/selftests/mock_region.c
20
intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
sys/dev/pci/drm/i915/selftests/mock_region.c
28
obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
sys/dev/pci/drm/i915/selftests/mock_region.c
32
if (IS_ERR(obj->mm.res))
sys/dev/pci/drm/i915/selftests/mock_region.c
33
return PTR_ERR(obj->mm.res);
sys/dev/pci/drm/i915/selftests/mock_region.c
35
obj->mm.rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region,
sys/dev/pci/drm/i915/selftests/mock_region.c
36
obj->mm.res,
sys/dev/pci/drm/i915/selftests/mock_region.c
37
obj->mm.region->min_page_size);
sys/dev/pci/drm/i915/selftests/mock_region.c
38
if (IS_ERR(obj->mm.rsgt)) {
sys/dev/pci/drm/i915/selftests/mock_region.c
39
err = PTR_ERR(obj->mm.rsgt);
sys/dev/pci/drm/i915/selftests/mock_region.c
43
pages = &obj->mm.rsgt->table;
sys/dev/pci/drm/i915/selftests/mock_region.c
49
intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
sys/dev/pci/drm/include/drm/drm_buddy.h
135
drm_buddy_block_size(struct drm_buddy *mm,
sys/dev/pci/drm/include/drm/drm_buddy.h
138
return mm->chunk_size << drm_buddy_block_order(block);
sys/dev/pci/drm/include/drm/drm_buddy.h
141
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size);
sys/dev/pci/drm/include/drm/drm_buddy.h
143
void drm_buddy_fini(struct drm_buddy *mm);
sys/dev/pci/drm/include/drm/drm_buddy.h
148
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
sys/dev/pci/drm/include/drm/drm_buddy.h
154
int drm_buddy_block_trim(struct drm_buddy *mm,
sys/dev/pci/drm/include/drm/drm_buddy.h
159
void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
sys/dev/pci/drm/include/drm/drm_buddy.h
161
void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
sys/dev/pci/drm/include/drm/drm_buddy.h
163
void drm_buddy_free_list(struct drm_buddy *mm,
sys/dev/pci/drm/include/drm/drm_buddy.h
167
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
sys/dev/pci/drm/include/drm/drm_buddy.h
168
void drm_buddy_block_print(struct drm_buddy *mm,
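The drm_buddy.h prototypes above outline the buddy allocator's whole lifecycle. A minimal sketch, assuming the argument order shown in those prototypes and in the i915_ttm_buddy_manager.c callers:

	#include <drm/drm_buddy.h>

	static int buddy_demo(u64 size, u64 chunk)
	{
		struct drm_buddy mm;
		LIST_HEAD(blocks);
		int err;

		err = drm_buddy_init(&mm, size, chunk);	/* manage [0, size) */
		if (err)
			return err;

		/* 1 MiB from anywhere in the range, at chunk granularity */
		err = drm_buddy_alloc_blocks(&mm, 0, size, 1ULL << 20,
					     chunk, &blocks, 0);
		if (!err)
			drm_buddy_free_list(&mm, &blocks, 0);

		drm_buddy_fini(&mm);
		return err;
	}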
sys/dev/pci/drm/include/drm/drm_gpusvm.h
205
struct mm_struct *mm;
sys/dev/pci/drm/include/drm/drm_gpusvm.h
253
struct mm_struct *mm,
sys/dev/pci/drm/include/drm/drm_gpusvm.h
313
struct mm_struct *mm,
sys/dev/pci/drm/include/drm/drm_mm.h
165
struct drm_mm *mm;
sys/dev/pci/drm/include/drm/drm_mm.h
228
struct drm_mm *mm;
sys/dev/pci/drm/include/drm/drm_mm.h
275
static inline bool drm_mm_initialized(const struct drm_mm *mm)
sys/dev/pci/drm/include/drm/drm_mm.h
277
return READ_ONCE(mm->hole_stack.next);
sys/dev/pci/drm/include/drm/drm_mm.h
352
#define drm_mm_nodes(mm) (&(mm)->head_node.node_list)
sys/dev/pci/drm/include/drm/drm_mm.h
362
#define drm_mm_for_each_node(entry, mm) \
sys/dev/pci/drm/include/drm/drm_mm.h
363
list_for_each_entry(entry, drm_mm_nodes(mm), node_list)
sys/dev/pci/drm/include/drm/drm_mm.h
374
#define drm_mm_for_each_node_safe(entry, next, mm) \
sys/dev/pci/drm/include/drm/drm_mm.h
375
list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list)
sys/dev/pci/drm/include/drm/drm_mm.h
393
#define drm_mm_for_each_hole(pos, mm, hole_start, hole_end) \
sys/dev/pci/drm/include/drm/drm_mm.h
394
for (pos = list_first_entry(&(mm)->hole_stack, \
sys/dev/pci/drm/include/drm/drm_mm.h
396
&pos->hole_stack != &(mm)->hole_stack ? \
sys/dev/pci/drm/include/drm/drm_mm.h
405
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
sys/dev/pci/drm/include/drm/drm_mm.h
406
int drm_mm_insert_node_in_range(struct drm_mm *mm,
sys/dev/pci/drm/include/drm/drm_mm.h
433
drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
sys/dev/pci/drm/include/drm/drm_mm.h
438
return drm_mm_insert_node_in_range(mm, node,
sys/dev/pci/drm/include/drm/drm_mm.h
457
static inline int drm_mm_insert_node(struct drm_mm *mm,
sys/dev/pci/drm/include/drm/drm_mm.h
461
return drm_mm_insert_node_generic(mm, node, size, 0, 0, 0);
sys/dev/pci/drm/include/drm/drm_mm.h
465
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size);
sys/dev/pci/drm/include/drm/drm_mm.h
466
void drm_mm_takedown(struct drm_mm *mm);
sys/dev/pci/drm/include/drm/drm_mm.h
476
static inline bool drm_mm_clean(const struct drm_mm *mm)
sys/dev/pci/drm/include/drm/drm_mm.h
478
return list_empty(drm_mm_nodes(mm));
sys/dev/pci/drm/include/drm/drm_mm.h
482
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);
sys/dev/pci/drm/include/drm/drm_mm.h
507
struct drm_mm *mm,
sys/dev/pci/drm/include/drm/drm_mm.h
532
struct drm_mm *mm,
sys/dev/pci/drm/include/drm/drm_mm.h
538
drm_mm_scan_init_with_range(scan, mm,
sys/dev/pci/drm/include/drm/drm_mm.h
549
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p);
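Likewise for drm_mm.h: init, insert, takedown. A minimal sketch built from the calls listed above plus drm_mm_remove_node(), the inverse of insert (not among these hits):

	#include <drm/drm_mm.h>

	static int mm_demo(void)
	{
		struct drm_mm mm;
		struct drm_mm_node node = {};
		int err;

		drm_mm_init(&mm, 0, 1ULL << 20);	/* manage a 1 MiB range */

		err = drm_mm_insert_node(&mm, &node, 4096);	/* first fit */
		if (!err)
			drm_mm_remove_node(&node);

		drm_mm_takedown(&mm);	/* valid only once drm_mm_clean() holds */
		return err;
	}

ttm_range_manager.c above shows the same lifecycle embedded in a TTM resource manager.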
sys/dev/pci/drm/include/drm/drm_pagemap.h
125
struct mm_struct *mm,
sys/dev/pci/drm/include/drm/drm_pagemap.h
240
struct mm_struct *mm;
sys/dev/pci/drm/include/drm/drm_pagemap.h
252
struct mm_struct *mm,
sys/dev/pci/drm/include/drm/drm_pagemap.h
262
struct device *dev, struct mm_struct *mm,
sys/dev/pci/drm/include/drm/drm_pagemap.h
269
struct mm_struct *mm,
sys/dev/pci/drm/radeon/radeon_cs.c
196
mmap_read_lock(current->mm);
sys/dev/pci/drm/radeon/radeon_cs.c
203
mmap_read_unlock(current->mm);
sys/dev/pci/drm/radeon/radeon_gem.c
493
mmap_read_lock(current->mm);
sys/dev/pci/drm/radeon/radeon_gem.c
496
mmap_read_unlock(current->mm);
sys/dev/pci/drm/radeon/radeon_gem.c
503
mmap_read_unlock(current->mm);
sys/dev/pci/drm/radeon/radeon_mn.c
100
ret = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
sys/dev/pci/drm/radeon/radeon_mn.c
124
if (!bo->notifier.mm)
sys/dev/pci/drm/radeon/radeon_mn.c
127
bo->notifier.mm = NULL;
sys/dev/pci/drm/radeon/radeon_ttm.c
345
if (current->mm != gtt->usermm)
sys/dev/pci/drm/radeon/radeon_ttm.c
601
gtt->usermm = current->mm;
sys/dev/pci/drm/ttm/tests/ttm_mock_manager.c
108
err = drm_buddy_init(&manager->mm, size, PAGE_SIZE);
sys/dev/pci/drm/ttm/tests/ttm_mock_manager.c
144
drm_buddy_fini(&mock_man->mm);
sys/dev/pci/drm/ttm/tests/ttm_mock_manager.c
33
struct drm_buddy *mm = &manager->mm;
sys/dev/pci/drm/ttm/tests/ttm_mock_manager.c
56
err = drm_buddy_alloc_blocks(mm, fpfn, lpfn, alloc_size,
sys/dev/pci/drm/ttm/tests/ttm_mock_manager.c
69
drm_buddy_free_list(mm, &mock_res->blocks, 0);
sys/dev/pci/drm/ttm/tests/ttm_mock_manager.c
81
struct drm_buddy *mm = &manager->mm;
sys/dev/pci/drm/ttm/tests/ttm_mock_manager.c
84
drm_buddy_free_list(mm, &mock_res->blocks, 0);
sys/dev/pci/drm/ttm/tests/ttm_mock_manager.h
12
struct drm_buddy mm;
sys/dev/pci/drm/ttm/ttm_range_manager.c
155
drm_mm_print(&rman->mm, printer);
sys/dev/pci/drm/ttm/ttm_range_manager.c
198
drm_mm_init(&rman->mm, 0, p_size);
sys/dev/pci/drm/ttm/ttm_range_manager.c
221
struct drm_mm *mm = &rman->mm;
sys/dev/pci/drm/ttm/ttm_range_manager.c
234
drm_mm_takedown(mm);
sys/dev/pci/drm/ttm/ttm_range_manager.c
50
struct drm_mm mm;
sys/dev/pci/drm/ttm/ttm_range_manager.c
67
struct drm_mm *mm = &rman->mm;
sys/dev/pci/drm/ttm/ttm_range_manager.c
87
ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0],
sys/kern/sysv_msg.c
570
struct mbuf **mm, *m;
sys/kern/sysv_msg.c
590
mm = &msg->msg_data;
sys/kern/sysv_msg.c
602
*mm = m;
sys/kern/sysv_msg.c
603
mm = &m->m_next;
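sysv_msg.c 590-603 above grow the message's mbuf chain through a pointer-to-pointer: mm starts at &msg->msg_data, each new mbuf is stored through *mm, and mm then advances to &m->m_next. The same idiom as a standalone sketch:

	#include <stddef.h>

	struct node {
		struct node *next;
	};

	/* Append via a pointer-to-pointer: the empty and non-empty cases
	 * share one code path and no tail variable is needed. */
	static void append(struct node **pp, struct node *n)
	{
		while (*pp != NULL)
			pp = &(*pp)->next;
		n->next = NULL;
		*pp = n;
	}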
sys/lib/libsa/ctime.c
47
int ss, mm, hh, wday, month, year;
sys/lib/libsa/ctime.c
52
mm = tt % 60;
sys/lib/libsa/ctime.c
72
(int)tt, hh, mm, ss, year);
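In ctime.c above, mm is minutes: a seconds count is peeled apart with successive divisions. The same decomposition as a standalone sketch:

	static void hms(long tt, int *hh, int *mm, int *ss)
	{
		*ss = tt % 60;
		tt /= 60;
		*mm = tt % 60;	/* the step shown at ctime.c 52 */
		tt /= 60;
		*hh = tt % 24;
	}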
sys/net/radix.c
1019
struct radix_mask *mm = m->rm_mklist;
sys/net/radix.c
1029
mm = m;
sys/net/radix.c
1030
m = mm;
sys/netinet/ip_mroute.c
1122
socket_send(struct socket *so, struct mbuf *mm, struct sockaddr_in *src)
sys/netinet/ip_mroute.c
1128
ret = sbappendaddr(&so->so_rcv, sintosa(src), mm, NULL);
sys/netinet/ip_mroute.c
1136
m_freem(mm);
sys/netinet/ip_mroute.c
1161
struct mbuf *mm;
sys/netinet/ip_mroute.c
1221
mm = m_copym(m, 0, hlen, M_NOWAIT);
sys/netinet/ip_mroute.c
1222
if (mm == NULL ||
sys/netinet/ip_mroute.c
1223
(mm = m_pullup(mm, hlen)) == NULL)
sys/netinet/ip_mroute.c
1231
im = mtod(mm, struct igmpmsg *);
sys/netinet/ip_mroute.c
1239
if (socket_send(ip_mrouter[rtableid], mm, &sin) < 0) {
sys/netinet6/ip6_mroute.c
1001
im = mtod(mm, struct mrt6msg *);
sys/netinet6/ip6_mroute.c
1007
m_freem(mm);
sys/netinet6/ip6_mroute.c
1011
if (socket6_send(ip6_mrouter[rtableid], mm,
sys/netinet6/ip6_mroute.c
897
socket6_send(struct socket *so, struct mbuf *mm, struct sockaddr_in6 *src)
sys/netinet6/ip6_mroute.c
903
ret = sbappendaddr(&so->so_rcv, sin6tosa(src), mm, NULL);
sys/netinet6/ip6_mroute.c
911
m_freem(mm);
sys/netinet6/ip6_mroute.c
930
struct mbuf *mm;
sys/netinet6/ip6_mroute.c
986
mm = m_copym(m, 0, sizeof(struct ip6_hdr), M_NOWAIT);
sys/netinet6/ip6_mroute.c
987
if (mm == NULL)
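Both multicast routers above build their upcall the same way: m_copym() duplicates the packet's leading bytes, m_pullup() makes them contiguous, and mtod() overlays the message struct. A hedged sketch of that step as a standalone helper (hypothetical name):

	#include <sys/param.h>
	#include <sys/mbuf.h>

	/* Copy the first hlen bytes of pkt into a contiguous mbuf so a
	 * header struct can be overlaid with mtod(); NULL on failure. */
	static struct mbuf *
	copy_header(struct mbuf *pkt, int hlen)
	{
		struct mbuf *mm;

		mm = m_copym(pkt, 0, hlen, M_NOWAIT);
		if (mm == NULL)
			return (NULL);
		return (m_pullup(mm, hlen));	/* frees mm on failure */
	}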
usr.bin/cdio/cdio.c
1463
u_char mm, ss, ff;
usr.bin/cdio/cdio.c
1484
lba2msf(betoh32(s.data->what.position.reladdr.lba), &mm, &ss,
usr.bin/cdio/cdio.c
1486
*min = mm;
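Here mm is CD minutes: lba2msf() converts a logical block address into minute:second:frame. A sketch of the usual conversion (75 frames per second, LBA 0 at 00:02:00 per the CD standard; not necessarily cdio's exact code):

	#include <sys/types.h>

	#define FPS	75			/* CD frames per second */

	static void
	lba2msf_demo(u_int32_t lba, u_char *m, u_char *s, u_char *f)
	{
		lba += 2 * FPS;			/* 2-second pregap offset */
		*m = lba / (60 * FPS);
		*s = (lba / FPS) % 60;
		*f = lba % FPS;
	}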
usr.bin/tic/dump_entry.c
894
unsigned long mm;
usr.bin/tic/dump_entry.c
896
mm = 1UL << nn;
usr.bin/tic/dump_entry.c
897
if ((mm - 16) <= lv && (mm + 16) > lv) {
usr.sbin/bgpctl/bgpctl.c
1724
show_mrt_msg(struct mrt_bgp_msg *mm, void *arg)
usr.sbin/bgpctl/bgpctl.c
1735
printf("%s %s[%u] -> ", fmt_time(&mm->time),
usr.sbin/bgpctl/bgpctl.c
1736
log_addr(&mm->src), mm->src_as);
usr.sbin/bgpctl/bgpctl.c
1737
printf("%s[%u]: size %zu%s ", log_addr(&mm->dst), mm->dst_as,
usr.sbin/bgpctl/bgpctl.c
1738
ibuf_size(&mm->msg), mm->add_path ? " addpath" : "");
usr.sbin/bgpctl/bgpctl.c
1739
b = &mm->msg;
usr.sbin/bgpctl/bgpctl.c
1786
show_mrt_update(b, req->flags, mm->add_path);
usr.sbin/ldomctl/mdstore.c
182
struct mdstore_msg mm;
usr.sbin/ldomctl/mdstore.c
184
bzero(&mm, sizeof(mm));
usr.sbin/ldomctl/mdstore.c
185
mm.msg_type = DS_DATA;
usr.sbin/ldomctl/mdstore.c
186
mm.payload_len = sizeof(mm) - 8;
usr.sbin/ldomctl/mdstore.c
187
mm.svc_handle = svc_handle;
usr.sbin/ldomctl/mdstore.c
188
mm.reqnum = mdstore_reqnum++;
usr.sbin/ldomctl/mdstore.c
189
mm.command = mdstore_command = MDSET_LIST_REQUEST;
usr.sbin/ldomctl/mdstore.c
190
ds_send_msg(lc, &mm, sizeof(mm));
usr.sbin/zic/zic.c
827
int mm, ss, sign;
usr.sbin/zic/zic.c
839
mm = ss = 0;
usr.sbin/zic/zic.c
840
else if (sscanf(string, scheck(string, "%"SCNdZIC":%d"), &hh, &mm) == 2)
usr.sbin/zic/zic.c
843
&hh, &mm, &ss) != 3) {
usr.sbin/zic/zic.c
848
mm < 0 || mm >= MINSPERHOUR ||
usr.sbin/zic/zic.c
857
return oadd(sign * hh * SECSPERHOUR, sign * (mm * SECSPERMIN + ss));
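zic's parser above accepts hh, hh:mm, or hh:mm:ss, range-checks the fields, and folds them to seconds with oadd(). A simplified standalone sketch (zic's scheck() validation and separate sign handling omitted):

	#include <stdio.h>

	/* Parse "hh", "hh:mm", or "hh:mm:ss"; -1 on malformed input. */
	static long
	parse_hms(const char *s)
	{
		int hh = 0, mm = 0, ss = 0;

		if (sscanf(s, "%d:%d:%d", &hh, &mm, &ss) < 1)
			return -1;
		if (hh < 0 || mm < 0 || mm >= 60 || ss < 0 || ss >= 60)
			return -1;
		return hh * 3600L + mm * 60 + ss;
	}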