root/drivers/gpu/drm/xe/xe_svm.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <linux/pci-p2pdma.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_pagemap.h>
#include <drm/drm_pagemap_util.h>

#include "xe_bo.h"
#include "xe_exec_queue_types.h"
#include "xe_gt_stats.h"
#include "xe_migrate.h"
#include "xe_module.h"
#include "xe_pm.h"
#include "xe_pt.h"
#include "xe_svm.h"
#include "xe_tile.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
#include "xe_vm_types.h"
#include "xe_vram_types.h"

/* Identifies subclasses of struct drm_pagemap_peer */
#define XE_PEER_PAGEMAP ((void *)0ul)
#define XE_PEER_VM ((void *)1ul)

/**
 * DOC: drm_pagemap reference-counting in xe:
 *
 * In addition to the drm_pagemap internal reference counting by its zone
 * device data, the xe driver holds the following long-term references:
 *
 * - struct xe_pagemap:
 *      The xe_pagemap struct derives from struct drm_pagemap and uses its
 *      reference count.
 * - SVM-enabled VMs:
 *      SVM-enabled VMs look up and keep a reference to all xe_pagemaps on
 *      the same device.
 * - VMAs:
 *      VMAs keep a reference to the drm_pagemap indicated by a gpu_madvise()
 *      call.
 *
 * In addition, a reference should be held for any drm_pagemap or xe_pagemap
 * pointer whose lifetime cannot be guaranteed by a VMA reference under the
 * VM lock.
 * That includes the range->pages.dpagemap pointer.
 */
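
/*
 * A minimal sketch of the rule above (illustrative only; assumes the
 * drm_pagemap_get()/drm_pagemap_put() helpers from drm_pagemap.h): code that
 * stashes a drm_pagemap pointer outside a VMA-protected field wraps it in a
 * get/put pair:
 *
 *	dpagemap = drm_pagemap_get(range->pages.dpagemap);
 *	...use dpagemap after dropping the lock...
 *	drm_pagemap_put(dpagemap);
 */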

static int xe_svm_get_pagemaps(struct xe_vm *vm);

void *xe_svm_private_page_owner(struct xe_vm *vm, bool force_smem)
{
        return force_smem ? NULL : vm->svm.peer.owner;
}

static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
        /*
         * Advisory-only check of whether the range is currently backed by
         * VRAM memory.
         */

        struct drm_gpusvm_pages_flags flags = {
                /* Pairs with WRITE_ONCE in drm_gpusvm.c */
                .__flags = READ_ONCE(range->base.pages.flags.__flags),
        };

        return flags.has_devmem_pages;
}

static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
{
        /* Not reliable without notifier lock */
        return xe_svm_range_in_vram(range) && range->tile_present;
}

static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
{
        return container_of(gpusvm, struct xe_vm, svm.gpusvm);
}

static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
{
        return gpusvm_to_vm(r->gpusvm);
}

#define range_debug(r__, operation__)                                   \
        vm_dbg(&range_to_vm(&(r__)->base)->xe->drm,                     \
               "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
               "start=0x%014lx, end=0x%014lx, size=%lu",                \
               (operation__), range_to_vm(&(r__)->base)->usm.asid,      \
               (r__)->base.gpusvm,                                      \
               xe_svm_range_in_vram((r__)) ? 1 : 0,                     \
               xe_svm_range_has_vram_binding((r__)) ? 1 : 0,            \
               (r__)->base.pages.notifier_seq,                          \
               xe_svm_range_start((r__)), xe_svm_range_end((r__)),      \
               xe_svm_range_size((r__)))

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
        range_debug(range, operation);
}

static struct drm_gpusvm_range *
xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
{
        struct xe_svm_range *range;

        range = kzalloc(sizeof(*range), GFP_KERNEL);
        if (!range)
                return NULL;

        INIT_LIST_HEAD(&range->garbage_collector_link);
        xe_vm_get(gpusvm_to_vm(gpusvm));

        return &range->base;
}

static void xe_svm_range_free(struct drm_gpusvm_range *range)
{
        xe_vm_put(range_to_vm(range));
        kfree(range);
}

static void
xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
                                   const struct mmu_notifier_range *mmu_range)
{
        struct xe_device *xe = vm->xe;

        range_debug(range, "GARBAGE COLLECTOR ADD");

        drm_gpusvm_range_set_unmapped(&range->base, mmu_range);

        spin_lock(&vm->svm.garbage_collector.lock);
        if (list_empty(&range->garbage_collector_link))
                list_add_tail(&range->garbage_collector_link,
                              &vm->svm.garbage_collector.range_list);
        spin_unlock(&vm->svm.garbage_collector.lock);

        queue_work(xe->usm.pf_wq, &vm->svm.garbage_collector.work);
}

static void xe_svm_tlb_inval_count_stats_incr(struct xe_gt *gt)
{
        xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_COUNT, 1);
}

static u8
xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
                                  const struct mmu_notifier_range *mmu_range,
                                  u64 *adj_start, u64 *adj_end)
{
        struct xe_svm_range *range = to_xe_range(r);
        struct xe_device *xe = vm->xe;
        struct xe_tile *tile;
        u8 tile_mask = 0;
        u8 id;

        xe_svm_assert_in_notifier(vm);

        range_debug(range, "NOTIFIER");

        /* Skip if already unmapped or if no binding exists */
        if (range->base.pages.flags.unmapped || !range->tile_present)
                return 0;

        range_debug(range, "NOTIFIER - EXECUTE");

        /* Adjust invalidation to range boundaries */
        *adj_start = min(xe_svm_range_start(range), mmu_range->start);
        *adj_end = max(xe_svm_range_end(range), mmu_range->end);

        /*
         * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
         * invalidation code can't correctly cope with sparse ranges or
         * invalidations spanning multiple ranges.
         */
        for_each_tile(tile, xe, id)
                if (xe_pt_zap_ptes_range(tile, vm, range)) {
                        /*
                         * WRITE_ONCE pairs with READ_ONCE in
                         * xe_vm_has_valid_gpu_mapping()
                         */
                        WRITE_ONCE(range->tile_invalidated,
                                   range->tile_invalidated | BIT(id));

                        if (!(tile_mask & BIT(id))) {
                                xe_svm_tlb_inval_count_stats_incr(tile->primary_gt);
                                if (tile->media_gt)
                                        xe_svm_tlb_inval_count_stats_incr(tile->media_gt);
                                tile_mask |= BIT(id);
                        }
                }

        return tile_mask;
}

static void
xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
                                const struct mmu_notifier_range *mmu_range)
{
        struct drm_gpusvm_ctx ctx = { .in_notifier = true, };

        xe_svm_assert_in_notifier(vm);

        drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
        if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
                xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
                                                   mmu_range);
}

static void xe_svm_tlb_inval_us_stats_incr(struct xe_gt *gt, ktime_t start)
{
        s64 us_delta = xe_gt_stats_ktime_us_delta(start);

        xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_TLB_INVAL_US, us_delta);
}

static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
                              struct drm_gpusvm_notifier *notifier,
                              const struct mmu_notifier_range *mmu_range)
{
        struct xe_vm *vm = gpusvm_to_vm(gpusvm);
        struct xe_device *xe = vm->xe;
        struct drm_gpusvm_range *r, *first;
        struct xe_tile *tile;
        ktime_t start = xe_gt_stats_ktime_get();
        u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
        u8 tile_mask = 0, id;
        long err;

        xe_svm_assert_in_notifier(vm);

        vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
               "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
               vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
               mmu_range->start, mmu_range->end, mmu_range->event);

        /* Adjust invalidation to notifier boundaries */
        adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
        adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);

        first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
        if (!first)
                return;

        /*
         * PTs may be getting destroyed so it is not safe to touch them, but
         * the PTs should already be invalidated at this point in time.
         * Regardless, we still need to ensure any DMA mappings are unmapped
         * here.
         */
        if (xe_vm_is_closed(vm))
                goto range_notifier_event_end;

        /*
         * XXX: Less than ideal to always wait on the VM's resv slots if an
         * invalidation is not required. Could walk the range list twice to
         * figure out if an invalidation is needed, but that is also not
         * ideal.
         */
        err = dma_resv_wait_timeout(xe_vm_resv(vm),
                                    DMA_RESV_USAGE_BOOKKEEP,
                                    false, MAX_SCHEDULE_TIMEOUT);
        XE_WARN_ON(err <= 0);

        r = first;
        drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
                tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
                                                               &adj_start,
                                                               &adj_end);
        if (!tile_mask)
                goto range_notifier_event_end;

        xe_device_wmb(xe);

        err = xe_vm_range_tilemask_tlb_inval(vm, adj_start, adj_end, tile_mask);
        WARN_ON_ONCE(err);

range_notifier_event_end:
        r = first;
        drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
                xe_svm_range_notifier_event_end(vm, r, mmu_range);
        for_each_tile(tile, xe, id) {
                if (tile_mask & BIT(id)) {
                        xe_svm_tlb_inval_us_stats_incr(tile->primary_gt, start);
                        if (tile->media_gt)
                                xe_svm_tlb_inval_us_stats_incr(tile->media_gt, start);
                }
        }
}

static int __xe_svm_garbage_collector(struct xe_vm *vm,
                                      struct xe_svm_range *range)
{
        struct dma_fence *fence;

        range_debug(range, "GARBAGE COLLECTOR");

        xe_vm_lock(vm, false);
        fence = xe_vm_range_unbind(vm, range);
        xe_vm_unlock(vm);
        if (IS_ERR(fence))
                return PTR_ERR(fence);
        dma_fence_put(fence);

        drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);

        return 0;
}

static void xe_vma_set_default_attributes(struct xe_vma *vma)
{
        struct xe_vma_mem_attr default_attr = {
                .preferred_loc.devmem_fd = DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE,
                .preferred_loc.migration_policy = DRM_XE_MIGRATE_ALL_PAGES,
                .pat_index = vma->attr.default_pat_index,
                .atomic_access = DRM_XE_ATOMIC_UNDEFINED,
        };

        xe_vma_mem_attr_copy(&vma->attr, &default_attr);
}

static int xe_svm_range_set_default_attr(struct xe_vm *vm, u64 start, u64 end)
{
        struct xe_vma *vma;
        bool has_default_attr;
        int err;

        vma = xe_vm_find_vma_by_addr(vm, start);
        if (!vma)
                return -EINVAL;

        if (!(vma->gpuva.flags & XE_VMA_MADV_AUTORESET)) {
                drm_dbg(&vm->xe->drm, "Skipping madvise reset for vma.\n");
                return 0;
        }

        vm_dbg(&vm->xe->drm, "Existing VMA start=0x%016llx, vma_end=0x%016llx",
               xe_vma_start(vma), xe_vma_end(vma));

        has_default_attr = xe_vma_has_default_mem_attrs(vma);

        if (has_default_attr) {
                start = xe_vma_start(vma);
                end = xe_vma_end(vma);
        } else if (xe_vma_start(vma) == start && xe_vma_end(vma) == end) {
                xe_vma_set_default_attributes(vma);
        }

        xe_vm_find_cpu_addr_mirror_vma_range(vm, &start, &end);

        if (xe_vma_start(vma) == start && xe_vma_end(vma) == end && has_default_attr)
                return 0;

        vm_dbg(&vm->xe->drm, "New VMA start=0x%016llx, vma_end=0x%016llx",  start, end);

        err = xe_vm_alloc_cpu_addr_mirror_vma(vm, start, end - start);
        if (err) {
                drm_warn(&vm->xe->drm, "New VMA MAP failed: %pe\n", ERR_PTR(err));
                xe_vm_kill(vm, true);
                return err;
        }

        /*
         * When called from xe_svm_handle_pagefault() the original VMA might
         * have changed; signal this so the caller looks up the VMA again.
         */
        return -EAGAIN;
}

static int xe_svm_garbage_collector(struct xe_vm *vm)
{
        struct xe_svm_range *range;
        u64 range_start;
        u64 range_end;
        int err, ret = 0;

        lockdep_assert_held_write(&vm->lock);

        if (xe_vm_is_closed_or_banned(vm))
                return -ENOENT;

        for (;;) {
                spin_lock(&vm->svm.garbage_collector.lock);
                range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
                                                 typeof(*range),
                                                 garbage_collector_link);
                if (!range)
                        break;

                range_start = xe_svm_range_start(range);
                range_end = xe_svm_range_end(range);

                list_del(&range->garbage_collector_link);
                spin_unlock(&vm->svm.garbage_collector.lock);

                err = __xe_svm_garbage_collector(vm, range);
                if (err) {
                        drm_warn(&vm->xe->drm,
                                 "Garbage collection failed: %pe\n",
                                 ERR_PTR(err));
                        xe_vm_kill(vm, true);
                        return err;
                }

                err = xe_svm_range_set_default_attr(vm, range_start, range_end);
                if (err) {
                        if (err == -EAGAIN)
                                ret = -EAGAIN;
                        else
                                return err;
                }
        }
        spin_unlock(&vm->svm.garbage_collector.lock);

        return ret;
}

static void xe_svm_garbage_collector_work_func(struct work_struct *w)
{
        struct xe_vm *vm = container_of(w, struct xe_vm,
                                        svm.garbage_collector.work);

        down_write(&vm->lock);
        xe_svm_garbage_collector(vm);
        up_write(&vm->lock);
}

#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)

static struct xe_vram_region *xe_pagemap_to_vr(struct xe_pagemap *xpagemap)
{
        return xpagemap->vr;
}

static struct xe_pagemap *xe_page_to_pagemap(struct page *page)
{
        return container_of(page_pgmap(page), struct xe_pagemap, pagemap);
}

static struct xe_vram_region *xe_page_to_vr(struct page *page)
{
        return xe_pagemap_to_vr(xe_page_to_pagemap(page));
}

static u64 xe_page_to_dpa(struct page *page)
{
        struct xe_pagemap *xpagemap = xe_page_to_pagemap(page);
        struct xe_vram_region *vr = xe_pagemap_to_vr(xpagemap);
        u64 hpa_base = xpagemap->hpa_base;
        u64 pfn = page_to_pfn(page);
        u64 offset;
        u64 dpa;

        xe_assert(vr->xe, is_device_private_page(page));
        xe_assert(vr->xe, (pfn << PAGE_SHIFT) >= hpa_base);

        offset = (pfn << PAGE_SHIFT) - hpa_base;
        dpa = vr->dpa_base + offset;

        return dpa;
}

static u64 xe_page_to_pcie(struct page *page)
{
        struct xe_pagemap *xpagemap = xe_page_to_pagemap(page);
        struct xe_vram_region *vr = xe_pagemap_to_vr(xpagemap);

        return xe_page_to_dpa(page) - vr->dpa_base + vr->io_start;
}
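
/*
 * Worked example of the translations above (illustrative numbers only):
 * with hpa_base = 0x100000000, dpa_base = 0x0 and io_start = 0x400000000,
 * a device-private page at host physical address 0x100200000 translates to
 * DPA 0x200000 and PCIe address 0x400200000.
 */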

enum xe_svm_copy_dir {
        XE_SVM_COPY_TO_VRAM,
        XE_SVM_COPY_TO_SRAM,
};

static void xe_svm_copy_kb_stats_incr(struct xe_gt *gt,
                                      const enum xe_svm_copy_dir dir,
                                      int kb)
{
        if (dir == XE_SVM_COPY_TO_VRAM)
                xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_KB, kb);
        else
                xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_KB, kb);
}

static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
                                      const enum xe_svm_copy_dir dir,
                                      unsigned long npages,
                                      ktime_t start)
{
        s64 us_delta = xe_gt_stats_ktime_us_delta(start);

        if (dir == XE_SVM_COPY_TO_VRAM) {
                switch (npages) {
                case 1:
                        xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_DEVICE_COPY_US,
                                         us_delta);
                        break;
                case 16:
                        xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_DEVICE_COPY_US,
                                         us_delta);
                        break;
                case 512:
                        xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_DEVICE_COPY_US,
                                         us_delta);
                        break;
                }
                xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_DEVICE_COPY_US,
                                 us_delta);
        } else {
                switch (npages) {
                case 1:
                        xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_CPU_COPY_US,
                                         us_delta);
                        break;
                case 16:
                        xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_CPU_COPY_US,
                                         us_delta);
                        break;
                case 512:
                        xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_CPU_COPY_US,
                                         us_delta);
                        break;
                }
                xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_CPU_COPY_US,
                                 us_delta);
        }
}

static int xe_svm_copy(struct page **pages,
                       struct drm_pagemap_addr *pagemap_addr,
                       unsigned long npages, const enum xe_svm_copy_dir dir,
                       struct dma_fence *pre_migrate_fence)
{
        struct xe_vram_region *vr = NULL;
        struct xe_gt *gt = NULL;
        struct xe_device *xe;
        struct dma_fence *fence = NULL;
        unsigned long i;
#define XE_VRAM_ADDR_INVALID    ~0x0ull
        u64 vram_addr = XE_VRAM_ADDR_INVALID;
        int err = 0, pos = 0;
        bool sram = dir == XE_SVM_COPY_TO_SRAM;
        ktime_t start = xe_gt_stats_ktime_get();

        /*
         * This flow is complex: it locates physically contiguous device
         * pages, derives the starting physical address, and performs a single
         * GPU copy for every 8M chunk in the DMA address array. Both device
         * pages and DMA addresses may be sparsely populated. If either is
         * NULL, a copy is triggered based on the current search state. The
         * last GPU copy is waited on to ensure all copies are complete.
         */
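
        /*
         * Worked example (illustrative): a fully populated, physically
         * contiguous 16M region results in two 8M copies, one when
         * (i - pos) first reaches XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE and one
         * for the remainder at the last page. A discontiguity ends the
         * current chunk early and restarts the search position at 'pos'.
         */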

        for (i = 0; i < npages; ++i) {
                struct page *spage = pages[i];
                struct dma_fence *__fence;
                u64 __vram_addr;
                bool match = false, chunk, last;

#define XE_MIGRATE_CHUNK_SIZE   SZ_8M
                chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
                last = (i + 1) == npages;

                /* No CPU page and no device pages queued to copy */
                if (!pagemap_addr[i].addr && vram_addr == XE_VRAM_ADDR_INVALID)
                        continue;

                if (!vr && spage) {
                        vr = xe_page_to_vr(spage);
                        gt = xe_migrate_exec_queue(vr->migrate)->gt;
                        xe = vr->xe;
                }
                XE_WARN_ON(spage && xe_page_to_vr(spage) != vr);

                /*
                 * CPU page and device page valid, capture physical address on
                 * first device page, check if physical contiguous on subsequent
                 * device pages.
                 */
                if (pagemap_addr[i].addr && spage) {
                        __vram_addr = xe_page_to_dpa(spage);
                        if (vram_addr == XE_VRAM_ADDR_INVALID) {
                                vram_addr = __vram_addr;
                                pos = i;
                        }

                        match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
                        /* Expected with contiguous memory */
                        xe_assert(vr->xe, match);

                        if (pagemap_addr[i].order) {
                                i += NR_PAGES(pagemap_addr[i].order) - 1;
                                chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
                                last = (i + 1) == npages;
                        }
                }

                /*
                 * Mismatched physical address, 8M copy chunk, or last page -
                 * trigger a copy.
                 */
                if (!match || chunk || last) {
                        /*
                         * Extra page for first copy if last page and matching
                         * physical address.
                         */
                        int incr = (match && last) ? 1 : 0;

                        if (vram_addr != XE_VRAM_ADDR_INVALID) {
                                xe_svm_copy_kb_stats_incr(gt, dir,
                                                          (i - pos + incr) *
                                                          (PAGE_SIZE / SZ_1K));
                                if (sram) {
                                        vm_dbg(&xe->drm,
                                               "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
                                               vram_addr,
                                               (u64)pagemap_addr[pos].addr, i - pos + incr);
                                        __fence = xe_migrate_from_vram(vr->migrate,
                                                                       i - pos + incr,
                                                                       vram_addr,
                                                                       &pagemap_addr[pos],
                                                                       pre_migrate_fence);
                                } else {
                                        vm_dbg(&xe->drm,
                                               "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
                                               (u64)pagemap_addr[pos].addr, vram_addr,
                                               i - pos + incr);
                                        __fence = xe_migrate_to_vram(vr->migrate,
                                                                     i - pos + incr,
                                                                     &pagemap_addr[pos],
                                                                     vram_addr,
                                                                     pre_migrate_fence);
                                }
                                if (IS_ERR(__fence)) {
                                        err = PTR_ERR(__fence);
                                        goto err_out;
                                }
                                pre_migrate_fence = NULL;
                                dma_fence_put(fence);
                                fence = __fence;
                        }

                        /* Setup physical address of next device page */
                        if (pagemap_addr[i].addr && spage) {
                                vram_addr = __vram_addr;
                                pos = i;
                        } else {
                                vram_addr = XE_VRAM_ADDR_INVALID;
                        }

                        /* Extra mismatched device page, copy it */
                        if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
                                xe_svm_copy_kb_stats_incr(gt, dir,
                                                          (PAGE_SIZE / SZ_1K));
                                if (sram) {
                                        vm_dbg(&xe->drm,
                                               "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
                                               vram_addr, (u64)pagemap_addr[pos].addr, 1);
                                        __fence = xe_migrate_from_vram(vr->migrate, 1,
                                                                       vram_addr,
                                                                       &pagemap_addr[pos],
                                                                       pre_migrate_fence);
                                } else {
                                        vm_dbg(&xe->drm,
                                               "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
                                               (u64)pagemap_addr[pos].addr, vram_addr, 1);
                                        __fence = xe_migrate_to_vram(vr->migrate, 1,
                                                                     &pagemap_addr[pos],
                                                                     vram_addr,
                                                                     pre_migrate_fence);
                                }
                                if (IS_ERR(__fence)) {
                                        err = PTR_ERR(__fence);
                                        goto err_out;
                                }
                                pre_migrate_fence = NULL;
                                dma_fence_put(fence);
                                fence = __fence;
                        }
                }
        }

err_out:
        /* Wait for all copies to complete */
        if (fence) {
                dma_fence_wait(fence, false);
                dma_fence_put(fence);
        }
        if (pre_migrate_fence)
                dma_fence_wait(pre_migrate_fence, false);

        /*
         * XXX: We can't derive the GT here (or anywhere in this function),
         * but compute always uses the primary GT, so accumulate stats on the
         * likely GT of the fault.
         */
        if (gt)
                xe_svm_copy_us_stats_incr(gt, dir, npages, start);

        return err;
#undef XE_MIGRATE_CHUNK_SIZE
#undef XE_VRAM_ADDR_INVALID
}

static int xe_svm_copy_to_devmem(struct page **pages,
                                 struct drm_pagemap_addr *pagemap_addr,
                                 unsigned long npages,
                                 struct dma_fence *pre_migrate_fence)
{
        return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM,
                           pre_migrate_fence);
}

static int xe_svm_copy_to_ram(struct page **pages,
                              struct drm_pagemap_addr *pagemap_addr,
                              unsigned long npages,
                              struct dma_fence *pre_migrate_fence)
{
        return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM,
                           pre_migrate_fence);
}

static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
{
        return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
}

static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
{
        struct xe_bo *bo = to_xe_bo(devmem_allocation);
        struct xe_device *xe = xe_bo_device(bo);

        dma_fence_put(devmem_allocation->pre_migrate_fence);
        xe_bo_put_async(bo);
        xe_pm_runtime_put(xe);
}

static u64 block_offset_to_pfn(struct drm_pagemap *dpagemap, u64 offset)
{
        struct xe_pagemap *xpagemap = container_of(dpagemap, typeof(*xpagemap), dpagemap);

        return PHYS_PFN(offset + xpagemap->hpa_base);
}

static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram)
{
        return &vram->ttm.mm;
}

static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
                                      unsigned long npages, unsigned long *pfn)
{
        struct xe_bo *bo = to_xe_bo(devmem_allocation);
        struct ttm_resource *res = bo->ttm.resource;
        struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
        struct drm_buddy_block *block;
        int j = 0;

        list_for_each_entry(block, blocks, link) {
                struct xe_vram_region *vr = block->private;
                struct drm_buddy *buddy = vram_to_buddy(vr);
                u64 block_pfn = block_offset_to_pfn(devmem_allocation->dpagemap,
                                                    drm_buddy_block_offset(block));
                int i;

                for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
                        pfn[j++] = block_pfn + i;
        }

        return 0;
}

static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
        .devmem_release = xe_svm_devmem_release,
        .populate_devmem_pfn = xe_svm_populate_devmem_pfn,
        .copy_to_devmem = xe_svm_copy_to_devmem,
        .copy_to_ram = xe_svm_copy_to_ram,
};

#else
static int xe_svm_get_pagemaps(struct xe_vm *vm)
{
        return 0;
}
#endif

static const struct drm_gpusvm_ops gpusvm_ops = {
        .range_alloc = xe_svm_range_alloc,
        .range_free = xe_svm_range_free,
        .invalidate = xe_svm_invalidate,
};

static const unsigned long fault_chunk_sizes[] = {
        SZ_2M,
        SZ_64K,
        SZ_4K,
};
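
/*
 * Note: these granules intentionally match the 4K/64K/2M stat buckets used
 * by the copy and range stats in this file (npages == 1, 16 and 512,
 * respectively, with 4K pages).
 */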

static void xe_pagemap_put(struct xe_pagemap *xpagemap)
{
        drm_pagemap_put(&xpagemap->dpagemap);
}

static void xe_svm_put_pagemaps(struct xe_vm *vm)
{
        struct xe_device *xe = vm->xe;
        struct xe_tile *tile;
        int id;

        for_each_tile(tile, xe, id) {
                struct xe_pagemap *xpagemap = vm->svm.pagemaps[id];

                if (xpagemap)
                        xe_pagemap_put(xpagemap);
                vm->svm.pagemaps[id] = NULL;
        }
}

static struct device *xe_peer_to_dev(struct drm_pagemap_peer *peer)
{
        if (peer->private == XE_PEER_PAGEMAP)
                return container_of(peer, struct xe_pagemap, peer)->dpagemap.drm->dev;

        return container_of(peer, struct xe_vm, svm.peer)->xe->drm.dev;
}

static bool xe_has_interconnect(struct drm_pagemap_peer *peer1,
                                struct drm_pagemap_peer *peer2)
{
        struct device *dev1 = xe_peer_to_dev(peer1);
        struct device *dev2 = xe_peer_to_dev(peer2);

        if (dev1 == dev2)
                return true;

        return pci_p2pdma_distance(to_pci_dev(dev1), dev2, true) >= 0;
}

static DRM_PAGEMAP_OWNER_LIST_DEFINE(xe_owner_list);

/**
 * xe_svm_init() - SVM initialize
 * @vm: The VM.
 *
 * Initialize SVM state which is embedded within the VM.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_svm_init(struct xe_vm *vm)
{
        int err;

        if (vm->flags & XE_VM_FLAG_FAULT_MODE) {
                spin_lock_init(&vm->svm.garbage_collector.lock);
                INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
                INIT_WORK(&vm->svm.garbage_collector.work,
                          xe_svm_garbage_collector_work_func);

                vm->svm.peer.private = XE_PEER_VM;
                err = drm_pagemap_acquire_owner(&vm->svm.peer, &xe_owner_list,
                                                xe_has_interconnect);
                if (err)
                        return err;

                err = xe_svm_get_pagemaps(vm);
                if (err) {
                        drm_pagemap_release_owner(&vm->svm.peer);
                        return err;
                }

                err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
                                      current->mm, 0, vm->size,
                                      xe_modparam.svm_notifier_size * SZ_1M,
                                      &gpusvm_ops, fault_chunk_sizes,
                                      ARRAY_SIZE(fault_chunk_sizes));
                drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);

                if (err) {
                        xe_svm_put_pagemaps(vm);
                        drm_pagemap_release_owner(&vm->svm.peer);
                        return err;
                }
        } else {
                err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM (simple)",
                                      &vm->xe->drm, NULL, 0, 0, 0, NULL,
                                      NULL, 0);
        }

        return err;
}

/**
 * xe_svm_close() - SVM close
 * @vm: The VM.
 *
 * Close SVM state (i.e., stop and flush all SVM actions).
 */
void xe_svm_close(struct xe_vm *vm)
{
        xe_assert(vm->xe, xe_vm_is_closed(vm));
        disable_work_sync(&vm->svm.garbage_collector.work);
        xe_svm_put_pagemaps(vm);
        drm_pagemap_release_owner(&vm->svm.peer);
}

/**
 * xe_svm_fini() - SVM finalize
 * @vm: The VM.
 *
 * Finalize SVM state which is embedded within the VM.
 */
void xe_svm_fini(struct xe_vm *vm)
{
        xe_assert(vm->xe, xe_vm_is_closed(vm));

        drm_gpusvm_fini(&vm->svm.gpusvm);
}
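
/*
 * Illustrative lifecycle sketch (based on the asserts above): xe_svm_init()
 * is called at VM creation, xe_svm_close() once the VM is closed, and
 * xe_svm_fini() at final VM destruction:
 *
 *	err = xe_svm_init(vm);
 *	...
 *	xe_svm_close(vm);
 *	xe_svm_fini(vm);
 */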

static bool xe_svm_range_has_pagemap_locked(const struct xe_svm_range *range,
                                            const struct drm_pagemap *dpagemap)
{
        return range->base.pages.dpagemap == dpagemap;
}

static bool xe_svm_range_has_pagemap(struct xe_svm_range *range,
                                     const struct drm_pagemap *dpagemap)
{
        struct xe_vm *vm = range_to_vm(&range->base);
        bool ret;

        xe_svm_notifier_lock(vm);
        ret = xe_svm_range_has_pagemap_locked(range, dpagemap);
        xe_svm_notifier_unlock(vm);

        return ret;
}

static bool xe_svm_range_is_valid(struct xe_svm_range *range,
                                  struct xe_tile *tile,
                                  bool devmem_only,
                                  const struct drm_pagemap *dpagemap)

{
        return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
                                            range->tile_invalidated) &&
                (!devmem_only || xe_svm_range_has_pagemap(range, dpagemap)));
}

/**
 * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
 * @vm: xe_vm pointer
 * @range: Pointer to the SVM range structure
 *
 * The xe_svm_range_migrate_to_smem() function checks whether the range has
 * pages in VRAM and, if so, migrates them to SMEM.
 */
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
        if (xe_svm_range_in_vram(range))
                drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
}

/**
 * xe_svm_range_validate() - Check if the SVM range is valid
 * @vm: xe_vm pointer
 * @range: Pointer to the SVM range structure
 * @tile_mask: Mask representing the tiles to be checked
 * @dpagemap: if !%NULL, the range is expected to be present
 * in device memory identified by this parameter.
 *
 * The xe_svm_range_validate() function checks if a range is
 * valid and located in the desired memory region.
 *
 * Return: true if the range is valid, false otherwise
 */
bool xe_svm_range_validate(struct xe_vm *vm,
                           struct xe_svm_range *range,
                           u8 tile_mask, const struct drm_pagemap *dpagemap)
{
        bool ret;

        xe_svm_notifier_lock(vm);

        ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask;
        if (dpagemap)
                ret = ret && xe_svm_range_has_pagemap_locked(range, dpagemap);
        else
                ret = ret && !range->base.pages.dpagemap;

        xe_svm_notifier_unlock(vm);

        return ret;
}

/**
 * xe_svm_find_vma_start() - Find start of CPU VMA
 * @vm: xe_vm pointer
 * @start: start address
 * @end: end address
 * @vma: Pointer to struct xe_vma
 *
 * This function searches for a CPU VMA within the specified range
 * [start, end] in the given VM. It adjusts the range based on the xe_vma
 * start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
 *
 * Return: The starting address of the VMA within the range,
 * or ULONG_MAX if no VMA is found
 */
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
{
        return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
                                         max(start, xe_vma_start(vma)),
                                         min(end, xe_vma_end(vma)));
}

#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
                                      unsigned long start, unsigned long end,
                                      struct mm_struct *mm,
                                      unsigned long timeslice_ms)
{
        struct xe_pagemap *xpagemap = container_of(dpagemap, typeof(*xpagemap), dpagemap);
        struct drm_pagemap_migrate_details mdetails = {
                .timeslice_ms = timeslice_ms,
                .source_peer_migrates = 1,
        };
        struct xe_vram_region *vr = xe_pagemap_to_vr(xpagemap);
        struct dma_fence *pre_migrate_fence = NULL;
        struct xe_device *xe = vr->xe;
        struct device *dev = xe->drm.dev;
        struct drm_buddy_block *block;
        struct xe_validation_ctx vctx;
        struct list_head *blocks;
        struct drm_exec exec;
        struct xe_bo *bo;
        int err = 0, idx;

        if (!drm_dev_enter(&xe->drm, &idx))
                return -ENODEV;

        xe_pm_runtime_get(xe);

        xe_validation_guard(&vctx, &xe->val, &exec, (struct xe_val_flags) {}, err) {
                bo = xe_bo_create_locked(xe, NULL, NULL, end - start,
                                         ttm_bo_type_device,
                                         (IS_DGFX(xe) ? XE_BO_FLAG_VRAM(vr) : XE_BO_FLAG_SYSTEM) |
                                         XE_BO_FLAG_CPU_ADDR_MIRROR, &exec);
                drm_exec_retry_on_contention(&exec);
                if (IS_ERR(bo)) {
                        err = PTR_ERR(bo);
                        xe_validation_retry_on_oom(&vctx, &err);
                        break;
                }

                /* Ensure that any clearing or async eviction will complete before migration. */
                if (!dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) {
                        err = dma_resv_get_singleton(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
                                                     &pre_migrate_fence);
                        if (err)
                                dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
                                                      false, MAX_SCHEDULE_TIMEOUT);
                        else if (pre_migrate_fence)
                                dma_fence_enable_sw_signaling(pre_migrate_fence);
                }

                drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
                                        &dpagemap_devmem_ops, dpagemap, end - start,
                                        pre_migrate_fence);

                blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
                list_for_each_entry(block, blocks, link)
                        block->private = vr;

                xe_bo_get(bo);

                /* Ensure the device has a pm ref while there are device pages active. */
                xe_pm_runtime_get_noresume(xe);
                /* Consumes the devmem allocation ref. */
                err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
                                                    start, end, &mdetails);
                xe_bo_unlock(bo);
                xe_bo_put(bo);
        }
        xe_pm_runtime_put(xe);
        drm_dev_exit(idx);

        return err;
}
#endif

static bool supports_4K_migration(struct xe_device *xe)
{
        if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
                return false;

        return true;
}

/**
 * xe_svm_range_needs_migrate_to_vram() - Check if SVM range needs migration to VRAM
 * @range: SVM range for which migration needs to be decided
 * @vma: vma which has range
 * @dpagemap: The preferred struct drm_pagemap to migrate to.
 *
 * Return: True if the range needs migration and migration is supported, false otherwise
 */
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
                                        const struct drm_pagemap *dpagemap)
{
        struct xe_vm *vm = range_to_vm(&range->base);
        u64 range_size = xe_svm_range_size(range);

        if (!range->base.pages.flags.migrate_devmem || !dpagemap)
                return false;

        xe_assert(vm->xe, IS_DGFX(vm->xe));

        if (xe_svm_range_has_pagemap(range, dpagemap)) {
                drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
                return false;
        }

        if (range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
                drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
                return false;
        }

        return true;
}

#define DECL_SVM_RANGE_COUNT_STATS(elem, stat) \
static void xe_svm_range_##elem##_count_stats_incr(struct xe_gt *gt, \
                                                   struct xe_svm_range *range) \
{ \
        switch (xe_svm_range_size(range)) { \
        case SZ_4K: \
                xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_COUNT, 1); \
                break; \
        case SZ_64K: \
                xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_COUNT, 1); \
                break; \
        case SZ_2M: \
                xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_COUNT, 1); \
                break; \
        } \
}

DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT)
DECL_SVM_RANGE_COUNT_STATS(valid_fault, VALID_PAGEFAULT)
DECL_SVM_RANGE_COUNT_STATS(migrate, MIGRATE)
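
/*
 * For reference, DECL_SVM_RANGE_COUNT_STATS(fault, PAGEFAULT) above expands
 * to a helper equivalent to this sketch:
 *
 *	static void xe_svm_range_fault_count_stats_incr(struct xe_gt *gt,
 *							struct xe_svm_range *range)
 *	{
 *		switch (xe_svm_range_size(range)) {
 *		case SZ_4K:
 *			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_PAGEFAULT_COUNT, 1);
 *			break;
 *		case SZ_64K:
 *			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_PAGEFAULT_COUNT, 1);
 *			break;
 *		case SZ_2M:
 *			xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_PAGEFAULT_COUNT, 1);
 *			break;
 *		}
 *	}
 */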

#define DECL_SVM_RANGE_US_STATS(elem, stat) \
static void xe_svm_range_##elem##_us_stats_incr(struct xe_gt *gt, \
                                                struct xe_svm_range *range, \
                                                ktime_t start) \
{ \
        s64 us_delta = xe_gt_stats_ktime_us_delta(start); \
\
        switch (xe_svm_range_size(range)) { \
        case SZ_4K: \
                xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_4K_##stat##_US, \
                                 us_delta); \
                break; \
        case SZ_64K: \
                xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_64K_##stat##_US, \
                                 us_delta); \
                break; \
        case SZ_2M: \
                xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_2M_##stat##_US, \
                                 us_delta); \
                break; \
        } \
}

DECL_SVM_RANGE_US_STATS(migrate, MIGRATE)
DECL_SVM_RANGE_US_STATS(get_pages, GET_PAGES)
DECL_SVM_RANGE_US_STATS(bind, BIND)
DECL_SVM_RANGE_US_STATS(fault, PAGEFAULT)

static int __xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
                                     struct xe_gt *gt, u64 fault_addr,
                                     bool need_vram)
{
        int devmem_possible = IS_DGFX(vm->xe) &&
                IS_ENABLED(CONFIG_DRM_XE_PAGEMAP);
        struct drm_gpusvm_ctx ctx = {
                .read_only = xe_vma_read_only(vma),
                .devmem_possible = devmem_possible,
                .check_pages_threshold = devmem_possible ? SZ_64K : 0,
                .devmem_only = need_vram && devmem_possible,
                .timeslice_ms = need_vram && devmem_possible ?
                        vm->xe->atomic_svm_timeslice_ms : 0,
        };
        struct xe_validation_ctx vctx;
        struct drm_exec exec;
        struct xe_svm_range *range;
        struct dma_fence *fence;
        struct drm_pagemap *dpagemap;
        struct xe_tile *tile = gt_to_tile(gt);
        int migrate_try_count = ctx.devmem_only ? 3 : 1;
        ktime_t start = xe_gt_stats_ktime_get(), bind_start, get_pages_start;
        int err;

        lockdep_assert_held_write(&vm->lock);
        xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));

        xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);

retry:
        /* Always process UNMAPs first so the view of SVM ranges is current */
        err = xe_svm_garbage_collector(vm);
        if (err)
                return err;

        dpagemap = ctx.devmem_only ? xe_tile_local_pagemap(tile) :
                xe_vma_resolve_pagemap(vma, tile);
        ctx.device_private_page_owner = xe_svm_private_page_owner(vm, !dpagemap);
        range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);

        if (IS_ERR(range))
                return PTR_ERR(range);

        xe_svm_range_fault_count_stats_incr(gt, range);

        if (ctx.devmem_only && !range->base.pages.flags.migrate_devmem) {
                err = -EACCES;
                goto out;
        }

        if (xe_svm_range_is_valid(range, tile, ctx.devmem_only, dpagemap)) {
                xe_svm_range_valid_fault_count_stats_incr(gt, range);
                range_debug(range, "PAGE FAULT - VALID");
                goto out;
        }

        range_debug(range, "PAGE FAULT");

        if (--migrate_try_count >= 0 &&
            xe_svm_range_needs_migrate_to_vram(range, vma, dpagemap)) {
                ktime_t migrate_start = xe_gt_stats_ktime_get();

                xe_svm_range_migrate_count_stats_incr(gt, range);
                err = xe_svm_alloc_vram(range, &ctx, dpagemap);
                xe_svm_range_migrate_us_stats_incr(gt, range, migrate_start);
                ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
                if (err) {
                        if (migrate_try_count || !ctx.devmem_only) {
                                drm_dbg(&vm->xe->drm,
                                        "VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
                                        vm->usm.asid, ERR_PTR(err));

                                /*
                                 * In the devmem-only case, mixed mappings may
                                 * be found. The get_pages function will fix
                                 * these up to a single location, allowing the
                                 * page fault handler to make forward progress.
                                 */
                                if (ctx.devmem_only)
                                        goto get_pages;
                                else
                                        goto retry;
                        } else {
                                drm_err(&vm->xe->drm,
                                        "VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
                                        vm->usm.asid, ERR_PTR(err));
                                return err;
                        }
                }
        }

get_pages:
        get_pages_start = xe_gt_stats_ktime_get();

        range_debug(range, "GET PAGES");
        err = xe_svm_range_get_pages(vm, range, &ctx);
        /* Corner case where CPU mappings have changed */
        if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
                ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
                if (migrate_try_count > 0 || !ctx.devmem_only) {
                        drm_dbg(&vm->xe->drm,
                                "Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
                                vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
                        range_debug(range, "PAGE FAULT - RETRY PAGES");
                        goto retry;
                } else {
                        drm_err(&vm->xe->drm,
                                "Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
                                vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
                }
        }
        if (err) {
                range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
                goto out;
        } else if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)) {
                drm_dbg(&vm->xe->drm, "After page collect data location is %sin \"%s\".\n",
                        xe_svm_range_has_pagemap(range, dpagemap) ? "" : "NOT ",
                        dpagemap ? dpagemap->drm->unique : "System.");
        }

        xe_svm_range_get_pages_us_stats_incr(gt, range, get_pages_start);
        range_debug(range, "PAGE FAULT - BIND");

        bind_start = xe_gt_stats_ktime_get();
        xe_validation_guard(&vctx, &vm->xe->val, &exec, (struct xe_val_flags) {}, err) {
                err = xe_vm_drm_exec_lock(vm, &exec);
                drm_exec_retry_on_contention(&exec);

                xe_vm_set_validation_exec(vm, &exec);
                fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
                xe_vm_set_validation_exec(vm, NULL);
                if (IS_ERR(fence)) {
                        drm_exec_retry_on_contention(&exec);
                        err = PTR_ERR(fence);
                        xe_validation_retry_on_oom(&vctx, &err);
                        xe_svm_range_bind_us_stats_incr(gt, range, bind_start);
                        break;
                }
        }
        if (err)
                goto err_out;

        dma_fence_wait(fence, false);
        dma_fence_put(fence);
        xe_svm_range_bind_us_stats_incr(gt, range, bind_start);

out:
        xe_svm_range_fault_us_stats_incr(gt, range, start);
        return err;

err_out:
        if (err == -EAGAIN) {
                ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
                range_debug(range, "PAGE FAULT - RETRY BIND");
                goto retry;
        }

        return err;
}

/**
 * xe_svm_handle_pagefault() - SVM handle page fault
 * @vm: The VM.
 * @vma: The CPU address mirror VMA.
 * @gt: The GT upon which the fault occurred.
 * @fault_addr: The GPU fault address.
 * @atomic: The fault atomic access bit.
 *
 * Create GPU bindings for a SVM page fault. Optionally migrate to device
 * memory.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
                            struct xe_gt *gt, u64 fault_addr,
                            bool atomic)
{
        int need_vram, ret;
retry:
        need_vram = xe_vma_need_vram_for_atomic(vm->xe, vma, atomic);
        if (need_vram < 0)
                return need_vram;

        ret = __xe_svm_handle_pagefault(vm, vma, gt, fault_addr,
                                        need_vram ? true : false);
        if (ret == -EAGAIN) {
                /*
                 * Retry once on -EAGAIN to re-lookup the VMA, as the original VMA
                 * may have been split by xe_svm_range_set_default_attr.
                 */
                vma = xe_vm_find_vma_by_addr(vm, fault_addr);
                if (!vma)
                        return -EINVAL;

                goto retry;
        }
        return ret;
}

/**
 * xe_svm_has_mapping() - SVM has mappings
 * @vm: The VM.
 * @start: Start address.
 * @end: End address.
 *
 * Check if an address range has SVM mappings.
 *
 * Return: True if address range has a SVM mapping, False otherwise
 */
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
        return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
}

/**
 * xe_svm_unmap_address_range() - Unmap SVM mappings and ranges
 * @vm: The VM
 * @start: start addr
 * @end: end addr
 *
 * This function unmaps SVM ranges if the start or end address falls inside
 * them.
 */
void xe_svm_unmap_address_range(struct xe_vm *vm, u64 start, u64 end)
{
        struct drm_gpusvm_notifier *notifier, *next;

        lockdep_assert_held_write(&vm->lock);

        drm_gpusvm_for_each_notifier_safe(notifier, next, &vm->svm.gpusvm, start, end) {
                struct drm_gpusvm_range *range, *__next;

                drm_gpusvm_for_each_range_safe(range, __next, notifier, start, end) {
                        if (start > drm_gpusvm_range_start(range) ||
                            end < drm_gpusvm_range_end(range)) {
                                if (IS_DGFX(vm->xe) && xe_svm_range_in_vram(to_xe_range(range)))
                                        drm_gpusvm_range_evict(&vm->svm.gpusvm, range);
                                drm_gpusvm_range_get(range);
                                __xe_svm_garbage_collector(vm, to_xe_range(range));
                                if (!list_empty(&to_xe_range(range)->garbage_collector_link)) {
                                        spin_lock(&vm->svm.garbage_collector.lock);
                                        list_del(&to_xe_range(range)->garbage_collector_link);
                                        spin_unlock(&vm->svm.garbage_collector.lock);
                                }
                                drm_gpusvm_range_put(range);
                        }
                }
        }
}

/**
 * xe_svm_bo_evict() - SVM evict BO to system memory
 * @bo: BO to evict
 *
 * SVM evict BO to system memory. GPU SVM layer ensures all device pages
 * are evicted before returning.
 *
 * Return: 0 on success, negative error code otherwise
 */
int xe_svm_bo_evict(struct xe_bo *bo)
{
        return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
}

/**
 * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
 * @vm: xe_vm pointer
 * @addr: address for which range needs to be found/inserted
 * @vma:  Pointer to struct xe_vma which mirrors CPU
 * @ctx: GPU SVM context
 *
 * This function finds an existing SVM range or inserts a newly allocated one
 * based on the address.
 *
 * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
 */
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
                                                 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
{
        struct drm_gpusvm_range *r;

        r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
                                            xe_vma_start(vma), xe_vma_end(vma), ctx);
        if (IS_ERR(r))
                return ERR_CAST(r);

        return to_xe_range(r);
}

/**
 * xe_svm_range_get_pages() - Get pages for a SVM range
 * @vm: Pointer to the struct xe_vm
 * @range: Pointer to the xe SVM range structure
 * @ctx: GPU SVM context
 *
 * This function gets pages for a SVM range and ensures they are mapped for
 * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
                           struct drm_gpusvm_ctx *ctx)
{
        int err = 0;

        err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
        if (err == -EOPNOTSUPP) {
                range_debug(range, "PAGE FAULT - EVICT PAGES");
                drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
        }

        return err;
}

/**
 * xe_svm_ranges_zap_ptes_in_range() - Clear PTEs of SVM ranges in an input range
 * @vm: Pointer to the xe_vm structure
 * @start: Start of the input range
 * @end: End of the input range
 *
 * This function removes the page table entries (PTEs) associated
 * with the SVM ranges within the given input start and end.
 *
 * Return: Bitmask of tiles whose GTs need TLB invalidation.
 */
u8 xe_svm_ranges_zap_ptes_in_range(struct xe_vm *vm, u64 start, u64 end)
{
        struct drm_gpusvm_notifier *notifier;
        struct xe_svm_range *range;
        u64 adj_start, adj_end;
        struct xe_tile *tile;
        u8 tile_mask = 0;
        u8 id;

        lockdep_assert(lockdep_is_held_type(&vm->svm.gpusvm.notifier_lock, 1) &&
                       lockdep_is_held_type(&vm->lock, 0));

        drm_gpusvm_for_each_notifier(notifier, &vm->svm.gpusvm, start, end) {
                struct drm_gpusvm_range *r = NULL;

                adj_start = max(start, drm_gpusvm_notifier_start(notifier));
                adj_end = min(end, drm_gpusvm_notifier_end(notifier));
                drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end) {
                        range = to_xe_range(r);
                        for_each_tile(tile, vm->xe, id) {
                                if (xe_pt_zap_ptes_range(tile, vm, range)) {
                                        tile_mask |= BIT(id);
                                        /*
                                         * WRITE_ONCE pairs with READ_ONCE in
                                         * xe_vm_has_valid_gpu_mapping().
                                         * Must not fail after setting
                                         * tile_invalidated and before
                                         * TLB invalidation.
                                         */
                                        WRITE_ONCE(range->tile_invalidated,
                                                   range->tile_invalidated | BIT(id));
                                }
                        }
                }
        }

        return tile_mask;
}
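
/*
 * A sketch of the caller contract above, illustrative only: the vm lock
 * must be held in write mode and the gpusvm notifier lock in read mode,
 * matching the lockdep assertion, and the returned mask should be turned
 * into a TLB invalidation before the locks are dropped. The invalidation
 * helper named below is hypothetical, not an API from this file:
 *
 *	down_write(&vm->lock);
 *	down_read(&vm->svm.gpusvm.notifier_lock);
 *	tile_mask = xe_svm_ranges_zap_ptes_in_range(vm, start, end);
 *	if (tile_mask)
 *		issue_range_tlb_invalidation(vm, start, end, tile_mask);
 *	up_read(&vm->svm.gpusvm.notifier_lock);
 *	up_write(&vm->lock);
 */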

#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)

/**
 * xe_vma_resolve_pagemap - Resolve the appropriate DRM pagemap for a VMA
 * @vma: Pointer to the xe_vma structure containing memory attributes
 * @tile: Pointer to the xe_tile structure used as fallback for VRAM mapping
 *
 * This function determines the correct DRM pagemap to use for a given VMA.
 * If the VMA's preferred location already references a drm_pagemap, that
 * pagemap is returned. Otherwise the preferred location's devmem_fd is
 * examined: %DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM returns NULL, indicating
 * that no pagemap is available and that system memory is to be used as the
 * preferred location, while %DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE returns
 * the VRAM pagemap associated with @tile on discrete devices.
 *
 * Future support for multi-device configurations may use xe_drm_pagemap_from_fd()
 * to resolve pagemaps from arbitrary file descriptors.
 *
 * Return: A pointer to the resolved drm_pagemap, or NULL if none is applicable.
 */
struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
{
        struct drm_pagemap *dpagemap = vma->attr.preferred_loc.dpagemap;
        s32 fd;

        if (dpagemap)
                return dpagemap;

        fd = (s32)vma->attr.preferred_loc.devmem_fd;

        if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM)
                return NULL;

        if (fd == DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE)
                return IS_DGFX(tile_to_xe(tile)) ? xe_tile_local_pagemap(tile) : NULL;

        return NULL;
}
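
/*
 * For example, on a discrete device a VMA whose preferred location carries
 * %DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE resolves to the faulting tile's local
 * VRAM pagemap, whereas %DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM, or any
 * resolution on an integrated device, yields NULL and hence system memory.
 */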

/**
 * xe_svm_alloc_vram() - Allocate device memory pages for range,
 * migrating existing data.
 * @range: SVM range
 * @ctx: DRM GPU SVM context
 * @dpagemap: The struct drm_pagemap representing the memory to allocate.
 *
 * Return: 0 on success, error code on failure.
 */
int xe_svm_alloc_vram(struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx,
                      struct drm_pagemap *dpagemap)
{
        static DECLARE_RWSEM(driver_migrate_lock);
        struct xe_vm *vm = range_to_vm(&range->base);
        enum drm_gpusvm_scan_result migration_state;
        struct xe_device *xe = vm->xe;
        int err, retries = 1;
        bool write_locked = false;

	xe_assert(xe, range->base.pages.flags.migrate_devmem);
        range_debug(range, "ALLOCATE VRAM");

        migration_state = drm_gpusvm_scan_mm(&range->base,
                                             xe_svm_private_page_owner(vm, false),
                                             dpagemap->pagemap);

        if (migration_state == DRM_GPUSVM_SCAN_EQUAL) {
                if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
                        drm_dbg(dpagemap->drm, "Already migrated!\n");
                return 0;
        }

        if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM))
                drm_dbg(&xe->drm, "Request migration to device memory on \"%s\".\n",
                        dpagemap->drm->unique);

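	/*
	 * The migrate lock is taken shared in the common case so that
	 * migrations to different pagemaps can proceed concurrently. If
	 * populating device memory fails with -EBUSY, upgrade to the
	 * exclusive lock, evict the range to system memory and retry once,
	 * serialized against all other migrations.
	 */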
        err = down_read_interruptible(&driver_migrate_lock);
        if (err)
                return err;
        do {
                err = drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
                                              xe_svm_range_end(range),
                                              range->base.gpusvm->mm,
                                              ctx->timeslice_ms);

                if (err == -EBUSY && retries) {
                        if (!write_locked) {
                                int lock_err;

                                up_read(&driver_migrate_lock);
                                lock_err = down_write_killable(&driver_migrate_lock);
                                if (lock_err)
                                        return lock_err;
                                write_locked = true;
                        }
                        drm_gpusvm_range_evict(range->base.gpusvm, &range->base);
                }
        } while (err == -EBUSY && retries--);
        if (write_locked)
                up_write(&driver_migrate_lock);
        else
                up_read(&driver_migrate_lock);

        return err;
}

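/*
 * Encode a device-visible address for a device-private page: pages local to
 * the mapping device are addressed by their device physical address (DPA)
 * over XE_INTERCONNECT_VRAM, while pages belonging to another device are
 * DMA-mapped through their PCIe BAR address and accessed over
 * XE_INTERCONNECT_P2P.
 */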
static struct drm_pagemap_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
                          struct device *dev,
                          struct page *page,
                          unsigned int order,
                          enum dma_data_direction dir)
{
        struct device *pgmap_dev = dpagemap->drm->dev;
        enum drm_interconnect_protocol prot;
        dma_addr_t addr;

        if (pgmap_dev == dev) {
                addr = xe_page_to_dpa(page);
                prot = XE_INTERCONNECT_VRAM;
        } else {
                addr = dma_map_resource(dev,
                                        xe_page_to_pcie(page),
                                        PAGE_SIZE << order, dir,
                                        DMA_ATTR_SKIP_CPU_SYNC);
                prot = XE_INTERCONNECT_P2P;
        }

        return drm_pagemap_addr_encode(addr, prot, order, dir);
}

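/* Only P2P mappings were DMA-mapped above; local VRAM DPAs need no unmap. */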
static void xe_drm_pagemap_device_unmap(struct drm_pagemap *dpagemap,
                                        struct device *dev,
                                        const struct drm_pagemap_addr *addr)
{
        if (addr->proto != XE_INTERCONNECT_P2P)
                return;

        dma_unmap_resource(dev, addr->addr, PAGE_SIZE << addr->order,
                           addr->dir, DMA_ATTR_SKIP_CPU_SYNC);
}

static void xe_pagemap_destroy_work(struct work_struct *work)
{
        struct xe_pagemap *xpagemap = container_of(work, typeof(*xpagemap), destroy_work);
        struct dev_pagemap *pagemap = &xpagemap->pagemap;
        struct drm_device *drm = xpagemap->dpagemap.drm;
        int idx;

        /*
	 * Only unmap / release if the devm_ release actions haven't run yet.
	 * Otherwise the devm_ callbacks have already released the resources,
	 * or will do so shortly.
         */
        if (drm_dev_enter(drm, &idx)) {
                devm_memunmap_pages(drm->dev, pagemap);
                devm_release_mem_region(drm->dev, pagemap->range.start,
                                        pagemap->range.end - pagemap->range.start + 1);
                drm_dev_exit(idx);
        }

        drm_pagemap_release_owner(&xpagemap->peer);
        kfree(xpagemap);
}

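/*
 * The destroy callback may be invoked from atomic or reclaim context, where
 * unmapping the pages and releasing the memory region is not permitted, so
 * defer to the device's destroy workqueue in that case.
 */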
static void xe_pagemap_destroy(struct drm_pagemap *dpagemap, bool from_atomic_or_reclaim)
{
        struct xe_pagemap *xpagemap = container_of(dpagemap, typeof(*xpagemap), dpagemap);
        struct xe_device *xe = to_xe_device(dpagemap->drm);

        if (from_atomic_or_reclaim)
                queue_work(xe->destroy_wq, &xpagemap->destroy_work);
        else
                xe_pagemap_destroy_work(&xpagemap->destroy_work);
}

static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
        .device_map = xe_drm_pagemap_device_map,
        .device_unmap = xe_drm_pagemap_device_unmap,
        .populate_mm = xe_drm_pagemap_populate_mm,
        .destroy = xe_pagemap_destroy,
};

/**
 * xe_pagemap_create() - Create a struct xe_pagemap object
 * @xe: The xe device.
 * @vr: Back-pointer to the struct xe_vram_region.
 *
 * Allocate and initialize a struct xe_pagemap. On successful
 * return, drm_pagemap_put() on the embedded struct drm_pagemap
 * should be used to unreference.
 *
 * Return: Pointer to a struct xe_pagemap if successful. Error pointer
 * on failure.
 */
static struct xe_pagemap *xe_pagemap_create(struct xe_device *xe, struct xe_vram_region *vr)
{
        struct device *dev = xe->drm.dev;
        struct xe_pagemap *xpagemap;
        struct dev_pagemap *pagemap;
        struct drm_pagemap *dpagemap;
        struct resource *res;
        void *addr;
        int err;

        xpagemap = kzalloc_obj(*xpagemap);
        if (!xpagemap)
                return ERR_PTR(-ENOMEM);

        pagemap = &xpagemap->pagemap;
        dpagemap = &xpagemap->dpagemap;
        INIT_WORK(&xpagemap->destroy_work, xe_pagemap_destroy_work);
        xpagemap->vr = vr;
        xpagemap->peer.private = XE_PEER_PAGEMAP;

        err = drm_pagemap_init(dpagemap, pagemap, &xe->drm, &xe_drm_pagemap_ops);
        if (err)
                goto out_no_dpagemap;

        res = devm_request_free_mem_region(dev, &iomem_resource,
                                           vr->usable_size);
        if (IS_ERR(res)) {
                err = PTR_ERR(res);
                goto out_err;
        }

        err = drm_pagemap_acquire_owner(&xpagemap->peer, &xe_owner_list,
                                        xe_has_interconnect);
        if (err)
                goto out_no_owner;

        pagemap->type = MEMORY_DEVICE_PRIVATE;
        pagemap->range.start = res->start;
        pagemap->range.end = res->end;
        pagemap->nr_range = 1;
        pagemap->owner = xpagemap->peer.owner;
        pagemap->ops = drm_pagemap_pagemap_ops_get();
        addr = devm_memremap_pages(dev, pagemap);
        if (IS_ERR(addr)) {
                err = PTR_ERR(addr);
                goto out_no_pages;
        }
        xpagemap->hpa_base = res->start;
        return xpagemap;

out_no_pages:
        drm_pagemap_release_owner(&xpagemap->peer);
out_no_owner:
        devm_release_mem_region(dev, res->start, res->end - res->start + 1);
out_err:
        drm_pagemap_put(dpagemap);
        return ERR_PTR(err);

out_no_dpagemap:
        kfree(xpagemap);
        return ERR_PTR(err);
}

/**
 * xe_pagemap_find_or_create() - Find or create a struct xe_pagemap
 * @xe: The xe device.
 * @cache: The struct xe_pagemap_cache.
 * @vr: The VRAM region.
 *
 * Check if there is an already used xe_pagemap for this VRAM region, and if
 * so, return it.
 * If not, check if there is a cached xe_pagemap for this region; in that case,
 * cancel its destruction, re-initialize it and return it.
 * Finally, if there is no cached or already used pagemap, create one and
 * register it in the region's pagemap cache.
 *
 * Note that this function is typically called from within an IOCTL, and waits
 * are therefore carried out interruptibly if possible.
 *
 * Return: A pointer to a struct xe_pagemap if successful, error pointer on
 * failure.
 */
static struct xe_pagemap *
xe_pagemap_find_or_create(struct xe_device *xe, struct drm_pagemap_cache *cache,
                          struct xe_vram_region *vr)
{
        struct drm_pagemap *dpagemap;
        struct xe_pagemap *xpagemap;
        int err;

        err = drm_pagemap_cache_lock_lookup(cache);
        if (err)
                return ERR_PTR(err);

        dpagemap = drm_pagemap_get_from_cache(cache);
        if (IS_ERR(dpagemap)) {
                xpagemap = ERR_CAST(dpagemap);
        } else if (!dpagemap) {
                xpagemap = xe_pagemap_create(xe, vr);
                if (IS_ERR(xpagemap))
                        goto out_unlock;
                drm_pagemap_cache_set_pagemap(cache, &xpagemap->dpagemap);
        } else {
                xpagemap = container_of(dpagemap, typeof(*xpagemap), dpagemap);
        }

out_unlock:
        drm_pagemap_cache_unlock_lookup(cache);
        return xpagemap;
}

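/*
 * Look up or create an xe_pagemap for each VRAM region of the device and
 * stash a reference in the vm's pagemap array. Bit 0 of mem_region_mask
 * denotes system memory, hence the shift by one to obtain a tile's VRAM
 * bit. On failure, any references already obtained are dropped again.
 */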
static int xe_svm_get_pagemaps(struct xe_vm *vm)
{
        struct xe_device *xe = vm->xe;
	struct xe_pagemap *xpagemap = NULL;
	struct xe_tile *tile;
	u8 id;

        for_each_tile(tile, xe, id) {
                struct xe_vram_region *vr;

                if (!((BIT(id) << 1) & xe->info.mem_region_mask))
                        continue;

                vr = xe_tile_to_vr(tile);
                xpagemap = xe_pagemap_find_or_create(xe, vr->dpagemap_cache, vr);
                if (IS_ERR(xpagemap))
                        break;
                vm->svm.pagemaps[id] = xpagemap;
        }

        if (IS_ERR(xpagemap)) {
                xe_svm_put_pagemaps(vm);
                return PTR_ERR(xpagemap);
        }

        return 0;
}

/**
 * xe_pagemap_shrinker_create() - Create a drm_pagemap shrinker
 * @xe: The xe device
 *
 * Create a drm_pagemap shrinker and register with the xe device.
 *
 * Return: %0 on success, negative error code on failure.
 */
int xe_pagemap_shrinker_create(struct xe_device *xe)
{
        xe->usm.dpagemap_shrinker = drm_pagemap_shrinker_create_devm(&xe->drm);
        return PTR_ERR_OR_ZERO(xe->usm.dpagemap_shrinker);
}

/**
 * xe_pagemap_cache_create() - Create a drm_pagemap cache
 * @tile: The tile to register the cache with
 *
 * Create a drm_pagemap cache and register with the tile.
 *
 * Return: %0 on success, negative error code on failure.
 */
int xe_pagemap_cache_create(struct xe_tile *tile)
{
        struct xe_device *xe = tile_to_xe(tile);

        if (IS_DGFX(xe)) {
                struct drm_pagemap_cache *cache =
                        drm_pagemap_cache_create_devm(xe->usm.dpagemap_shrinker);

                if (IS_ERR(cache))
                        return PTR_ERR(cache);

                tile->mem.vram->dpagemap_cache = cache;
        }

        return 0;
}

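/*
 * Memory region instance 0 denotes system memory, so VRAM region instances
 * are 1-based and map to tile ids by subtracting one. Check that the tile
 * exists and actually has a VRAM region before looking up its pagemap.
 */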
static struct drm_pagemap *xe_devmem_open(struct xe_device *xe, u32 region_instance)
{
        u32 tile_id = region_instance - 1;
        struct xe_pagemap *xpagemap;
        struct xe_vram_region *vr;

        if (tile_id >= xe->info.tile_count)
                return ERR_PTR(-ENOENT);

        if (!((BIT(tile_id) << 1) & xe->info.mem_region_mask))
                return ERR_PTR(-ENOENT);

        vr = xe_tile_to_vr(&xe->tiles[tile_id]);

        /* Returns a reference-counted embedded struct drm_pagemap */
        xpagemap = xe_pagemap_find_or_create(xe, vr->dpagemap_cache, vr);
        if (IS_ERR(xpagemap))
                return ERR_CAST(xpagemap);

        return &xpagemap->dpagemap;
}

/**
 * xe_drm_pagemap_from_fd() - Return a drm_pagemap pointer from a
 * (file_descriptor, region_instance) pair.
 * @fd: An fd opened against an xe device.
 * @region_instance: The region instance representing the device memory
 * on the opened xe device.
 *
 * Opens a struct drm_pagemap pointer on the
 * indicated device and region_instance.
 *
 * Return: A reference-counted struct drm_pagemap pointer on success,
 * negative error pointer on failure.
 */
struct drm_pagemap *xe_drm_pagemap_from_fd(int fd, u32 region_instance)
{
        struct drm_pagemap *dpagemap;
        struct file *file;
        struct drm_file *fpriv;
        struct drm_device *drm;
        int idx;

        if (fd <= 0)
                return ERR_PTR(-EINVAL);

        file = fget(fd);
        if (!file)
                return ERR_PTR(-ENOENT);

        if (!xe_is_xe_file(file)) {
                dpagemap = ERR_PTR(-ENOENT);
                goto out;
        }

        fpriv = file->private_data;
        drm = fpriv->minor->dev;
        if (!drm_dev_enter(drm, &idx)) {
                dpagemap = ERR_PTR(-ENODEV);
                goto out;
        }

        dpagemap = xe_devmem_open(to_xe_device(drm), region_instance);
        drm_dev_exit(idx);
out:
        fput(file);
        return dpagemap;
}
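
/*
 * A minimal usage sketch, assuming an ioctl that supplies a devmem_fd and a
 * region instance; the surrounding code and variable names are illustrative,
 * not part of this file. The returned pagemap holds a reference that the
 * caller must drop with drm_pagemap_put() when done:
 *
 *	struct drm_pagemap *dpagemap;
 *
 *	dpagemap = xe_drm_pagemap_from_fd(devmem_fd, region_instance);
 *	if (IS_ERR(dpagemap))
 *		return PTR_ERR(dpagemap);
 *	...
 *	drm_pagemap_put(dpagemap);
 */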

#else

int xe_pagemap_shrinker_create(struct xe_device *xe)
{
        return 0;
}

int xe_pagemap_cache_create(struct xe_tile *tile)
{
        return 0;
}

int xe_svm_alloc_vram(struct xe_svm_range *range,
                      const struct drm_gpusvm_ctx *ctx,
                      struct drm_pagemap *dpagemap)
{
        return -EOPNOTSUPP;
}

struct drm_pagemap *xe_vma_resolve_pagemap(struct xe_vma *vma, struct xe_tile *tile)
{
        return NULL;
}

struct drm_pagemap *xe_drm_pagemap_from_fd(int fd, u32 region_instance)
{
        return ERR_PTR(-ENOENT);
}

#endif

/**
 * xe_svm_flush() - SVM flush
 * @vm: The VM.
 *
 * Flush all SVM actions.
 */
void xe_svm_flush(struct xe_vm *vm)
{
        if (xe_vm_in_fault_mode(vm))
                flush_work(&vm->svm.garbage_collector.work);
}