// SPDX-License-Identifier: MIT
/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"

#include "i915_drv.h"
#include "i915_gem_evict.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
{
#ifdef __linux__
        do {
                if (dma_map_sg_attrs(obj->base.dev->dev,
                                     pages->sgl, pages->nents,
                                     DMA_BIDIRECTIONAL,
                                     DMA_ATTR_SKIP_CPU_SYNC |
                                     DMA_ATTR_NO_KERNEL_MAPPING |
                                     DMA_ATTR_NO_WARN))
                        return 0;

                /*
                 * If the DMA remap fails, one cause can be that we have
                 * too many objects pinned in a small remapping table,
                 * such as swiotlb. Incrementally purge all other objects and
                 * try again - if there are no more pages to remove from
                 * the DMA remapper, i915_gem_shrink will return 0.
                 */
                GEM_BUG_ON(obj->mm.pages == pages);
        } while (i915_gem_shrink(NULL, to_i915(obj->base.dev),
                                 obj->base.size >> PAGE_SHIFT, NULL,
                                 I915_SHRINK_BOUND |
                                 I915_SHRINK_UNBOUND));

        return -ENOSPC;
#else
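        /* Non-Linux (OpenBSD drm port): no extra DMA mapping is performed here. */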
        return 0;
#endif
}
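
/*
 * A minimal usage sketch (illustrative, not taken from this driver): callers
 * are expected to pair i915_gem_gtt_prepare_pages() with
 * i915_gem_gtt_finish_pages() around the object's use of its backing pages:
 *
 *	err = i915_gem_gtt_prepare_pages(obj, pages);
 *	if (err)
 *		return err;
 *	...
 *	i915_gem_gtt_finish_pages(obj, pages);
 *
 * -ENOSPC from the prepare step means the DMA remap space stayed exhausted
 * even after shrinking every other bound and unbound object.
 */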

void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
{
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

        /* XXX This does not prevent more requests being submitted! */
        if (unlikely(ggtt->do_idle_maps))
                /* Wait a bit, in the hope it avoids the hang */
                usleep_range(100, 250);

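        /* XXX The Linux dma_unmap_sg() call is compiled out in this port. */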
#ifdef notyet
        dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents,
                     DMA_BIDIRECTIONAL);
#endif
}

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @ww: An optional struct i915_gem_ww_ctx.
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
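 *
 * A minimal sketch of a caller (illustrative only; assumes the caller already
 * holds vm->mutex, as i915_gem_gtt_insert() requires, and @node is zeroed)::
 *
 *	err = i915_gem_gtt_reserve(vm, NULL, &node, SZ_64K, 0x100000,
 *				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
 *
 * With PIN_NOEVICT, -ENOSPC is returned as soon as [0x100000, 0x110000)
 * cannot be reserved without evicting an overlapping node.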
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
                         struct i915_gem_ww_ctx *ww,
                         struct drm_mm_node *node,
                         u64 size, u64 offset, unsigned long color,
                         unsigned int flags)
{
        int err;

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(range_overflows(offset, size, vm->total));
        GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
        GEM_BUG_ON(drm_mm_node_allocated(node));

        node->size = size;
        node->start = offset;
        node->color = color;

        err = drm_mm_reserve_node(&vm->mm, node);
        if (err != -ENOSPC)
                return err;

        if (flags & PIN_NOEVICT)
                return -ENOSPC;

        err = i915_gem_evict_for_node(vm, ww, node, flags);
        if (err == 0)
                err = drm_mm_reserve_node(&vm->mm, node);

        return err;
}

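/*
 * Pick a random, @align-aligned offset for a block of @len bytes inside
 * [@start, @end). Worked example (illustrative numbers only): with
 * start = 0, end = 1 MiB, len = 64 KiB and align = 4 KiB,
 *
 *	range = round_down(1048576 - 65536, 4096) - round_up(0, 4096)
 *	      = 983040,
 *
 * so a remainder in [0, 983040) is drawn and the result is rounded up to
 * the next 4 KiB boundary, one of 241 candidate placements.
 */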
static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
        u64 range, addr;

        GEM_BUG_ON(range_overflows(start, len, end));
        GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

        range = round_down(end - len, align) - round_up(start, align);
        if (range) {
                if (sizeof(unsigned long) == sizeof(u64)) {
                        addr = get_random_u64();
                } else {
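                        /* On 32-bit, widen to 64 random bits only when range needs it. */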
                        addr = get_random_u32();
                        if (range > U32_MAX) {
                                addr <<= 32;
                                addr |= get_random_u32();
                        }
                }
                div64_u64_rem(addr, range, &addr);
                start += addr;
        }

        return round_up(start, align);
}

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @ww: An optional struct i915_gem_ww_ctx.
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction; failing that, the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
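 *
 * A minimal sketch of a caller (illustrative only; vm->mutex must be held,
 * which this function asserts via lockdep)::
 *
 *	mutex_lock(&vm->mutex);
 *	err = i915_gem_gtt_insert(vm, NULL, &node, SZ_2M, 0,
 *				  I915_COLOR_UNEVICTABLE, 0, U64_MAX,
 *				  PIN_HIGH);
 *	mutex_unlock(&vm->mutex);
 *
 * PIN_HIGH biases the search top-down (DRM_MM_INSERT_HIGHEST) before any
 * fallback search or eviction is attempted.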
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
                        struct i915_gem_ww_ctx *ww,
                        struct drm_mm_node *node,
                        u64 size, u64 alignment, unsigned long color,
                        u64 start, u64 end, unsigned int flags)
{
        enum drm_mm_insert_mode mode;
        u64 offset;
        int err;

        lockdep_assert_held(&vm->mutex);

        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(alignment && !is_power_of_2(alignment));
        GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
        GEM_BUG_ON(start >= end);
        GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
        GEM_BUG_ON(vm == &to_gt(vm->i915)->ggtt->alias->vm);
        GEM_BUG_ON(drm_mm_node_allocated(node));

        if (unlikely(range_overflows(start, size, end)))
                return -ENOSPC;

        if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
                return -ENOSPC;

        mode = DRM_MM_INSERT_BEST;
        if (flags & PIN_HIGH)
                mode = DRM_MM_INSERT_HIGHEST;
        if (flags & PIN_MAPPABLE)
                mode = DRM_MM_INSERT_LOW;

        /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
         * so we know that we always have a minimum alignment of 4096.
         * The drm_mm range manager is optimised to return results
         * with zero alignment, so where possible use the optimal
         * path.
         */
        BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
        if (alignment <= I915_GTT_MIN_ALIGNMENT)
                alignment = 0;

        err = drm_mm_insert_node_in_range(&vm->mm, node,
                                          size, alignment, color,
                                          start, end, mode);
        if (err != -ENOSPC)
                return err;

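        /*
         * DRM_MM_INSERT_HIGHEST implies DRM_MM_INSERT_ONCE (a single
         * top-down pass over the hole list), so retry with an exhaustive
         * DRM_MM_INSERT_BEST search before resorting to eviction.
         */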
        if (mode & DRM_MM_INSERT_ONCE) {
                err = drm_mm_insert_node_in_range(&vm->mm, node,
                                                  size, alignment, color,
                                                  start, end,
                                                  DRM_MM_INSERT_BEST);
                if (err != -ENOSPC)
                        return err;
        }

        if (flags & PIN_NOEVICT)
                return -ENOSPC;

        /*
         * No free space, pick a slot at random.
         *
         * There is a pathological case here using a GTT shared between
         * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
         *
         *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
         *         (64k objects)             (448k objects)
         *
         * Now imagine that the eviction LRU is ordered top-down (just because
         * pathology meets real life), and that we need to evict an object to
         * make room inside the aperture. The eviction scan then has to walk
         * the 448k list before it finds one within range. And now imagine that
         * it has to search for a new hole between every byte inside the memcpy,
         * for several simultaneous clients.
         *
         * On a full-ppgtt system, if we have run out of available space, there
         * will be lots and lots of objects in the eviction list! Again,
         * searching that LRU list may be slow if we are also applying any
 * range restrictions (e.g. restriction to the low 4GiB) and so, for
 * simplicity and similarity between the different GTTs, try the single
 * random replacement first.
         */
        offset = random_offset(start, end,
                               size, alignment ?: I915_GTT_MIN_ALIGNMENT);
        err = i915_gem_gtt_reserve(vm, ww, node, size, offset, color, flags);
        if (err != -ENOSPC)
                return err;

        if (flags & PIN_NOSEARCH)
                return -ENOSPC;

        /* Randomly selected placement is pinned, do a search */
        err = i915_gem_evict_something(vm, ww, size, alignment, color,
                                       start, end, flags);
        if (err)
                return err;

        return drm_mm_insert_node_in_range(&vm->mm, node,
                                           size, alignment, color,
                                           start, end, DRM_MM_INSERT_EVICT);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_gtt.c"
#endif