ttm_cached
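For context, ttm_cached is one of the three CPU caching modes TTM can request for a backing store. A minimal sketch of the enum as declared in include/drm/ttm/ttm_caching.h (the comments here are paraphrased, not the kernel-doc):

enum ttm_caching {
	ttm_uncached,		/* map pages uncached */
	ttm_write_combined,	/* map pages write-combined */
	ttm_cached		/* map pages cacheable (write-back) */
};

The listing that follows collects the call sites and comparisons referencing ttm_cached.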
if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
caching = ttm_cached;
if (ttm && ttm->caching == ttm_cached)
mem->bus.caching == ttm_cached)
vres->base.bus.caching = ttm_cached;
ret = ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);
return ttm_cached;
if (i915_gem_object_is_shrinkable(obj) && caching == ttm_cached) {
ttm->caching == ttm_cached) ? I915_CACHE_LLC :
if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
ret = ttm_sg_tt_init(tt, tbo, page_flags, ttm_cached);
caching = ttm_cached;
if (ttm_tt_init(ttm, bo, page_flags, ttm_cached, 0)) {
if (ttm->caching == ttm_cached)
caching = ttm_cached;
ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);
enum ttm_caching pool_caching = ttm_cached;
KUNIT_ASSERT_EQ(test, res->bus.caching, ttm_cached);
enum ttm_caching caching = ttm_cached;
enum ttm_caching caching = ttm_cached;
enum ttm_caching caching = ttm_cached;
err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
err = ttm_tt_init(tt, bo, 0, ttm_cached, 0);
enum ttm_caching caching = ttm_cached;
enum ttm_caching caching = ttm_cached;
enum ttm_caching caching = ttm_cached;
int ret, cached = ttm->caching == ttm_cached;
else if (mem->bus.caching == ttm_cached)
if (num_pages == 1 && ttm->caching == ttm_cached &&
else if (mem->bus.caching == ttm_cached)
if (caching == ttm_cached)
case ttm_cached:
if (caching != ttm_cached && !PageHighMem(p))
case ttm_cached:
page_caching = ttm_cached;
ttm_pool_free_range(pool, tt, ttm_cached, caching_divide,
ttm_pool_free_range(restore->pool, tt, ttm_cached,
ttm_pool_free_range(NULL, tt, ttm_cached, start_page, tt->num_pages);
if (tt->caching != ttm_cached)
res->bus.caching = ttm_cached;
else if (mem->bus.caching == ttm_cached)
ttm_cached);
ttm_cached, 0);
mem->bus.caching = ttm_cached;
enum ttm_caching caching = ttm_cached;
caching = ttm_cached;
if (!xe_bo_is_vram(bo) && bo->ttm.ttm->caching == ttm_cached)
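Most of the ttm_tt_init()/ttm_sg_tt_init() matches above follow the same pattern: pick a caching mode for the new TT (cacheable for objects that stay in system memory, write-combined otherwise) and pass it to the init helper. A minimal, self-contained sketch of that pattern; the helper name and the system_only decision are illustrative assumptions, not taken from any one driver in the listing:

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_caching.h>
#include <drm/ttm/ttm_tt.h>

/*
 * Illustrative sketch only: choose a CPU caching mode for a fresh
 * ttm_tt and initialize it. "system_only" stands in for whatever
 * driver-specific test decides the object never leaves system
 * memory and can therefore stay cacheable.
 */
static int example_tt_setup(struct ttm_buffer_object *bo, struct ttm_tt *tt,
			    uint32_t page_flags, bool system_only)
{
	enum ttm_caching caching = system_only ? ttm_cached :
						 ttm_write_combined;

	/* 0 extra pages, as in the ttm_tt_init(..., ttm_cached, 0) calls above. */
	return ttm_tt_init(tt, bo, page_flags, caching, 0);
}

ttm_tt_init() takes the caching mode and a count of extra pages as its last two arguments, which is why the listed calls end in "ttm_cached, 0".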