/*
 * NOTE(review): this file appears to be grep/line-match output — isolated,
 * non-contiguous lines matching "clflush" — not a compilable translation
 * unit.  No definition here is complete (bodies, braces, and intervening
 * statements are missing), so no line should be edited as if it were real
 * source.  Provenance notes below are inferences from naming — TODO confirm
 * against the actual files before acting on any of them.
 */

/* Bare mentions/calls of clflush() — cache-line flush helper (fragments). */
clflush
clflush(cl);
clflush(md);
clflush((char *)page+offset);
clflush(last_page);
clflush(last_page);

/*
 * Fragment: looks like the interior of a caller that creates an async
 * clflush work item, chains it on the object's reservation fence, and
 * commits it — presumably i915_gem_clflush_object() from
 * drivers/gpu/drm/i915/gem/i915_gem_clflush.c.  The `if (clflush)` body's
 * braces/else path are not visible here — cannot verify control flow.
 */
clflush = NULL;
clflush = clflush_work_create(obj);
if (clflush) {
i915_sw_fence_await_reservation(&clflush->base.chain,
dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
dma_fence_work_commit(&clflush->base);

/* Fragment: work callback — recovers the containing struct clflush from
 * the embedded dma_fence_work base and performs the flush. */
struct clflush *clflush = container_of(base, typeof(*clflush), base);
__do_clflush(clflush->obj);

/* Fragment: release callback — drops the page pin and the object
 * reference taken at creation (see the "obj <-> clflush cycle" note). */
struct clflush *clflush = container_of(base, typeof(*clflush), base);
i915_gem_object_unpin_pages(clflush->obj);
i915_gem_object_put(clflush->obj);

/*
 * Fragment: constructor for the async clflush work item.
 * NOTE(review): as excerpted, `if (!clflush)` followed by `kfree(clflush)`
 * would be a no-op (kfree(NULL)); the lines between them — presumably an
 * early `return NULL`, a pin-pages call, and its failure path — are missing
 * from this extraction, so the apparent bug is an artifact of the grep,
 * not (necessarily) of the original code.  Verify against the source file.
 */
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
struct clflush *clflush;
clflush = kmalloc_obj(*clflush);
if (!clflush)
kfree(clflush);
dma_fence_work_init(&clflush->base, &clflush_ops);
clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */
return clflush;

/* Stray local declaration fragment (context lost). */
struct clflush *clflush;

/* Fragment: arch/test helper aliasing a generic flush to clflush. */
#define __flush_cache(p) clflush(p)

/* Fragment: presumably a KVM selftest macro where `clflush` is a
 * stringified asm operand pasted into inline assembly — TODO confirm. */
#define GUEST_MEASURE_EVENT(_msr, _value, clflush, FEP) \
clflush "\n\t" \