I915_GTT_PAGE_SIZE (usage sites across drivers/gpu/drm/i915)
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
i = vma_res->start / I915_GTT_PAGE_SIZE;
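
The sizing lines above share one convention: dividing a GTT offset by I915_GTT_PAGE_SIZE yields a PTE slot index, and rounding an object size up by it yields a PTE count. A minimal userspace sketch of that arithmetic, assuming the 4 KiB GTT page and 8-byte gen8 PTE; the object size and start offset are invented:

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SIZE 4096ULL
#define DIV_ROUND_UP_ULL(n, d) (((n) + (d) - 1) / (d))

typedef uint64_t gen8_pte_t;            /* gen8 PTEs are 8 bytes wide */

int main(void)
{
    uint64_t obj_size = 0x2800;         /* invented, not page aligned */
    uint64_t start = 0x10000;           /* invented GTT offset */

    /* one PTE per 4K page, rounding the object size up */
    uint64_t ptes = DIV_ROUND_UP_ULL(obj_size, I915_GTT_PAGE_SIZE);

    /* the PTE array itself is padded out to whole pages */
    uint64_t pt_bytes =
        DIV_ROUND_UP_ULL(ptes * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE) *
        I915_GTT_PAGE_SIZE;

    /* the addressable total is recovered from the PTE slot count */
    uint64_t total = pt_bytes / sizeof(gen8_pte_t) * I915_GTT_PAGE_SIZE;

    printf("first PTE %llu, %llu PTEs, PT %llu bytes, vm total %llu\n",
           (unsigned long long)(start / I915_GTT_PAGE_SIZE),
           (unsigned long long)ptes,
           (unsigned long long)pt_bytes,
           (unsigned long long)total);
    return 0;
}
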
flags |= PIN_OFFSET_GUARD | (guard * I915_GTT_PAGE_SIZE);
GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
I915_GTT_PAGE_SIZE, vm_total,
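
The GEM_BUG_ON checks above are cheap power-of-two tests: IS_ALIGNED(x, a) reduces to (x & (a - 1)) == 0. A hedged stand-alone version of the same assertions; all values are invented:

#include <assert.h>
#include <stdint.h>

#define I915_GTT_PAGE_SIZE 4096ULL
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)  /* power-of-two 'a' only */

int main(void)
{
    uint64_t dma_size = 8 * I915_GTT_PAGE_SIZE;    /* invented */
    uint64_t offset = 3 * I915_GTT_PAGE_SIZE;      /* invented */
    uint64_t guard = 2;                            /* invented, in pages */

    assert(IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));
    assert(offset >= I915_GTT_PAGE_SIZE);  /* mirrors GEM_BUG_ON(offset < ...) */

    /* the PIN_OFFSET_GUARD line converts a guard in pages to bytes */
    uint64_t guard_bytes = guard * I915_GTT_PAGE_SIZE;
    assert(IS_ALIGNED(guard_bytes, I915_GTT_PAGE_SIZE));
    return 0;
}
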
unsigned int first_entry = vma_res->start / I915_GTT_PAGE_SIZE;
GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
iter.dma += I915_GTT_PAGE_SIZE;
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
u32 ggtt_offset = vma_res->start / I915_GTT_PAGE_SIZE;
const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
GEM_BUG_ON(sg_dma_len(iter->sg) < I915_GTT_PAGE_SIZE);
iter->dma += I915_GTT_PAGE_SIZE;
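
The insert_entries paths walk the DMA scatterlist one GTT page at a time, advancing iter->dma by I915_GTT_PAGE_SIZE per PTE written and asserting each segment still holds a full page. A simplified model, with a plain array standing in for the sg table and invented segment addresses:

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SIZE 4096ULL

struct seg { uint64_t dma; uint64_t len; };  /* stand-in for one sg entry */

int main(void)
{
    /* invented two-segment mapping, both page aligned */
    struct seg sgl[] = {
        { 0x100000, 2 * I915_GTT_PAGE_SIZE },
        { 0x400000, 1 * I915_GTT_PAGE_SIZE },
    };
    uint64_t start = 0x20000;                        /* invented vma start */
    unsigned int entry = start / I915_GTT_PAGE_SIZE; /* first PTE slot */

    for (unsigned int s = 0; s < sizeof(sgl) / sizeof(sgl[0]); s++) {
        uint64_t dma = sgl[s].dma, rem = sgl[s].len;

        while (rem >= I915_GTT_PAGE_SIZE) {  /* one PTE per 4K page */
            printf("pte[%u] = %#llx\n", entry++, (unsigned long long)dma);
            dma += I915_GTT_PAGE_SIZE;
            rem -= I915_GTT_PAGE_SIZE;
        }
    }
    return 0;
}
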
page_size = I915_GTT_PAGE_SIZE;
I915_GTT_PAGE_SIZE);
page_size = I915_GTT_PAGE_SIZE;
rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE))
rem >= (I915_PDES - index) * I915_GTT_PAGE_SIZE)))
if (vma_res->bi.page_sizes.sg > I915_GTT_PAGE_SIZE) {
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
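
When a mapping cannot sustain a larger page, page_size falls back to I915_GTT_PAGE_SIZE. Roughly the selection gen8_ppgtt_insert_huge performs, heavily simplified: the real code also respects PDE boundaries via I915_PDES, and the 64K/2M constants below stand in for I915_GTT_PAGE_SIZE_64K and I915_GTT_PAGE_SIZE_2M:

#include <stdint.h>
#include <stdio.h>

#define SZ_4K  4096ULL
#define SZ_64K 65536ULL
#define SZ_2M  (2048ULL * 1024)
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* pick the largest page size a chunk supports, else fall back to 4K */
static uint64_t pick_page_size(uint64_t dma, uint64_t rem)
{
    if (IS_ALIGNED(dma, SZ_2M) && rem >= SZ_2M)
        return SZ_2M;
    if (IS_ALIGNED(dma, SZ_64K) && rem >= SZ_64K)
        return SZ_64K;
    return SZ_4K;  /* the I915_GTT_PAGE_SIZE fallback above */
}

int main(void)
{
    printf("%llu\n", (unsigned long long)pick_page_size(0x200000, SZ_2M));
    printf("%llu\n", (unsigned long long)pick_page_size(0x10000, SZ_64K));
    printf("%llu\n", (unsigned long long)pick_page_size(0x1000, SZ_4K));
    return 0;
}
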
BUILD_BUG_ON(I915_GTT_PAGE_SIZE != PAGE_SIZE);
ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
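
Both vm.total lines derive the addressable range from the size of the GSM: one PTE slot per mapped page, so the PTE width decides how far the same aperture reaches. A small illustration; the 8 MiB GSM size is invented:

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SIZE 4096ULL

int main(void)
{
    uint64_t gsm = 8ULL * 1024 * 1024;  /* invented: 8 MiB of PTE space */

    /* 8-byte gen8 PTEs map half as much as 4-byte gen6 PTEs would */
    printf("gen8 total: %llu MiB\n",
           (unsigned long long)(gsm / 8 * I915_GTT_PAGE_SIZE >> 20));
    printf("gen6 total: %llu MiB\n",
           (unsigned long long)(gsm / 4 * I915_GTT_PAGE_SIZE >> 20));
    return 0;
}
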
*start += I915_GTT_PAGE_SIZE;
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
*end -= I915_GTT_PAGE_SIZE;
start = (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
end = start + vma_res->guard / I915_GTT_PAGE_SIZE;
end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
vma_res->node_size / I915_GTT_PAGE_SIZE, pte_encode))
start += vma_res->node_size / I915_GTT_PAGE_SIZE;
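
The guard handling above programs scratch PTEs into one strip before the node and one after it, so stray accesses land on the scratch page rather than a neighbour. A sketch of the index arithmetic with a single guard page on each side; all values are invented:

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SIZE 4096ULL

int main(void)
{
    /* invented binding: one guard page on each side of the node */
    uint64_t guard = I915_GTT_PAGE_SIZE;
    uint64_t start = 0x10000, node_size = 3 * I915_GTT_PAGE_SIZE;

    uint64_t first = (start - guard) / I915_GTT_PAGE_SIZE;
    uint64_t lead_end = first + guard / I915_GTT_PAGE_SIZE;
    uint64_t tail_end = lead_end + (node_size + guard) / I915_GTT_PAGE_SIZE;

    /* the two guard strips get scratch PTEs, the node gets real ones */
    printf("guard PTEs %llu..%llu and %llu..%llu\n",
           (unsigned long long)first,
           (unsigned long long)(lead_end - 1),
           (unsigned long long)(lead_end + node_size / I915_GTT_PAGE_SIZE),
           (unsigned long long)(tail_end - 1));
    return 0;
}
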
unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
gte += (vma_res->start - vma_res->guard) / I915_GTT_PAGE_SIZE;
end = gte + vma_res->guard / I915_GTT_PAGE_SIZE;
end += (vma_res->node_size + vma_res->guard) / I915_GTT_PAGE_SIZE;
unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
ggtt->error_capture.size = 2 * I915_GTT_PAGE_SIZE;
__for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE)
__for_each_daddr_next(__dp, __iter, I915_GTT_PAGE_SIZE)
#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
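
The mask definition leans on two's complement: for a power of two, -I915_GTT_PAGE_SIZE has the same bit pattern as ~(I915_GTT_PAGE_SIZE - 1), so a single AND rounds an address down to a page boundary. A quick check:

#include <assert.h>
#include <stdint.h>

#define I915_GTT_PAGE_SIZE 4096ULL
#define I915_GTT_PAGE_MASK (-I915_GTT_PAGE_SIZE)  /* == ~4095 */

int main(void)
{
    uint64_t addr = 0x12345;  /* invented unaligned address */

    assert(I915_GTT_PAGE_MASK == ~(I915_GTT_PAGE_SIZE - 1));
    assert((addr & I915_GTT_PAGE_MASK) == 0x12000);  /* rounded down */
    return 0;
}
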
context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
context_size += I915_GTT_PAGE_SIZE; /* for redzone */
GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE);
if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE))
page_size = I915_GTT_PAGE_SIZE;
vaddr += engine->context_size - I915_GTT_PAGE_SIZE;
memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE);
if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) {
engine->context_size += I915_GTT_PAGE_SIZE;
engine->context_size -= I915_GTT_PAGE_SIZE;
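
The context-size lines implement two probes: a CONTEXT_REDZONE page appended after the image to catch overruns, and a POISON_INUSE fill of the trailing page to detect whether the hardware actually writes it. A userspace sketch of the redzone half; memchr_inv is reimplemented here since it is kernel-only, and the fill byte is invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define I915_GTT_PAGE_SIZE 4096
#define CONTEXT_REDZONE 0x5a  /* invented fill byte */

/* userspace stand-in for the kernel's memchr_inv() */
static const void *memchr_inv(const void *p, int c, size_t n)
{
    const unsigned char *b = p;

    for (size_t i = 0; i < n; i++)
        if (b[i] != (unsigned char)c)
            return b + i;
    return NULL;
}

int main(void)
{
    static unsigned char redzone[I915_GTT_PAGE_SIZE];

    /* arm the trap page appended after the context image */
    memset(redzone, CONTEXT_REDZONE, sizeof(redzone));

    redzone[100] = 0;  /* simulate a context overrun */

    if (memchr_inv(redzone, CONTEXT_REDZONE, sizeof(redzone)))
        puts("context overran into the redzone page");
    return 0;
}
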
vgpu_aperture_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);
vgpu_hidden_sz(vgpu) = ALIGN(request, I915_GTT_PAGE_SIZE);
start = ALIGN(gvt_hidden_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
end = ALIGN(gvt_hidden_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
start = ALIGN(gvt_aperture_gmadr_base(gvt), I915_GTT_PAGE_SIZE);
end = ALIGN(gvt_aperture_gmadr_end(gvt), I915_GTT_PAGE_SIZE);
size, I915_GTT_PAGE_SIZE,
if (guest_gma >= I915_GTT_PAGE_SIZE) {
offset = gma & (I915_GTT_PAGE_SIZE - 1);
copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
I915_GTT_PAGE_SIZE - offset : end_gma - gma;
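
The gma copy above splits a guest-memory range at page boundaries: the offset within the current page caps how much can be copied before the next address must be re-translated. A stand-alone version of that loop; the addresses are invented:

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SIZE 4096ULL

int main(void)
{
    /* invented graphics-memory copy spanning a page boundary */
    uint64_t gma = 0x10f80, end_gma = 0x11100;

    while (gma < end_gma) {
        uint64_t offset = gma & (I915_GTT_PAGE_SIZE - 1);
        uint64_t copy_len = end_gma - gma >= I915_GTT_PAGE_SIZE - offset ?
                            I915_GTT_PAGE_SIZE - offset : end_gma - gma;

        printf("copy %llu bytes at gma %#llx\n",
               (unsigned long long)copy_len, (unsigned long long)gma);
        gma += copy_len;
    }
    return 0;
}
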
if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
I915_GTT_PAGE_SIZE)))
for (index = 0; index < (I915_GTT_PAGE_SIZE >>
oos_page->mem, I915_GTT_PAGE_SIZE);
int page_entry_num = I915_GTT_PAGE_SIZE >>
(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
I915_GTT_PAGE_SIZE)
gpa_size += I915_GTT_PAGE_SIZE;
gpa_size = I915_GTT_PAGE_SIZE;
I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
I915_GTT_PAGE_SIZE - RING_CTX_SIZE);
gpa_size += I915_GTT_PAGE_SIZE;
gpa_size = I915_GTT_PAGE_SIZE;
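
The gpa_size updates coalesce physically contiguous guest pages into one copy: extend the run while the next page follows the last, otherwise flush the run and restart at I915_GTT_PAGE_SIZE. A sketch with invented guest page addresses:

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SIZE 4096ULL

int main(void)
{
    /* invented guest page frames backing a context image */
    uint64_t gpa[] = { 0x10000, 0x11000, 0x12000, 0x40000, 0x41000 };
    uint64_t base = gpa[0], size = I915_GTT_PAGE_SIZE;

    for (unsigned int i = 1; i < sizeof(gpa) / sizeof(gpa[0]); i++) {
        if (gpa[i] == base + size) {
            size += I915_GTT_PAGE_SIZE;  /* still contiguous, extend */
            continue;
        }
        printf("copy %llu bytes from %#llx\n",
               (unsigned long long)size, (unsigned long long)base);
        base = gpa[i];
        size = I915_GTT_PAGE_SIZE;       /* restart the run */
    }
    printf("copy %llu bytes from %#llx\n",
           (unsigned long long)size, (unsigned long long)base);
    return 0;
}
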
GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
start -= I915_GTT_PAGE_SIZE;
end += I915_GTT_PAGE_SIZE;
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE);
sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
unsigned int left = width * I915_GTT_PAGE_SIZE;
offset += length / I915_GTT_PAGE_SIZE;
left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
vma->page_sizes.sg > I915_GTT_PAGE_SIZE &&
sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
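
The remap/rotate paths pad each row of the view out to dst_stride pages, so (dst_stride - width) pages of filler follow every width-page row (dst_stride - height for the rotated case). A sketch of the resulting layout, with invented view dimensions:

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SIZE 4096ULL

int main(void)
{
    /* invented remapped view: 3-page-wide rows, 5-page destination stride */
    unsigned int width = 3, height = 2, dst_stride = 5;
    uint64_t offset = 0;

    for (unsigned int row = 0; row < height; row++) {
        printf("row %u: %llu bytes of pages, %llu bytes of padding\n",
               row,
               (unsigned long long)(width * I915_GTT_PAGE_SIZE),
               (unsigned long long)((dst_stride - width) *
                                    I915_GTT_PAGE_SIZE));
        offset += dst_stride * I915_GTT_PAGE_SIZE;
    }
    printf("view size: %llu bytes\n", (unsigned long long)offset);
    return 0;
}
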
*start -= I915_GTT_PAGE_SIZE;
*end += I915_GTT_PAGE_SIZE;
I915_GTT_PAGE_SIZE, 0, 0,
I915_GTT_PAGE_SIZE, 0, 0,
obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
.start = I915_GTT_PAGE_SIZE * 2,
.size = I915_GTT_PAGE_SIZE,
obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
I915_GTT_PAGE_SIZE | flags);
obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE);
(I915_GTT_PAGE_SIZE * 2) | flags);
I915_GTT_PAGE_SIZE);
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
total += 2 * I915_GTT_PAGE_SIZE) {
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
total, 2*I915_GTT_PAGE_SIZE);
for (total = I915_GTT_PAGE_SIZE;
total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
total += 2 * I915_GTT_PAGE_SIZE) {
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
total, 2*I915_GTT_PAGE_SIZE);
2 * I915_GTT_PAGE_SIZE,
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
offset, 2*I915_GTT_PAGE_SIZE);
ggtt->vm.total + I915_GTT_PAGE_SIZE, 0,
2*I915_GTT_PAGE_SIZE, 0,
0, I915_GTT_PAGE_SIZE,
-(u64)I915_GTT_PAGE_SIZE, 0,
0, 4*I915_GTT_PAGE_SIZE,
-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
0, 4*I915_GTT_PAGE_SIZE,
I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
total + I915_GTT_PAGE_SIZE <= ggtt->vm.total;
total += I915_GTT_PAGE_SIZE) {
I915_GTT_PAGE_SIZE);
total + 2 * I915_GTT_PAGE_SIZE <= ggtt->vm.total;
total += 2 * I915_GTT_PAGE_SIZE) {
2 * I915_GTT_PAGE_SIZE);
obj = i915_gem_object_create_internal(vm->i915, 2 * I915_GTT_PAGE_SIZE);
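
The selftests above step a cursor through the whole GGTT in one- or two-page increments, pinning a small internal object at each offset and checking node.start and node.size. The walk itself reduces to a loop like this; the GGTT size is invented:

#include <stdint.h>
#include <stdio.h>

#define I915_GTT_PAGE_SIZE 4096ULL

int main(void)
{
    /* invented tiny GGTT, walked the way the selftests above do */
    uint64_t vm_total = 16 * I915_GTT_PAGE_SIZE;
    unsigned int nodes = 0;

    for (uint64_t total = 0;
         total + 2 * I915_GTT_PAGE_SIZE <= vm_total;
         total += 2 * I915_GTT_PAGE_SIZE)
        nodes++;  /* one two-page pin per step */

    printf("%u two-page bindings fill the address space\n", nodes);
    return 0;
}
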