slice
if (i == nitems(v->slice)) {
v->slice[i].start = s;
v->slice[i].end = e;
im = &object->imut.slice[imut];
m = &object->mut.slice[i];
const vaddr_t is = acc[in].slice[j].start,
ie = acc[in].slice[j].end;
const struct addr_range *ar = &acc[out].slice[i];
struct addr_range slice[40];
sc->sc_vd->vd_desc[desc].slice = VD_SLICE_NONE;
uint8_t slice;
uint8_t slice;
uint8_t slice;
enum dbuf_slice slice;
for_each_dbuf_slice(display, slice) {
if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
enum dbuf_slice slice;
for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
dbuf_bw->active_planes[slice] |= BIT(plane_id);
enum dbuf_slice slice;
for_each_dbuf_slice(display, slice) {
max_bw = max(dbuf_bw->max_bw[slice], max_bw);
num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
enum dbuf_slice slice, bool enable)
i915_reg_t reg = DBUF_CTL_S(slice);
slice, str_enable_disable(enable));
enum dbuf_slice slice;
for_each_dbuf_slice(display, slice)
gen9_dbuf_slice_set(display, slice, req_slices & BIT(slice));
enum dbuf_slice slice;
for_each_dbuf_slice(display, slice)
intel_de_rmw(display, DBUF_CTL_S(slice),
enum dbuf_slice slice;
for_each_dbuf_slice(display, slice)
intel_de_rmw(display, DBUF_CTL_S(slice),
enum dbuf_slice slice;
for_each_dbuf_slice(display, slice) {
if (intel_de_read(display, DBUF_CTL_S(slice)) & DBUF_POWER_STATE)
enabled_slices |= BIT(slice);
#define DBUF_CTL_S(slice) _MMIO(_PICK(slice, \
int slice;
for_each_ss_steering(iter, engine->gt, slice, subslice) {
instdone->sampler[slice][subslice] =
slice, subslice);
instdone->row[slice][subslice] =
slice, subslice);
for_each_ss_steering(iter, engine->gt, slice, subslice)
instdone->geom_svg[slice][subslice] =
slice, subslice);
/* 2-bit field mask for the given slice's IZ hashing setting */
#define GEN9_IZ_HASHING_MASK(slice) (0x3 << (2 * (slice)))
/* Place a 2-bit IZ hashing value into the given slice's field */
#define GEN9_IZ_HASHING(slice, val) ((val) << (2 * (slice)))
/* Per-slice power-gate control ack register: base 0x804c, 4-byte stride per slice */
#define GEN9_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + (slice) * 0x4)
/*
 * Gen10 layout groups slices in threes: each group of three slices spans
 * 0x34 bytes, with a 4-byte stride per slice within the group.
 */
#define GEN10_SLICE_PGCTL_ACK(slice) _MMIO(0x804c + ((slice) / 3) * 0x34 + \
((slice) % 3) * 0x4)
/* Valid subslice ack bits: slice 0 exposes bits 6:0, other slices bits 4:0 */
#define GEN10_PGCTL_VALID_SS_MASK(slice) ((slice) == 0 ? REG_GENMASK(6, 0) : REG_GENMASK(4, 0))
/* EU power-gate ack (subslices 0/1): base 0x805c, 8-byte stride per slice */
#define GEN9_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + (slice) * 0x8)
/*
 * Gen10 EU power-gate ack (subslices 0/1): slices grouped in threes,
 * 0x30 bytes per group, 8-byte stride per slice within the group.
 */
#define GEN10_SS01_EU_PGCTL_ACK(slice) _MMIO(0x805c + ((slice) / 3) * 0x30 + \
((slice) % 3) * 0x8)
/* EU power-gate ack (subslices 2/3): base 0x8060, 8-byte stride per slice */
#define GEN9_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + (slice) * 0x8)
/*
 * Gen10 EU power-gate ack (subslices 2/3): slices grouped in threes,
 * 0x30 bytes per group, 8-byte stride per slice within the group.
 */
#define GEN10_SS23_EU_PGCTL_ACK(slice) _MMIO(0x8060 + ((slice) / 3) * 0x30 + \
((slice) % 3) * 0x8)
/* Per-slice EU disable fuse register: base 0x9134, 4-byte stride per slice */
#define GEN9_EU_DISABLE(slice) _MMIO(0x9134 + (slice) * 0x4)
/* Encode a slice id (2 bits) into the MCR steering field at bits 27:26 */
#define GEN8_MCR_SLICE(slice) ((0x3 & (slice)) << 26)
/* Encode a slice id (4 bits) into the MCR steering field starting at bit 27 */
#define GEN11_MCR_SLICE(slice) ((0xf & (slice)) << 27)
/* L3 remap log entry i for a slice: 0x200-byte slice stride, 4 bytes per entry */
#define GEN7_L3LOG(slice, i) _MMIO(0xb070 + (slice) * 0x200 + (i) * 4)
static int remap_l3_slice(struct i915_request *rq, int slice)
u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
intel_sseu_get_hsw_subslices(const struct sseu_dev_info *sseu, u8 slice)
if (WARN_ON(slice >= sseu->max_slices))
return sseu->subslice_mask.hsw[slice];
static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice,
WARN_ON(slice > 0);
return sseu->eu_mask.hsw[slice][subslice];
static void sseu_set_eus(struct sseu_dev_info *sseu, int slice, int subslice,
GEM_WARN_ON(slice > 0);
sseu->eu_mask.hsw[slice][subslice] = eu_mask;
intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice,
if (slice >= sseu->max_slices ||
return sseu->subslice_mask.hsw[slice] & BIT(subslice);
intel_sseu_get_hsw_subslices(const struct sseu_dev_info *sseu, u8 slice);
unsigned int slice, subslice;
slice = ffs(sseu->slice_mask) - 1;
GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);
unsigned int slice, unsigned int subslice)
mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
unsigned int slice, unsigned int subslice)
__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
gt->default_steering.groupid = slice;
unsigned long slice, subslice = 0, slice_mask = 0;
slice = __ffs(slice_mask);
subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
__add_mcr_wa(gt, wal, slice, subslice);
int slice, subslice, iter, i, num_steer_regs, num_tot_regs = 0;
for_each_ss_steering(iter, gt, slice, subslice)
for_each_ss_steering(iter, gt, slice, subslice) {
__fill_ext_reg(extarray, &gen8_extregs[i], slice, subslice);
__fill_ext_reg(extarray, &xehpg_extregs[i], slice, subslice);
int slice;
for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
slice, subslice,
ee->instdone.sampler[slice][subslice]);
for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
slice, subslice,
ee->instdone.row[slice][subslice]);
for_each_ss_steering(iter, ee->engine->gt, slice, subslice)
slice, subslice,
ee->instdone.geom_svg[slice][subslice]);
u8 slice = 0;
while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
slice--;
slice >= NUM_L3_SLICES(dev_priv)))
dev_priv->l3_parity.which_slice &= ~(1<<slice);
reg = GEN7_L3CDERRST1(slice);
parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
slice, row, bank, subbank);
/* Per-slice register: base 0xB008, 0x200-byte stride per slice */
#define GEN7_L3CDERRST1(slice) _MMIO(0xB008 + (slice) * 0x200) /* L3CD Error Status 1 */
if (i915->l3_parity.remap_info[slice]) {
remap_info = i915->l3_parity.remap_info[slice];
i915->l3_parity.remap_info[slice] = remap_info;
ctx->remap_slice |= BIT(slice);
int slice = (int)(uintptr_t)attr->private;
if (i915->l3_parity.remap_info[slice])
i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
int slice = (int)(uintptr_t)attr->private;
unsigned pitch, slice, mslice;
slice = track->cb_color_slice[id];
surf.nby = ((slice + 1) * 64) / surf.nbx;
slice = ((nby * surf.nbx) / 64) - 1;
ib[track->cb_color_slice_idx[id]] = slice;
radeon_bo_size(track->cb_color_bo[id]), slice);
unsigned pitch, slice, mslice;
slice = track->db_depth_slice;
surf.nby = ((slice + 1) * 64) / surf.nbx;
unsigned pitch, slice, mslice;
slice = track->db_depth_slice;
surf.nby = ((slice + 1) * 64) / surf.nbx;
int slice;
slice = pmap_schedule(map, DWC2_LS_PERIODIC_SLICES_PER_FRAME,
if (slice < 0)
return slice;
qh->ls_start_schedule_slice = slice;
#define V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(pps, slice) \
((slice)->slice_type == V4L2_H264_SLICE_TYPE_P || \
(slice)->slice_type == V4L2_H264_SLICE_TYPE_SP)) || \
(slice)->slice_type == V4L2_H264_SLICE_TYPE_B))
Eurydice_slice slice, uint8_t ret[33U]) {
uu____0, (size_t)0U, Eurydice_slice_len(slice, uint8_t), uint8_t *),
slice, uint8_t);
Eurydice_slice slice, uint8_t ret[34U]) {
uu____0, (size_t)0U, Eurydice_slice_len(slice, uint8_t), uint8_t *),
slice, uint8_t);
Eurydice_slice slice, uint8_t ret[1120U]) {
uu____0, (size_t)0U, Eurydice_slice_len(slice, uint8_t), uint8_t *),
slice, uint8_t);
Eurydice_slice slice, uint8_t ret[64U]) {
uu____0, (size_t)0U, Eurydice_slice_len(slice, uint8_t), uint8_t *),
slice, uint8_t);
#define Eurydice_slice_split_at(slice, mid, element_type, ret_t) \
EURYDICE_SLICE((element_type *)(slice).ptr, 0, mid), \
EURYDICE_SLICE((element_type *)(slice).ptr, mid, (slice).len) \
#define Eurydice_slice_split_at_mut(slice, mid, element_type, ret_t) \
KRML_CLITERAL(Eurydice_slice){EURYDICE_CFIELD(.ptr =)(slice.ptr), \
((char *)slice.ptr + mid * sizeof(element_type)), \
EURYDICE_CFIELD(.len =)(slice.len - mid) \
Eurydice_slice slice;
size_t chunk_size = chunks->slice.len >= chunks->chunk_size
: chunks->slice.len;
curr_chunk.ptr = chunks->slice.ptr;
chunks->slice.ptr = (char *)(chunks->slice.ptr) + chunk_size * element_size;
chunks->slice.len = chunks->slice.len - chunk_size;
((Eurydice_chunks){.slice = slice_, .chunk_size = sz_})
.slice = {.ptr = slice_.ptr, .len = slice_.len - (slice_.len % sz_)}, \
(((iter)->slice.len == 0) ? ((ret_t){.tag = core_option_None}) \
Eurydice_slice slice, uint8_t ret[32U]) {
uu____0, (size_t)0U, Eurydice_slice_len(slice, uint8_t), uint8_t *),
slice, uint8_t);
struct slice slices[tlv_count];
struct slice *sl;
struct slice *sl;