games/hunt/huntd/answer.c
143
ce(ALL_PLAYERS);
games/hunt/huntd/draw.c
335
ce(pp);
games/hunt/huntd/driver.c
852
ce(ALL_PLAYERS);
games/hunt/huntd/driver.c
875
ce(ALL_PLAYERS);
games/hunt/huntd/server.h
268
void ce(PLAYER *);
lib/libc/gdtoa/smisc.c
150
ULong *ce, *x, *xe;
lib/libc/gdtoa/smisc.c
155
ce = c + ((n-1) >> kshift) + 1;
lib/libc/gdtoa/smisc.c
169
while(c < ce)
sbin/unwind/libunbound/services/authzone.c
2408
struct auth_data* ce)
sbin/unwind/libunbound/services/authzone.c
2418
if(ce && nmlen == ce->namelen)
sbin/unwind/libunbound/services/authzone.c
2477
struct auth_data* node, int node_exact, struct auth_data** ce,
sbin/unwind/libunbound/services/authzone.c
2482
*ce = NULL;
sbin/unwind/libunbound/services/authzone.c
2489
*ce = n;
sbin/unwind/libunbound/services/authzone.c
2495
*ce = NULL;
sbin/unwind/libunbound/services/authzone.c
2510
*ce = n;
sbin/unwind/libunbound/services/authzone.c
2518
*ce = n;
sbin/unwind/libunbound/services/authzone.c
2523
if(*ce == NULL && !domain_has_only_nsec3(n)) {
sbin/unwind/libunbound/services/authzone.c
2526
*ce = n;
sbin/unwind/libunbound/services/authzone.c
3237
struct dns_msg* msg, struct auth_data* ce, struct auth_rrset* rrset)
sbin/unwind/libunbound/services/authzone.c
3241
log_assert(ce);
sbin/unwind/libunbound/services/authzone.c
3243
if(!msg_add_rrset_ns(z, region, msg, ce, rrset)) return 0;
sbin/unwind/libunbound/services/authzone.c
3245
if((ds=az_domain_rrset(ce, LDNS_RR_TYPE_DS))!=NULL) {
sbin/unwind/libunbound/services/authzone.c
3246
if(!msg_add_rrset_ns(z, region, msg, ce, ds)) return 0;
sbin/unwind/libunbound/services/authzone.c
3249
if((nsec=az_domain_rrset(ce, LDNS_RR_TYPE_NSEC))!=NULL) {
sbin/unwind/libunbound/services/authzone.c
3250
if(!msg_add_rrset_ns(z, region, msg, ce, nsec))
sbin/unwind/libunbound/services/authzone.c
3253
if(!az_add_nsec3_proof(z, region, msg, ce->name,
sbin/unwind/libunbound/services/authzone.c
3254
ce->namelen, msg->qinfo.qname,
sbin/unwind/libunbound/services/authzone.c
3267
struct regional* region, struct dns_msg* msg, struct auth_data* ce,
sbin/unwind/libunbound/services/authzone.c
3270
log_assert(ce);
sbin/unwind/libunbound/services/authzone.c
3272
if(!msg_add_rrset_an(z, region, msg, ce, rrset)) return 0;
sbin/unwind/libunbound/services/authzone.c
3274
msg, ce, rrset)) return 0;
sbin/unwind/libunbound/services/authzone.c
3290
struct regional* region, struct dns_msg* msg, struct auth_data* ce,
sbin/unwind/libunbound/services/authzone.c
3330
} else if(ce) {
sbin/unwind/libunbound/services/authzone.c
3351
struct dns_msg* msg, struct auth_data* ce, struct auth_data* node)
sbin/unwind/libunbound/services/authzone.c
3358
if(ce && !az_nsec_wildcard_denial(z, region, msg, ce->name,
sbin/unwind/libunbound/services/authzone.c
3359
ce->namelen)) return 0;
sbin/unwind/libunbound/services/authzone.c
3360
} else if(ce) {
sbin/unwind/libunbound/services/authzone.c
3361
if(!az_add_nsec3_proof(z, region, msg, ce->name,
sbin/unwind/libunbound/services/authzone.c
3362
ce->namelen, msg->qinfo.qname,
sbin/unwind/libunbound/services/authzone.c
3396
struct regional* region, struct dns_msg* msg, struct auth_data* ce,
sbin/unwind/libunbound/services/authzone.c
3403
if(ce && rrset && rrset->type == LDNS_RR_TYPE_NS) {
sbin/unwind/libunbound/services/authzone.c
3404
return az_generate_referral_answer(z, region, msg, ce, rrset);
sbin/unwind/libunbound/services/authzone.c
3406
if(ce && rrset && rrset->type == LDNS_RR_TYPE_DNAME) {
sbin/unwind/libunbound/services/authzone.c
3407
return az_generate_dname_answer(z, qinfo, region, msg, ce,
sbin/unwind/libunbound/services/authzone.c
3416
if((wildcard=az_find_wildcard(z, qinfo, ce)) != NULL) {
sbin/unwind/libunbound/services/authzone.c
3418
ce, wildcard, node);
sbin/unwind/libunbound/services/authzone.c
3421
return az_generate_nxdomain_answer(z, region, msg, ce, node);
sbin/unwind/libunbound/services/authzone.c
3429
struct auth_data* node, *ce;
sbin/unwind/libunbound/services/authzone.c
3443
node_exists = az_find_ce(z, qinfo, node, node_exact, &ce, &rrset);
sbin/unwind/libunbound/services/authzone.c
3457
if(ce)
sbin/unwind/libunbound/services/authzone.c
3458
sldns_wire2str_dname_buf(ce->name, ce->namelen,
sbin/unwind/libunbound/services/authzone.c
3476
ce, rrset, node);
sbin/unwind/libunbound/services/cache/rrset.c
249
struct ub_packed_rrset_key* rrset, uint8_t* ce, size_t ce_len,
sbin/unwind/libunbound/services/cache/rrset.c
263
memmove(wc_dname+2, ce, ce_len);
sbin/unwind/libunbound/services/cache/rrset.h
151
struct ub_packed_rrset_key* rrset, uint8_t* ce, size_t ce_len,
sbin/unwind/libunbound/services/rpz.c
1174
uint8_t* ce;
sbin/unwind/libunbound/services/rpz.c
1211
ce = dname_get_shared_topdomain(z->name, qname);
sbin/unwind/libunbound/services/rpz.c
1212
if(!ce /* should not happen */) {
sbin/unwind/libunbound/services/rpz.c
1219
ce_labs = dname_count_size_labels(ce, &ce_len);
sbin/unwind/libunbound/services/rpz.c
1229
memmove(wc+2, ce, ce_len);
sbin/unwind/libunbound/validator/val_neg.c
1388
uint8_t* ce = NULL;
sbin/unwind/libunbound/validator/val_neg.c
1433
if(!(ce = nsec_closest_encloser(qinfo->qname, nsec)))
sbin/unwind/libunbound/validator/val_neg.c
1435
dname_count_size_labels(ce, &ce_len);
sbin/unwind/libunbound/validator/val_neg.c
1439
if(!nodata_wc || query_dname_compare(nodata_wc, ce) != 0) {
sbin/unwind/libunbound/validator/val_neg.c
1448
memmove(wc_ce+2, ce, ce_len);
sbin/unwind/libunbound/validator/val_nsec.c
213
uint8_t* wc = NULL, *ce = NULL;
sbin/unwind/libunbound/validator/val_nsec.c
271
ce = nsec_closest_encloser(qinfo->qname,
sbin/unwind/libunbound/validator/val_nsec.c
275
if(wc && !ce)
sbin/unwind/libunbound/validator/val_nsec.c
277
else if(wc && ce) {
sbin/unwind/libunbound/validator/val_nsec.c
279
if(query_dname_compare(wc, ce) != 0)
sbin/unwind/libunbound/validator/val_nsec.c
330
uint8_t* ce = nsec->rk.dname;
sbin/unwind/libunbound/validator/val_nsec.c
332
dname_remove_label(&ce, &ce_len);
sbin/unwind/libunbound/validator/val_nsec.c
337
if(dname_strict_subdomain_c(qinfo->qname, ce)) {
sbin/unwind/libunbound/validator/val_nsec.c
352
*wc = ce;
sbin/unwind/libunbound/validator/val_nsec.c
368
uint8_t* ce = nm;
sbin/unwind/libunbound/validator/val_nsec.c
369
dname_remove_label(&ce, &ce_len);
sbin/unwind/libunbound/validator/val_nsec.c
370
if(dname_strict_subdomain_c(qinfo->qname, ce)) {
sbin/unwind/libunbound/validator/val_nsec.c
371
*wc = ce;
sbin/unwind/libunbound/validator/val_nsec.c
502
uint8_t* ce;
sbin/unwind/libunbound/validator/val_nsec.c
509
ce = nsec_closest_encloser(qinf->qname, nsec);
sbin/unwind/libunbound/validator/val_nsec.c
510
if(!ce)
sbin/unwind/libunbound/validator/val_nsec.c
512
if(query_dname_compare(wc, ce) != 0) {
sbin/unwind/libunbound/validator/val_nsec.c
525
uint8_t* ce = nsec_closest_encloser(qname, nsec);
sbin/unwind/libunbound/validator/val_nsec.c
529
if(!ce)
sbin/unwind/libunbound/validator/val_nsec.c
535
labs = dname_count_labels(qname) - dname_count_labels(ce);
sbin/unwind/libunbound/validator/val_nsec3.c
1012
int prove_does_not_exist, struct ce_response* ce, int* calculations)
sbin/unwind/libunbound/validator/val_nsec3.c
1017
memset(ce, 0, sizeof(*ce));
sbin/unwind/libunbound/validator/val_nsec3.c
1019
if(!nsec3_find_closest_encloser(env, flt, ct, qinfo, ce, calculations)) {
sbin/unwind/libunbound/validator/val_nsec3.c
1038
log_nametypeclass(VERB_ALGO, "ce candidate", ce->ce, 0, 0);
sbin/unwind/libunbound/validator/val_nsec3.c
1040
if(query_dname_compare(ce->ce, qinfo->qname) == 0) {
sbin/unwind/libunbound/validator/val_nsec3.c
1054
if(nsec3_has_type(ce->ce_rrset, ce->ce_rr, LDNS_RR_TYPE_NS) &&
sbin/unwind/libunbound/validator/val_nsec3.c
1055
!nsec3_has_type(ce->ce_rrset, ce->ce_rr, LDNS_RR_TYPE_SOA)) {
sbin/unwind/libunbound/validator/val_nsec3.c
1056
if(!nsec3_has_type(ce->ce_rrset, ce->ce_rr, LDNS_RR_TYPE_DS)) {
sbin/unwind/libunbound/validator/val_nsec3.c
1065
if(nsec3_has_type(ce->ce_rrset, ce->ce_rr, LDNS_RR_TYPE_DNAME)) {
sbin/unwind/libunbound/validator/val_nsec3.c
1072
next_closer(qinfo->qname, qinfo->qname_len, ce->ce, &nc, &nc_len);
sbin/unwind/libunbound/validator/val_nsec3.c
1074
&ce->nc_rrset, &ce->nc_rr, calculations)) {
sbin/unwind/libunbound/validator/val_nsec3.c
1098
nsec3_ce_wildcard(struct regional* region, uint8_t* ce, size_t celen,
sbin/unwind/libunbound/validator/val_nsec3.c
1111
memmove(nm+2, ce, celen);
sbin/unwind/libunbound/validator/val_nsec3.c
1121
struct ce_response ce;
sbin/unwind/libunbound/validator/val_nsec3.c
1131
sec = nsec3_prove_closest_encloser(env, flt, ct, qinfo, 1, &ce, calc);
sbin/unwind/libunbound/validator/val_nsec3.c
1144
log_nametypeclass(VERB_ALGO, "nsec3 nameerror: proven ce=", ce.ce,0,0);
sbin/unwind/libunbound/validator/val_nsec3.c
1148
log_assert(ce.ce);
sbin/unwind/libunbound/validator/val_nsec3.c
1149
wc = nsec3_ce_wildcard(ct->region, ce.ce, ce.ce_len, &wclen);
sbin/unwind/libunbound/validator/val_nsec3.c
1175
if(ce.nc_rrset && nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
sbin/unwind/libunbound/validator/val_nsec3.c
1213
struct ce_response ce;
sbin/unwind/libunbound/validator/val_nsec3.c
1281
sec = nsec3_prove_closest_encloser(env, flt, ct, qinfo, 1, &ce, calc);
sbin/unwind/libunbound/validator/val_nsec3.c
1297
log_assert(ce.ce);
sbin/unwind/libunbound/validator/val_nsec3.c
1298
wc = nsec3_ce_wildcard(ct->region, ce.ce, ce.ce_len, &wclen);
sbin/unwind/libunbound/validator/val_nsec3.c
1324
if(ce.nc_rrset && nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
sbin/unwind/libunbound/validator/val_nsec3.c
1348
if(!ce.nc_rrset) {
sbin/unwind/libunbound/validator/val_nsec3.c
1354
log_assert(ce.nc_rrset);
sbin/unwind/libunbound/validator/val_nsec3.c
1355
if(!nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
sbin/unwind/libunbound/validator/val_nsec3.c
1393
struct ce_response ce;
sbin/unwind/libunbound/validator/val_nsec3.c
1411
memset(&ce, 0, sizeof(ce));
sbin/unwind/libunbound/validator/val_nsec3.c
1412
ce.ce = wc;
sbin/unwind/libunbound/validator/val_nsec3.c
1413
ce.ce_len = wclen;
sbin/unwind/libunbound/validator/val_nsec3.c
1417
next_closer(qinfo->qname, qinfo->qname_len, ce.ce, &nc, &nc_len);
sbin/unwind/libunbound/validator/val_nsec3.c
1419
&ce.nc_rrset, &ce.nc_rr, calc)) {
sbin/unwind/libunbound/validator/val_nsec3.c
1438
if(ce.nc_rrset && nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
sbin/unwind/libunbound/validator/val_nsec3.c
1484
struct ce_response ce;
sbin/unwind/libunbound/validator/val_nsec3.c
1549
sec = nsec3_prove_closest_encloser(env, &flt, ct, qinfo, 1, &ce, &calc);
sbin/unwind/libunbound/validator/val_nsec3.c
1562
if(!ce.nc_rrset) {
sbin/unwind/libunbound/validator/val_nsec3.c
1572
log_assert(ce.nc_rrset);
sbin/unwind/libunbound/validator/val_nsec3.c
1573
if(!nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
sbin/unwind/libunbound/validator/val_nsec3.c
93
uint8_t* ce;
sbin/unwind/libunbound/validator/val_nsec3.c
938
struct ce_response* ce, int* calculations)
sbin/unwind/libunbound/validator/val_nsec3.c
958
&ce->ce_rrset, &ce->ce_rr, calculations)) {
sbin/unwind/libunbound/validator/val_nsec3.c
959
ce->ce = nm;
sbin/unwind/libunbound/validator/val_nsec3.c
960
ce->ce_len = nmlen;
sbin/unwind/libunbound/validator/val_nsec3.c
980
next_closer(uint8_t* qname, size_t qnamelen, uint8_t* ce,
sbin/unwind/libunbound/validator/val_nsec3.c
983
int strip = dname_count_labels(qname) - dname_count_labels(ce) -1;
sbin/unwind/libunbound/validator/validator.c
1140
uint8_t* ce = NULL; /* for wildcard nodata responses. This is the
sbin/unwind/libunbound/validator/validator.c
1160
ce = nsec_closest_encloser(qchase->qname, s);
sbin/unwind/libunbound/validator/validator.c
1177
if(wc && !ce)
sbin/unwind/libunbound/validator/validator.c
1179
else if(wc && ce) {
sbin/unwind/libunbound/validator/validator.c
1180
if(query_dname_compare(wc, ce) != 0) {
sbin/unwind/libunbound/validator/validator.c
1251
uint8_t* ce;
sbin/unwind/libunbound/validator/validator.c
1262
ce = nsec_closest_encloser(qchase->qname, s);
sbin/unwind/libunbound/validator/validator.c
1263
ce_labs = dname_count_labels(ce);
sbin/unwind/libunbound/validator/validator.c
1664
uint8_t* ce = NULL; /* for wildcard nodata responses. This is the
sbin/unwind/libunbound/validator/validator.c
1691
ce = nsec_closest_encloser(qchase->qname, s);
sbin/unwind/libunbound/validator/validator.c
1722
if(wc && !ce)
sbin/unwind/libunbound/validator/validator.c
1724
else if(wc && ce) {
sbin/unwind/libunbound/validator/validator.c
1725
if(query_dname_compare(wc, ce) != 0) {
sys/dev/acpi/acpidmar.h
305
context_set_fpd(struct context_entry *ce, int enable)
sys/dev/acpi/acpidmar.h
307
ce->lo &= ~CTX_FPD;
sys/dev/acpi/acpidmar.h
309
ce->lo |= CTX_FPD;
sys/dev/acpi/acpidmar.h
314
context_set_present(struct context_entry *ce)
sys/dev/acpi/acpidmar.h
316
ce->lo |= CTX_P;
sys/dev/acpi/acpidmar.h
321
context_set_slpte(struct context_entry *ce, paddr_t slpte)
sys/dev/acpi/acpidmar.h
323
ce->lo &= VTD_PAGE_MASK;
sys/dev/acpi/acpidmar.h
324
ce->lo |= (slpte & ~VTD_PAGE_MASK);
sys/dev/acpi/acpidmar.h
329
context_set_translation_type(struct context_entry *ce, int tt)
sys/dev/acpi/acpidmar.h
331
ce->lo &= ~(CTX_T_MASK << CTX_T_SHIFT);
sys/dev/acpi/acpidmar.h
332
ce->lo |= ((tt & CTX_T_MASK) << CTX_T_SHIFT);
sys/dev/acpi/acpidmar.h
337
context_set_address_width(struct context_entry *ce, int lvl)
sys/dev/acpi/acpidmar.h
339
ce->hi &= ~(CTX_H_AW_MASK << CTX_H_AW_SHIFT);
sys/dev/acpi/acpidmar.h
340
ce->hi |= ((lvl & CTX_H_AW_MASK) << CTX_H_AW_SHIFT);
sys/dev/acpi/acpidmar.h
345
context_set_domain_id(struct context_entry *ce, int did)
sys/dev/acpi/acpidmar.h
347
ce->hi &= ~(CTX_H_DID_MASK << CTX_H_DID_SHIFT);
sys/dev/acpi/acpidmar.h
348
ce->hi |= ((did & CTX_H_DID_MASK) << CTX_H_DID_SHIFT);
sys/dev/acpi/acpidmar.h
353
context_pte(struct context_entry *ce)
sys/dev/acpi/acpidmar.h
355
return (ce->lo & ~VTD_PAGE_MASK);
sys/dev/acpi/acpidmar.h
360
context_translation_type(struct context_entry *ce)
sys/dev/acpi/acpidmar.h
362
return (ce->lo >> CTX_T_SHIFT) & CTX_T_MASK;
sys/dev/acpi/acpidmar.h
367
context_domain_id(struct context_entry *ce)
sys/dev/acpi/acpidmar.h
369
return (ce->hi >> CTX_H_DID_SHIFT) & CTX_H_DID_MASK;
sys/dev/acpi/acpidmar.h
374
context_address_width(struct context_entry *ce)
sys/dev/acpi/acpidmar.h
376
return VTD_LEVELTOAW((ce->hi >> CTX_H_AW_SHIFT) & CTX_H_AW_MASK);
sys/dev/acpi/acpidmar.h
381
context_entry_is_valid(struct context_entry *ce)
sys/dev/acpi/acpidmar.h
383
return (ce->lo & CTX_P);
sys/dev/acpi/acpidmar.h
388
context_user(struct context_entry *ce)
sys/dev/acpi/acpidmar.h
390
return (ce->hi >> CTX_H_USER_SHIFT) & CTX_H_USER_MASK;
sys/dev/acpi/acpidmar.h
394
context_set_user(struct context_entry *ce, int v)
sys/dev/acpi/acpidmar.h
396
ce->hi &= ~(CTX_H_USER_MASK << CTX_H_USER_SHIFT);
sys/dev/acpi/acpidmar.h
397
ce->hi |= ((v & CTX_H_USER_MASK) << CTX_H_USER_SHIFT);
sys/dev/cardbus/cardslot.c
194
struct cardslot_event *ce;
sys/dev/cardbus/cardslot.c
203
ce = pool_get(&cardsloteventpool, PR_NOWAIT);
sys/dev/cardbus/cardslot.c
204
if (ce == NULL)
sys/dev/cardbus/cardslot.c
206
ce->ce_type = ev;
sys/dev/cardbus/cardslot.c
209
SIMPLEQ_INSERT_TAIL(&sc->sc_events, ce, ce_q);
sys/dev/cardbus/cardslot.c
235
struct cardslot_event *ce;
sys/dev/cardbus/cardslot.c
239
if ((ce = SIMPLEQ_FIRST(&sc->sc_events)) == NULL) {
sys/dev/cardbus/cardslot.c
245
if (IS_CARDSLOT_INSERT_REMOVE_EV(ce->ce_type)) {
sys/dev/cardbus/cardslot.c
260
if (ce1->ce_type != antonym_ev[ce->ce_type])
sys/dev/cardbus/cardslot.c
264
if (ce2->ce_type == ce->ce_type) {
sys/dev/cardbus/cardslot.c
274
ev = ce->ce_type;
sys/dev/cardbus/cardslot.c
275
pool_put(&cardsloteventpool, ce);
sys/dev/fdt/bcm2835_dmac.c
204
uint32_t cs, ce;
sys/dev/fdt/bcm2835_dmac.c
210
ce = bcmdmac_read(sc, DMAC_DEBUG(ch->ch_index));
sys/dev/fdt/bcm2835_dmac.c
211
ce &= DMAC_DEBUG_READ_ERROR | DMAC_DEBUG_FIFO_ERROR
sys/dev/fdt/bcm2835_dmac.c
213
bcmdmac_write(sc, DMAC_DEBUG(ch->ch_index), ce);
sys/dev/fdt/bcm2835_dmac.c
216
ch->ch_callback(cs, ce, ch->ch_callbackarg);
sys/dev/ic/mpi.c
1529
struct mpi_sge *ce = NULL, *nce;
sys/dev/ic/mpi.c
1553
ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
sys/dev/ic/mpi.c
1554
io->chain_offset = (u_int32_t *)ce - (u_int32_t *)io;
sys/dev/ic/mpi.c
1559
if (nsge == ce) {
sys/dev/ic/mpi.c
1574
ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |
sys/dev/ic/mpi.c
1577
mpi_dvatosge(ce, ccb->ccb_cmd_dva +
sys/dev/ic/mpi.c
1580
ce = nce;
sys/dev/ic/qwx.c
14268
struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[ep->ul_pipe_id];
sys/dev/ic/qwx.c
21709
spin_lock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwx.c
21734
spin_unlock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwx.c
21774
struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
sys/dev/ic/qwx.c
22081
qwx_dp_shadow_stop_timer(sc, &sc->ce.hp_timer[i]);
sys/dev/ic/qwx.c
22091
pipe = &sc->ce.ce_pipe[i];
sys/dev/ic/qwx.c
22093
qwx_dp_shadow_stop_timer(sc, &sc->ce.hp_timer[i]);
sys/dev/ic/qwx.c
22233
struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
sys/dev/ic/qwx.c
22351
pipe = &sc->ce.ce_pipe[pipe_num];
sys/dev/ic/qwx.c
22369
pipe = &sc->ce.ce_pipe[i];
sys/dev/ic/qwx.c
22473
qwx_dp_shadow_init_timer(sc, &sc->ce.hp_timer[ce_id],
sys/dev/ic/qwx.c
22487
pipe = &sc->ce.ce_pipe[i];
sys/dev/ic/qwx.c
22573
lockdep_assert_held(&ab->ce.ce_lock);
sys/dev/ic/qwx.c
22629
spin_lock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwx.c
22674
spin_unlock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwx.c
22687
pipe = &sc->ce.ce_pipe[i];
sys/dev/ic/qwx.c
22716
spin_lock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwx.c
22752
spin_unlock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwx.c
22809
struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
sys/dev/ic/qwx.c
22830
struct qwx_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
sys/dev/ic/qwx.c
22848
spin_lock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwx.c
22860
spin_unlock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwx.c
22869
spin_lock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwx.c
22909
qwx_dp_shadow_start_timer(sc, srng, &sc->ce.hp_timer[pipe_id]);
sys/dev/ic/qwx.c
22915
spin_unlock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwxvar.h
1896
struct qwx_ce ce;
sys/dev/ic/qwz.c
11942
struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[ep->ul_pipe_id];
sys/dev/ic/qwz.c
18997
spin_lock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwz.c
19022
spin_unlock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwz.c
19047
struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
sys/dev/ic/qwz.c
19340
pipe = &sc->ce.ce_pipe[i];
sys/dev/ic/qwz.c
19480
struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
sys/dev/ic/qwz.c
19596
pipe = &sc->ce.ce_pipe[pipe_num];
sys/dev/ic/qwz.c
19614
pipe = &sc->ce.ce_pipe[i];
sys/dev/ic/qwz.c
19730
pipe = &sc->ce.ce_pipe[i];
sys/dev/ic/qwz.c
19816
lockdep_assert_held(&ab->ce.ce_lock);
sys/dev/ic/qwz.c
19872
spin_lock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwz.c
19917
spin_unlock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwz.c
19930
pipe = &sc->ce.ce_pipe[i];
sys/dev/ic/qwz.c
19959
spin_lock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwz.c
19995
spin_unlock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwz.c
20052
struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[ce_id];
sys/dev/ic/qwz.c
20072
struct qwz_ce_pipe *pipe = &sc->ce.ce_pipe[pipe_id];
sys/dev/ic/qwz.c
20090
spin_lock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwz.c
20102
spin_unlock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwz.c
20111
spin_lock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwz.c
20154
spin_unlock_bh(&ab->ce.ce_lock);
sys/dev/ic/qwzvar.h
1963
struct qwz_ce ce;
sys/dev/ic/sili.c
1533
struct sili_sge *nsge = sgl, *ce = NULL;
sys/dev/ic/sili.c
1550
ce = &sgl[sgllen - 1];
sys/dev/ic/sili.c
1553
if (nsge == ce) {
sys/dev/ic/sili.c
1559
ce->addr_lo = htole32((u_int32_t)addr);
sys/dev/ic/sili.c
1560
ce->addr_hi = htole32((u_int32_t)(addr >> 32));
sys/dev/ic/sili.c
1561
ce->flags = htole32(SILI_SGE_LNK);
sys/dev/ic/sili.c
1564
ce += SILI_SGT_SGLLEN;
sys/dev/ic/sili.c
1566
ce = NULL;
sys/dev/pci/drm/amd/amdgpu/amdgpu_gfx.h
411
struct amdgpu_ce ce;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ras.c
1726
unsigned long ce, ue;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ras.c
1737
ce = 0;
sys/dev/pci/drm/amd/amdgpu/amdgpu_ras.c
1746
ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ras.c
1750
ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
sys/dev/pci/drm/amd/amdgpu/amdgpu_ras.c
1757
*ce_count = ce;
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
5008
amdgpu_bo_free_kernel(&adev->gfx.ce.ce_fw_obj,
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
5009
&adev->gfx.ce.ce_fw_gpu_addr,
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
5010
(void **)&adev->gfx.ce.ce_fw_ptr);
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
6199
&adev->gfx.ce.ce_fw_obj,
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
6200
&adev->gfx.ce.ce_fw_gpu_addr,
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
6201
(void **)&adev->gfx.ce.ce_fw_ptr);
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
6208
memcpy(adev->gfx.ce.ce_fw_ptr, fw_data, fw_size);
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
6210
amdgpu_bo_kunmap(adev->gfx.ce.ce_fw_obj);
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
6211
amdgpu_bo_unreserve(adev->gfx.ce.ce_fw_obj);
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
6241
adev->gfx.ce.ce_fw_gpu_addr & 0xFFFFF000);
sys/dev/pci/drm/amd/amdgpu/gfx_v10_0.c
6243
upper_32_bits(adev->gfx.ce.ce_fw_gpu_addr));
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1005
intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1009
if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1010
ret = intel_context_reconfigure_sseu(ce, sseu);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1013
__set_bit(CONTEXT_LOW_LATENCY, &ce->flags);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1021
struct intel_context *ce = e->engines[count], *child;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1023
if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1026
for_each_child(ce, child)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1028
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1066
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1072
for_each_gem_engine(ce, engines, it) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1073
unsigned int class = ce->engine->uabi_class;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1076
atomic64_add(intel_context_get_total_runtime_ns(ce),
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1135
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1145
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1146
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1147
err = ERR_CAST(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1151
e->engines[engine->legacy_idx] = ce;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1157
ret = intel_context_set_gem(ce, ctx, sseu);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1172
static int perma_pin_contexts(struct intel_context *ce)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1177
GEM_BUG_ON(!intel_context_is_parent(ce));
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1179
ret = intel_context_pin(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1183
for_each_child(ce, child) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1190
set_bit(CONTEXT_PERMA_PIN, &ce->flags);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1195
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1196
for_each_child(ce, child) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1219
struct intel_context *ce, *child;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1224
ce = intel_context_create(pe[n].engine);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1228
ce = intel_engine_create_virtual(pe[n].siblings,
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1233
ce = intel_engine_create_parallel(pe[n].siblings,
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1244
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1245
err = ERR_CAST(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1249
e->engines[n] = ce;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1251
ret = intel_context_set_gem(ce, ctx, pe->sseu);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1256
for_each_child(ce, child) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1274
ret = perma_pin_contexts(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1363
static struct intel_engine_cs *active_engine(struct intel_context *ce)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1368
if (intel_context_has_inflight(ce))
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1369
return intel_context_inflight(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1371
if (!ce->timeline)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1380
list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1389
if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1405
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1414
for_each_gem_engine(ce, engines, it) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1417
if ((exit || !persistent) && intel_context_revoke(ce))
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1427
engine = active_engine(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1484
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1490
for_each_gem_engine(ce, engines, it) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1494
intel_context_close(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1495
if (!intel_context_pin_if_active(ce))
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1500
&ce->active,
sys/dev/pci/drm/i915/gem/i915_gem_context.c
1502
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2040
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2065
ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2066
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2067
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2070
if (ce->engine->class != RENDER_CLASS) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2075
ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2079
ret = intel_context_reconfigure_sseu(ce, sseu);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2086
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2104
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2113
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2114
if (!intel_engine_has_timeslices(ce->engine))
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2118
intel_engine_has_semaphores(ce->engine))
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2119
intel_context_set_use_semaphores(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2121
intel_context_clear_use_semaphores(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2141
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2169
ce = lookup_user_engine(ctx, lookup, &user.engine);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2170
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2171
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2173
if (user.size < ce->engine->context_size) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2179
test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2190
state = memdup_user(u64_to_user_ptr(user.image), ce->engine->context_size);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2196
shmem_state = uao_create_from_data(ce->engine->name,
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2197
state, ce->engine->context_size);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2203
if (intel_context_set_own_state(ce)) {
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2209
ce->default_state = shmem_state;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2216
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2512
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2535
ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2536
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2537
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2539
err = intel_context_lock_pinned(ce); /* serialises with set_sseu */
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2541
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2545
user_sseu.slice_mask = ce->sseu.slice_mask;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2546
user_sseu.subslice_mask = ce->sseu.subslice_mask;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2547
user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2548
user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2550
intel_context_unlock_pinned(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
2551
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
977
static int intel_context_set_gem(struct intel_context *ce,
sys/dev/pci/drm/i915/gem/i915_gem_context.c
983
GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
sys/dev/pci/drm/i915/gem/i915_gem_context.c
984
RCU_INIT_POINTER(ce->gem_context, ctx);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
986
GEM_BUG_ON(intel_context_is_pinned(ce));
sys/dev/pci/drm/i915/gem/i915_gem_context.c
988
if (ce->engine->class == COMPUTE_CLASS)
sys/dev/pci/drm/i915/gem/i915_gem_context.c
989
ce->ring_size = SZ_512K;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
991
ce->ring_size = SZ_16K;
sys/dev/pci/drm/i915/gem/i915_gem_context.c
993
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
994
ce->vm = i915_gem_context_get_eb_vm(ctx);
sys/dev/pci/drm/i915/gem/i915_gem_context.c
997
intel_engine_has_timeslices(ce->engine) &&
sys/dev/pci/drm/i915/gem/i915_gem_context.c
998
intel_engine_has_semaphores(ce->engine))
sys/dev/pci/drm/i915/gem/i915_gem_context.c
999
__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
sys/dev/pci/drm/i915/gem/i915_gem_context.h
207
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/i915_gem_context.h
212
ce = ERR_PTR(-ENOENT);
sys/dev/pci/drm/i915/gem/i915_gem_context.h
214
ce = intel_context_get(e->engines[idx]);
sys/dev/pci/drm/i915/gem/i915_gem_context.h
216
ce = ERR_PTR(-EINVAL);
sys/dev/pci/drm/i915/gem/i915_gem_context.h
219
return ce;
sys/dev/pci/drm/i915/gem/i915_gem_context.h
233
#define for_each_gem_engine(ce, engines, it) \
sys/dev/pci/drm/i915/gem/i915_gem_context.h
235
((ce) = i915_gem_engines_iter_next(&(it)));)
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2464
static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel_context *ce)
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2466
struct intel_ring *ring = ce->ring;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2467
struct intel_timeline *tl = ce->timeline;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2498
static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2512
tl = intel_context_timeline_lock(ce);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2516
intel_context_enter(ce);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2518
rq = eb_throttle(eb, ce);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2538
mutex_lock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2539
intel_context_exit(ce);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2540
mutex_unlock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2555
struct intel_context *ce = eb->context, *child;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2561
if (unlikely(intel_context_is_banned(ce)))
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2569
err = intel_context_pin_ww(ce, &eb->ww);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2572
for_each_child(ce, child) {
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2577
for_each_child(ce, child) {
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2583
err = eb_pin_timeline(eb, ce, throttle);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2591
for_each_child(ce, child) {
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2598
for_each_child(ce, child)
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2600
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2606
struct intel_context *ce = eb->context, *child;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2613
for_each_child(ce, child) {
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2621
mutex_lock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2622
intel_context_exit(ce);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2623
mutex_unlock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2625
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2675
struct intel_context *ce, *child;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2685
ce = i915_gem_context_get_engine(eb->gem_context, idx);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2686
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2687
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2689
if (intel_context_is_parallel(ce)) {
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2690
if (eb->buffer_count < ce->parallel.number_children + 1) {
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2691
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2695
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2699
eb->num_batches = ce->parallel.number_children + 1;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2700
gt = ce->engine->gt;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2702
for_each_child(ce, child)
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2704
eb->wakeref = intel_gt_pm_get(ce->engine->gt);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2712
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2713
err = intel_context_alloc_state(ce);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2717
for_each_child(ce, child) {
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2729
err = intel_gt_terminally_wedged(ce->engine->gt);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2733
if (!i915_vm_tryget(ce->vm)) {
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2738
eb->context = ce;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2739
eb->gt = ce->engine->gt;
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2752
intel_gt_pm_put(ce->engine->gt, eb->wakeref);
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2753
for_each_child(ce, child)
sys/dev/pci/drm/i915/gem/i915_gem_execbuffer.c
2755
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/i915_gem_throttle.c
54
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/i915_gem_throttle.c
60
for_each_gem_engine(ce,
sys/dev/pci/drm/i915/gem/i915_gem_throttle.c
65
if (!ce->timeline)
sys/dev/pci/drm/i915/gem/i915_gem_throttle.c
68
mutex_lock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gem/i915_gem_throttle.c
70
&ce->timeline->requests,
sys/dev/pci/drm/i915/gem/i915_gem_throttle.c
82
mutex_unlock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1055
static int gpu_write(struct intel_context *ce,
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1068
return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32),
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1146
static int __igt_write_huge(struct intel_context *ce,
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1155
vma = i915_vma_instance(obj, ce->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1165
if (err == -ENOSPC && i915_is_ggtt(ce->vm))
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1175
err = gpu_write(ce, vma, dword, val);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1197
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1231
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1233
if (!intel_engine_can_store_dword(ce->engine))
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1236
max = min(max, ce->vm->total);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1268
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1270
ce = engines->engines[order[i] % engines->num_engines];
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1272
if (!ce || !intel_engine_can_store_dword(ce->engine))
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1289
err = __igt_write_huge(ce, obj, size, offset_low,
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1294
err = __igt_write_huge(ce, obj, size, offset_high,
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1301
__func__, ce->engine->name, offset_low, offset_high,
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1620
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1695
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1697
if (!intel_engine_can_store_dword(ce->engine))
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1720
ce = engines->engines[order[i] % engines->num_engines];
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1722
if (!ce || !intel_engine_can_store_dword(ce->engine))
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1725
err = __igt_write_huge(ce, obj, obj->base.size, addr, 0, rnd);
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1729
err = __igt_write_huge(ce, obj, obj->base.size, addr,
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1734
err = __igt_write_huge(ce, obj, obj->base.size, addr,
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1837
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1896
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1897
if (!intel_engine_can_store_dword(ce->engine))
sys/dev/pci/drm/i915/gem/selftests/huge_pages.c
1900
err = gpu_write(ce, vma, n++, 0xdeadbeaf);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
101
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
156
struct intel_gt *gt = t->ce->engine->gt;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
164
*cs++ = i915_mmio_reg_offset(BLIT_CCTL(t->ce->engine->mmio_base));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
273
struct drm_i915_private *i915 = t->ce->vm->i915;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
284
vma = i915_vma_instance(obj, t->ce->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
300
struct drm_i915_private *i915 = t->ce->engine->i915;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
501
rq = intel_context_create_request(t->ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
545
t->ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
546
if (IS_ERR(t->ce)) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
547
err = PTR_ERR(t->ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
551
t->align = i915_vm_min_alignment(t->ce->vm, INTEL_MEMORY_LOCAL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
553
i915_vm_min_alignment(t->ce->vm, INTEL_MEMORY_SYSTEM));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
559
mutex_lock(&t->ce->vm->mutex);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
561
err = drm_mm_insert_node_in_range(&t->ce->vm->mm, &hole,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
568
mutex_unlock(&t->ce->vm->mutex);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
584
intel_context_put(t->ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_client_blt.c
594
intel_context_put(t->ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1046
struct intel_context *ce,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1060
ret = igt_spinner_init(*spin, ce->engine->gt);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1064
rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1090
__read_slice_count(struct intel_context *ce,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1101
ret = emit_rpcs_query(obj, ce, &rq);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1119
if (GRAPHICS_VER(ce->engine->i915) >= 11) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1164
struct intel_context *ce,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1169
unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1174
ret = intel_engine_reset(ce->engine, "sseu");
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1179
ret = __read_slice_count(ce, obj,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1185
ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1193
ret = igt_flush_test(ce->engine->i915);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1197
ret = __read_slice_count(ce, obj, NULL, &rpcs);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1208
struct intel_context *ce,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1215
intel_engine_pm_get(ce->engine);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1217
ret = __sseu_prepare(name, flags, ce, &spin);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1221
ret = intel_context_reconfigure_sseu(ce, sseu);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1225
ret = __sseu_finish(name, flags, ce, obj,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1235
intel_engine_pm_put(ce->engine);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1262
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1291
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1292
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1293
ret = PTR_ERR(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1297
ret = intel_context_pin(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1302
ret = __sseu_test(name, flags, ce, obj, engine->sseu);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1307
ret = __sseu_test(name, flags, ce, obj, pg_sseu);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1312
ret = __sseu_test(name, flags, ce, obj, engine->sseu);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1317
ret = __sseu_test(name, flags, ce, obj, pg_sseu);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1322
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1324
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1375
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1410
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1411
if (intel_engine_can_store_dword(ce->engine))
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1418
for_each_gem_engine(ce,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1420
if (!intel_engine_can_store_dword(ce->engine))
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1424
obj = create_test_object(ce->vm, file, &objects);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1435
err = gpu_fill(ce, obj, dw);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1439
ce->engine->name,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
1446
err = throttle(ce, tq, ARRAY_SIZE(tq));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
186
struct intel_context *ce[2];
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
203
for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
206
rq = i915_request_create(arg->ce[n]);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
236
arg->ce[0]->engine->name, count, arg->result);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
251
for (n = 0; !arg->result && n < ARRAY_SIZE(arg->ce); n++) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
254
rq = i915_request_create(arg->ce[n]);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
279
arg->ce[0]->engine->name, count, arg->result);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
295
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
328
for_each_gem_engine(ce, engines, it) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
329
err = intel_context_pin(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
334
data[m++].ce[0] = intel_context_get(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
339
for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
347
if (!data[m].ce[0])
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
350
ce = intel_context_create(data[m].ce[0]->engine);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
351
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
352
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
356
err = intel_context_pin(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
358
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
362
data[m].ce[n] = ce;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
369
if (!data[n].ce[0])
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
373
data[n].ce[0]->engine->name);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
390
if (!data[n].ce[0])
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
399
if (data[n].ce[0]) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
414
for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
415
if (!data[n].ce[m])
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
418
intel_context_unpin(data[n].ce[m]);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
419
intel_context_put(data[n].ce[m]);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
441
static int gpu_fill(struct intel_context *ce,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
448
GEM_BUG_ON(obj->base.size > ce->vm->total);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
449
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
451
vma = i915_vma_instance(obj, ce->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
467
err = igt_gpu_fill_dw(ce, vma,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
632
static int throttle(struct intel_context *ce,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
649
q[i] = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
702
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
710
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
711
GEM_BUG_ON(IS_ERR(ce));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
714
obj = create_test_object(ce->vm, file, &objects);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
717
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
723
err = gpu_fill(ce, obj, dw);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
730
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
735
err = throttle(ce, tq, ARRAY_SIZE(tq));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
737
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
750
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
835
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
843
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
844
GEM_BUG_ON(IS_ERR(ce));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
851
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
857
err = gpu_fill(ce, obj, dw);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
864
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
869
err = throttle(ce, tq, ARRAY_SIZE(tq));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
871
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
884
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
941
struct intel_context *ce,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
952
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
957
vma = i915_vma_instance(obj, ce->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
965
batch = i915_vma_instance(rpcs, ce->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
987
err = rpcs_query_batch(rpcs, vma, ce->engine);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_context.c
991
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
154
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
171
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
172
if (intel_engine_can_store_dword(ce->engine))
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
176
if (!ce)
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
179
vma = i915_vma_instance(import_obj, ce->vm, NULL);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_dmabuf.c
189
err = igt_gpu_fill_dw(ce, vma, 0,
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
393
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
397
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
398
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
399
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
411
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
sys/dev/pci/drm/i915/gem/selftests/i915_gem_migrate.c
412
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
111
int igt_gpu_fill_dw(struct intel_context *ce,
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
120
GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
127
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
142
if (GRAPHICS_VER(ce->vm->i915) <= 5)
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
23
struct intel_context *ce;
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
31
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
32
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
33
return ERR_CAST(ce);
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
35
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.c
36
intel_context_put(ce);
sys/dev/pci/drm/i915/gem/selftests/igt_gem_utils.h
30
int igt_gpu_fill_dw(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/gen8_engine_cs.c
486
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/gen8_engine_cs.c
487
u32 wa_offset = lrc_indirect_bb(ce);
sys/dev/pci/drm/i915/gt/gen8_engine_cs.c
490
GEM_BUG_ON(!ce->wa_bb_page);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
100
lockdep_assert_held(&ce->signal_lock);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
102
if (!list_empty(&ce->signals))
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
106
list_del_rcu(&ce->signal_link);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
113
check_signal_order(struct intel_context *ce, struct i915_request *rq)
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
115
if (rq->context != ce)
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
118
if (!list_is_last(&rq->signal_link, &ce->signals) &&
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
123
if (!list_is_first(&rq->signal_link, &ce->signals) &&
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
177
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
213
list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
216
list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
231
spin_lock(&ce->signal_lock);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
233
release = remove_signaling_context(b, ce);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
234
spin_unlock(&ce->signal_lock);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
236
if (intel_timeline_is_last(ce->timeline, rq))
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
237
add_retire(b, ce->timeline);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
238
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
353
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
369
if (list_empty(&ce->signals)) {
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
370
intel_context_get(ce);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
371
add_signaling_context(b, ce);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
372
pos = &ce->signals;
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
388
list_for_each_prev(pos, &ce->signals) {
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
399
GEM_BUG_ON(!check_signal_order(ce, rq));
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
414
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
429
spin_lock(&ce->signal_lock);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
432
spin_unlock(&ce->signal_lock);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
440
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
443
spin_lock(&ce->signal_lock);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
445
spin_unlock(&ce->signal_lock);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
450
release = remove_signaling_context(b, ce);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
451
spin_unlock(&ce->signal_lock);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
453
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
461
void intel_context_remove_breadcrumbs(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
468
spin_lock_irqsave(&ce->signal_lock, flags);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
470
if (list_empty(&ce->signals))
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
473
list_for_each_entry_safe(rq, rn, &ce->signals, signal_link) {
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
483
release = remove_signaling_context(b, ce);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
486
spin_unlock_irqrestore(&ce->signal_lock, flags);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
488
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
496
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
502
list_for_each_entry_rcu(ce, &b->signalers, signal_link) {
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
503
list_for_each_entry_rcu(rq, &ce->signals, signal_link)
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
88
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
90
lockdep_assert_held(&ce->signal_lock);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
93
list_add_rcu(&ce->signal_link, &b->signalers);
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.c
98
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_breadcrumbs.h
48
void intel_context_remove_breadcrumbs(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context.c
100
static int intel_context_active_acquire(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
104
__i915_active_acquire(&ce->active);
sys/dev/pci/drm/i915/gt/intel_context.c
106
if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine) ||
sys/dev/pci/drm/i915/gt/intel_context.c
107
intel_context_is_parallel(ce))
sys/dev/pci/drm/i915/gt/intel_context.c
111
err = i915_active_acquire_preallocate_barrier(&ce->active,
sys/dev/pci/drm/i915/gt/intel_context.c
112
ce->engine);
sys/dev/pci/drm/i915/gt/intel_context.c
114
i915_active_release(&ce->active);
sys/dev/pci/drm/i915/gt/intel_context.c
119
static void intel_context_active_release(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
122
i915_active_acquire_barrier(&ce->active);
sys/dev/pci/drm/i915/gt/intel_context.c
123
i915_active_release(&ce->active);
sys/dev/pci/drm/i915/gt/intel_context.c
186
static int intel_context_pre_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context.c
191
CE_TRACE(ce, "active\n");
sys/dev/pci/drm/i915/gt/intel_context.c
193
err = __ring_active(ce->ring, ww);
sys/dev/pci/drm/i915/gt/intel_context.c
197
err = intel_timeline_pin(ce->timeline, ww);
sys/dev/pci/drm/i915/gt/intel_context.c
201
if (!ce->state)
sys/dev/pci/drm/i915/gt/intel_context.c
204
err = __context_pin_state(ce->state, ww);
sys/dev/pci/drm/i915/gt/intel_context.c
212
intel_timeline_unpin(ce->timeline);
sys/dev/pci/drm/i915/gt/intel_context.c
214
__ring_retire(ce->ring);
sys/dev/pci/drm/i915/gt/intel_context.c
218
static void intel_context_post_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
220
if (ce->state)
sys/dev/pci/drm/i915/gt/intel_context.c
221
__context_unpin_state(ce->state);
sys/dev/pci/drm/i915/gt/intel_context.c
223
intel_timeline_unpin(ce->timeline);
sys/dev/pci/drm/i915/gt/intel_context.c
224
__ring_retire(ce->ring);
sys/dev/pci/drm/i915/gt/intel_context.c
227
int __intel_context_do_pin_ww(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context.c
234
if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
sys/dev/pci/drm/i915/gt/intel_context.c
235
err = intel_context_alloc_state(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
246
err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
sys/dev/pci/drm/i915/gt/intel_context.c
248
err = i915_gem_object_lock(ce->ring->vma->obj, ww);
sys/dev/pci/drm/i915/gt/intel_context.c
249
if (!err && ce->state)
sys/dev/pci/drm/i915/gt/intel_context.c
250
err = i915_gem_object_lock(ce->state->obj, ww);
sys/dev/pci/drm/i915/gt/intel_context.c
252
err = intel_context_pre_pin(ce, ww);
sys/dev/pci/drm/i915/gt/intel_context.c
256
err = ce->ops->pre_pin(ce, ww, &vaddr);
sys/dev/pci/drm/i915/gt/intel_context.c
260
err = i915_active_acquire(&ce->active);
sys/dev/pci/drm/i915/gt/intel_context.c
264
err = mutex_lock_interruptible(&ce->pin_mutex);
sys/dev/pci/drm/i915/gt/intel_context.c
268
intel_engine_pm_might_get(ce->engine);
sys/dev/pci/drm/i915/gt/intel_context.c
270
if (unlikely(intel_context_is_closed(ce))) {
sys/dev/pci/drm/i915/gt/intel_context.c
275
if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
sys/dev/pci/drm/i915/gt/intel_context.c
276
err = intel_context_active_acquire(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
280
err = ce->ops->pin(ce, vaddr);
sys/dev/pci/drm/i915/gt/intel_context.c
282
intel_context_active_release(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
286
CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
sys/dev/pci/drm/i915/gt/intel_context.c
287
i915_ggtt_offset(ce->ring->vma),
sys/dev/pci/drm/i915/gt/intel_context.c
288
ce->ring->head, ce->ring->tail);
sys/dev/pci/drm/i915/gt/intel_context.c
292
atomic_inc(&ce->pin_count);
sys/dev/pci/drm/i915/gt/intel_context.c
295
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
sys/dev/pci/drm/i915/gt/intel_context.c
297
trace_intel_context_do_pin(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
300
mutex_unlock(&ce->pin_mutex);
sys/dev/pci/drm/i915/gt/intel_context.c
302
i915_active_release(&ce->active);
sys/dev/pci/drm/i915/gt/intel_context.c
305
ce->ops->post_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
307
intel_context_post_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
31
struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);
sys/dev/pci/drm/i915/gt/intel_context.c
315
i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);
sys/dev/pci/drm/i915/gt/intel_context.c
320
int __intel_context_do_pin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
327
err = __intel_context_do_pin_ww(ce, &ww);
sys/dev/pci/drm/i915/gt/intel_context.c
33
trace_intel_context_free(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
337
void __intel_context_do_unpin(struct intel_context *ce, int sub)
sys/dev/pci/drm/i915/gt/intel_context.c
339
if (!atomic_sub_and_test(sub, &ce->pin_count))
sys/dev/pci/drm/i915/gt/intel_context.c
34
if (intel_context_has_own_state(ce))
sys/dev/pci/drm/i915/gt/intel_context.c
342
CE_TRACE(ce, "unpin\n");
sys/dev/pci/drm/i915/gt/intel_context.c
343
ce->ops->unpin(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
344
ce->ops->post_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
35
uao_detach(ce->default_state);
sys/dev/pci/drm/i915/gt/intel_context.c
352
intel_context_get(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
353
intel_context_active_release(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
354
trace_intel_context_do_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
355
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
360
struct intel_context *ce = container_of(active, typeof(*ce), active);
sys/dev/pci/drm/i915/gt/intel_context.c
362
CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
sys/dev/pci/drm/i915/gt/intel_context.c
363
intel_context_get_total_runtime_ns(ce),
sys/dev/pci/drm/i915/gt/intel_context.c
364
intel_context_get_avg_runtime_ns(ce));
sys/dev/pci/drm/i915/gt/intel_context.c
366
set_bit(CONTEXT_VALID_BIT, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.c
367
intel_context_post_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
368
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
37
kmem_cache_free(slab_ce, ce);
sys/dev/pci/drm/i915/gt/intel_context.c
373
struct intel_context *ce = container_of(active, typeof(*ce), active);
sys/dev/pci/drm/i915/gt/intel_context.c
375
intel_context_get(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
378
GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
sys/dev/pci/drm/i915/gt/intel_context.c
379
__intel_ring_pin(ce->ring);
sys/dev/pci/drm/i915/gt/intel_context.c
381
__intel_timeline_pin(ce->timeline);
sys/dev/pci/drm/i915/gt/intel_context.c
383
if (ce->state) {
sys/dev/pci/drm/i915/gt/intel_context.c
384
GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
sys/dev/pci/drm/i915/gt/intel_context.c
385
__i915_vma_pin(ce->state);
sys/dev/pci/drm/i915/gt/intel_context.c
386
i915_vma_make_unshrinkable(ce->state);
sys/dev/pci/drm/i915/gt/intel_context.c
39
pool_put(&slab_ce, ce);
sys/dev/pci/drm/i915/gt/intel_context.c
400
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
sys/dev/pci/drm/i915/gt/intel_context.c
405
kref_init(&ce->ref);
sys/dev/pci/drm/i915/gt/intel_context.c
407
ce->engine = engine;
sys/dev/pci/drm/i915/gt/intel_context.c
408
ce->ops = engine->cops;
sys/dev/pci/drm/i915/gt/intel_context.c
409
ce->sseu = engine->sseu;
sys/dev/pci/drm/i915/gt/intel_context.c
410
ce->ring = NULL;
sys/dev/pci/drm/i915/gt/intel_context.c
411
ce->ring_size = SZ_4K;
sys/dev/pci/drm/i915/gt/intel_context.c
413
ewma_runtime_init(&ce->stats.runtime.avg);
sys/dev/pci/drm/i915/gt/intel_context.c
415
ce->vm = i915_vm_get(engine->gt->vm);
sys/dev/pci/drm/i915/gt/intel_context.c
418
mtx_init(&ce->signal_lock, IPL_NONE);
sys/dev/pci/drm/i915/gt/intel_context.c
419
INIT_LIST_HEAD(&ce->signals);
sys/dev/pci/drm/i915/gt/intel_context.c
421
rw_init(&ce->pin_mutex, "cepin");
sys/dev/pci/drm/i915/gt/intel_context.c
423
mtx_init(&ce->guc_state.lock, IPL_TTY);
sys/dev/pci/drm/i915/gt/intel_context.c
424
INIT_LIST_HEAD(&ce->guc_state.fences);
sys/dev/pci/drm/i915/gt/intel_context.c
425
INIT_LIST_HEAD(&ce->guc_state.requests);
sys/dev/pci/drm/i915/gt/intel_context.c
427
ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
sys/dev/pci/drm/i915/gt/intel_context.c
428
INIT_LIST_HEAD(&ce->guc_id.link);
sys/dev/pci/drm/i915/gt/intel_context.c
43
void intel_context_free(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
430
INIT_LIST_HEAD(&ce->destroyed_link);
sys/dev/pci/drm/i915/gt/intel_context.c
432
INIT_LIST_HEAD(&ce->parallel.child_list);
sys/dev/pci/drm/i915/gt/intel_context.c
438
i915_sw_fence_init(&ce->guc_state.blocked,
sys/dev/pci/drm/i915/gt/intel_context.c
440
i915_sw_fence_commit(&ce->guc_state.blocked);
sys/dev/pci/drm/i915/gt/intel_context.c
442
i915_active_init(&ce->active,
sys/dev/pci/drm/i915/gt/intel_context.c
446
void intel_context_fini(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
45
call_rcu(&ce->rcu, rcu_context_free);
sys/dev/pci/drm/i915/gt/intel_context.c
450
if (ce->timeline)
sys/dev/pci/drm/i915/gt/intel_context.c
451
intel_timeline_put(ce->timeline);
sys/dev/pci/drm/i915/gt/intel_context.c
452
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/gt/intel_context.c
455
if (intel_context_is_parent(ce))
sys/dev/pci/drm/i915/gt/intel_context.c
456
for_each_child_safe(ce, child, next)
sys/dev/pci/drm/i915/gt/intel_context.c
459
mutex_destroy(&ce->pin_mutex);
sys/dev/pci/drm/i915/gt/intel_context.c
460
i915_active_fini(&ce->active);
sys/dev/pci/drm/i915/gt/intel_context.c
461
i915_sw_fence_fini(&ce->guc_state.blocked);
sys/dev/pci/drm/i915/gt/intel_context.c
487
void intel_context_enter_engine(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
489
intel_engine_pm_get(ce->engine);
sys/dev/pci/drm/i915/gt/intel_context.c
490
intel_timeline_enter(ce->timeline);
sys/dev/pci/drm/i915/gt/intel_context.c
493
void intel_context_exit_engine(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
495
intel_timeline_exit(ce->timeline);
sys/dev/pci/drm/i915/gt/intel_context.c
496
intel_engine_pm_put(ce->engine);
sys/dev/pci/drm/i915/gt/intel_context.c
499
int intel_context_prepare_remote_request(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context.c
502
struct intel_timeline *tl = ce->timeline;
sys/dev/pci/drm/i915/gt/intel_context.c
506
GEM_BUG_ON(rq->context == ce);
sys/dev/pci/drm/i915/gt/intel_context.c
51
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_context.c
522
GEM_BUG_ON(i915_active_is_idle(&ce->active));
sys/dev/pci/drm/i915/gt/intel_context.c
523
return i915_active_add_request(&ce->active, rq);
sys/dev/pci/drm/i915/gt/intel_context.c
526
struct i915_request *intel_context_create_request(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
53
ce = intel_context_alloc();
sys/dev/pci/drm/i915/gt/intel_context.c
534
err = intel_context_pin_ww(ce, &ww);
sys/dev/pci/drm/i915/gt/intel_context.c
536
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
537
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
54
if (!ce)
sys/dev/pci/drm/i915/gt/intel_context.c
556
lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
sys/dev/pci/drm/i915/gt/intel_context.c
557
mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
sys/dev/pci/drm/i915/gt/intel_context.c
558
mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
sys/dev/pci/drm/i915/gt/intel_context.c
559
rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_context.c
564
struct i915_request *intel_context_get_active_request(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
566
struct intel_context *parent = intel_context_to_parent(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
57
intel_context_init(ce, engine);
sys/dev/pci/drm/i915/gt/intel_context.c
570
GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));
sys/dev/pci/drm/i915/gt/intel_context.c
58
trace_intel_context_create(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
581
if (rq->context != ce)
sys/dev/pci/drm/i915/gt/intel_context.c
59
return ce;
sys/dev/pci/drm/i915/gt/intel_context.c
614
u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
618
if (ce->ops->update_stats)
sys/dev/pci/drm/i915/gt/intel_context.c
619
ce->ops->update_stats(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
62
int intel_context_alloc_state(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
621
total = ce->stats.runtime.total;
sys/dev/pci/drm/i915/gt/intel_context.c
622
if (ce->ops->flags & COPS_RUNTIME_CYCLES)
sys/dev/pci/drm/i915/gt/intel_context.c
623
total *= ce->engine->gt->clock_period_ns;
sys/dev/pci/drm/i915/gt/intel_context.c
625
active = READ_ONCE(ce->stats.active);
sys/dev/pci/drm/i915/gt/intel_context.c
632
u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
634
u64 avg = ewma_runtime_read(&ce->stats.runtime.avg);
sys/dev/pci/drm/i915/gt/intel_context.c
636
if (ce->ops->flags & COPS_RUNTIME_CYCLES)
sys/dev/pci/drm/i915/gt/intel_context.c
637
avg *= ce->engine->gt->clock_period_ns;
sys/dev/pci/drm/i915/gt/intel_context.c
642
bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
sys/dev/pci/drm/i915/gt/intel_context.c
644
bool ret = intel_context_set_banned(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
646
trace_intel_context_ban(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
648
if (ce->ops->revoke)
sys/dev/pci/drm/i915/gt/intel_context.c
649
ce->ops->revoke(ce, rq,
sys/dev/pci/drm/i915/gt/intel_context.c
655
bool intel_context_revoke(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.c
657
bool ret = intel_context_set_exiting(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
659
if (ce->ops->revoke)
sys/dev/pci/drm/i915/gt/intel_context.c
660
ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms);
sys/dev/pci/drm/i915/gt/intel_context.c
67
if (mutex_lock_interruptible(&ce->pin_mutex))
sys/dev/pci/drm/i915/gt/intel_context.c
70
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
sys/dev/pci/drm/i915/gt/intel_context.c
71
if (intel_context_is_banned(ce)) {
sys/dev/pci/drm/i915/gt/intel_context.c
76
err = ce->ops->alloc(ce);
sys/dev/pci/drm/i915/gt/intel_context.c
80
set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.c
83
ctx = rcu_dereference(ce->gem_context);
sys/dev/pci/drm/i915/gt/intel_context.c
90
ce);
sys/dev/pci/drm/i915/gt/intel_context.c
96
mutex_unlock(&ce->pin_mutex);
sys/dev/pci/drm/i915/gt/intel_context.h
106
static inline int intel_context_lock_pinned(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
107
__acquires(ce->pin_mutex)
sys/dev/pci/drm/i915/gt/intel_context.h
109
return mutex_lock_interruptible(&ce->pin_mutex);
sys/dev/pci/drm/i915/gt/intel_context.h
122
intel_context_is_pinned(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
124
return atomic_read(&ce->pin_count);
sys/dev/pci/drm/i915/gt/intel_context.h
127
static inline void intel_context_cancel_request(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context.h
130
GEM_BUG_ON(!ce->ops->cancel_request);
sys/dev/pci/drm/i915/gt/intel_context.h
131
return ce->ops->cancel_request(ce, rq);
sys/dev/pci/drm/i915/gt/intel_context.h
140
static inline void intel_context_unlock_pinned(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
141
__releases(ce->pin_mutex)
sys/dev/pci/drm/i915/gt/intel_context.h
143
mutex_unlock(&ce->pin_mutex);
sys/dev/pci/drm/i915/gt/intel_context.h
146
int __intel_context_do_pin(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context.h
147
int __intel_context_do_pin_ww(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context.h
150
static inline bool intel_context_pin_if_active(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
152
return atomic_inc_not_zero(&ce->pin_count);
sys/dev/pci/drm/i915/gt/intel_context.h
155
static inline int intel_context_pin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
157
if (likely(intel_context_pin_if_active(ce)))
sys/dev/pci/drm/i915/gt/intel_context.h
160
return __intel_context_do_pin(ce);
sys/dev/pci/drm/i915/gt/intel_context.h
163
static inline int intel_context_pin_ww(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context.h
166
if (likely(intel_context_pin_if_active(ce)))
sys/dev/pci/drm/i915/gt/intel_context.h
169
return __intel_context_do_pin_ww(ce, ww);
sys/dev/pci/drm/i915/gt/intel_context.h
172
static inline void __intel_context_pin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
174
GEM_BUG_ON(!intel_context_is_pinned(ce));
sys/dev/pci/drm/i915/gt/intel_context.h
175
atomic_inc(&ce->pin_count);
sys/dev/pci/drm/i915/gt/intel_context.h
178
void __intel_context_do_unpin(struct intel_context *ce, int sub);
sys/dev/pci/drm/i915/gt/intel_context.h
180
static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
182
__intel_context_do_unpin(ce, 2);
sys/dev/pci/drm/i915/gt/intel_context.h
185
static inline void intel_context_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
187
if (!ce->ops->sched_disable) {
sys/dev/pci/drm/i915/gt/intel_context.h
188
__intel_context_do_unpin(ce, 1);
sys/dev/pci/drm/i915/gt/intel_context.h
196
while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
sys/dev/pci/drm/i915/gt/intel_context.h
197
if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
sys/dev/pci/drm/i915/gt/intel_context.h
198
ce->ops->sched_disable(ce);
sys/dev/pci/drm/i915/gt/intel_context.h
205
void intel_context_enter_engine(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context.h
206
void intel_context_exit_engine(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context.h
208
static inline void intel_context_enter(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
210
lockdep_assert_held(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_context.h
211
if (ce->active_count++)
sys/dev/pci/drm/i915/gt/intel_context.h
214
ce->ops->enter(ce);
sys/dev/pci/drm/i915/gt/intel_context.h
215
ce->wakeref = intel_gt_pm_get(ce->vm->gt);
sys/dev/pci/drm/i915/gt/intel_context.h
218
static inline void intel_context_mark_active(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
22
#define CE_TRACE(ce, fmt, ...) do { \
sys/dev/pci/drm/i915/gt/intel_context.h
220
lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
sys/dev/pci/drm/i915/gt/intel_context.h
221
test_bit(CONTEXT_IS_PARKING, &ce->flags));
sys/dev/pci/drm/i915/gt/intel_context.h
222
++ce->active_count;
sys/dev/pci/drm/i915/gt/intel_context.h
225
static inline void intel_context_exit(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
227
lockdep_assert_held(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_context.h
228
GEM_BUG_ON(!ce->active_count);
sys/dev/pci/drm/i915/gt/intel_context.h
229
if (--ce->active_count)
sys/dev/pci/drm/i915/gt/intel_context.h
23
const struct intel_context *ce__ = (ce); \
sys/dev/pci/drm/i915/gt/intel_context.h
232
intel_gt_pm_put_async(ce->vm->gt, ce->wakeref);
sys/dev/pci/drm/i915/gt/intel_context.h
233
ce->ops->exit(ce);
sys/dev/pci/drm/i915/gt/intel_context.h
236
static inline struct intel_context *intel_context_get(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
238
kref_get(&ce->ref);
sys/dev/pci/drm/i915/gt/intel_context.h
239
return ce;
sys/dev/pci/drm/i915/gt/intel_context.h
242
static inline void intel_context_put(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
244
kref_put(&ce->ref, ce->ops->destroy);
sys/dev/pci/drm/i915/gt/intel_context.h
248
intel_context_timeline_lock(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
249
__acquires(&ce->timeline->mutex)
sys/dev/pci/drm/i915/gt/intel_context.h
251
struct intel_timeline *tl = ce->timeline;
sys/dev/pci/drm/i915/gt/intel_context.h
254
if (intel_context_is_parent(ce))
sys/dev/pci/drm/i915/gt/intel_context.h
256
else if (intel_context_is_child(ce))
sys/dev/pci/drm/i915/gt/intel_context.h
258
ce->parallel.child_index + 1);
sys/dev/pci/drm/i915/gt/intel_context.h
273
int intel_context_prepare_remote_request(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context.h
276
struct i915_request *intel_context_create_request(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context.h
278
struct i915_request *intel_context_get_active_request(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context.h
280
static inline bool intel_context_is_barrier(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
282
return test_bit(CONTEXT_BARRIER_BIT, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
285
static inline void intel_context_close(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
287
set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
289
if (ce->ops->close)
sys/dev/pci/drm/i915/gt/intel_context.h
290
ce->ops->close(ce);
sys/dev/pci/drm/i915/gt/intel_context.h
293
static inline bool intel_context_is_closed(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
295
return test_bit(CONTEXT_CLOSED_BIT, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
298
static inline bool intel_context_has_inflight(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
300
return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
303
static inline bool intel_context_use_semaphores(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
305
return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
308
static inline void intel_context_set_use_semaphores(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
310
set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
313
static inline void intel_context_clear_use_semaphores(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
315
clear_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
318
static inline bool intel_context_is_banned(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
320
return test_bit(CONTEXT_BANNED, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
323
static inline bool intel_context_set_banned(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
325
return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
328
bool intel_context_ban(struct intel_context *ce, struct i915_request *rq);
sys/dev/pci/drm/i915/gt/intel_context.h
33
void intel_context_init(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context.h
330
static inline bool intel_context_is_schedulable(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
332
return !test_bit(CONTEXT_EXITING, &ce->flags) &&
sys/dev/pci/drm/i915/gt/intel_context.h
333
!test_bit(CONTEXT_BANNED, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
336
static inline bool intel_context_is_exiting(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
338
return test_bit(CONTEXT_EXITING, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
341
static inline bool intel_context_set_exiting(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
343
return test_and_set_bit(CONTEXT_EXITING, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
346
bool intel_context_revoke(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context.h
349
intel_context_force_single_submission(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
35
void intel_context_fini(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context.h
351
return test_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
355
intel_context_set_single_submission(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
357
__set_bit(CONTEXT_FORCE_SINGLE_SUBMISSION, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
361
intel_context_nopreempt(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
363
return test_bit(CONTEXT_NOPREEMPT, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
367
intel_context_set_nopreempt(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
369
set_bit(CONTEXT_NOPREEMPT, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
373
intel_context_clear_nopreempt(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
375
clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
379
static inline bool intel_context_has_own_state(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
381
return test_bit(CONTEXT_OWN_STATE, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
384
static inline bool intel_context_set_own_state(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
386
return test_and_set_bit(CONTEXT_OWN_STATE, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_context.h
389
static inline bool intel_context_has_own_state(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
394
static inline bool intel_context_set_own_state(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
400
u64 intel_context_get_total_runtime_ns(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context.h
401
u64 intel_context_get_avg_runtime_ns(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context.h
43
int intel_context_alloc_state(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context.h
45
void intel_context_free(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context.h
47
int intel_context_reconfigure_sseu(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context.h
52
static inline bool intel_context_is_child(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
54
return !!ce->parallel.parent;
sys/dev/pci/drm/i915/gt/intel_context.h
57
static inline bool intel_context_is_parent(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
59
return !!ce->parallel.number_children;
sys/dev/pci/drm/i915/gt/intel_context.h
62
static inline bool intel_context_is_pinned(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context.h
65
intel_context_to_parent(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
67
if (intel_context_is_child(ce)) {
sys/dev/pci/drm/i915/gt/intel_context.h
75
GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
sys/dev/pci/drm/i915/gt/intel_context.h
77
return ce->parallel.parent;
sys/dev/pci/drm/i915/gt/intel_context.h
79
return ce;
sys/dev/pci/drm/i915/gt/intel_context.h
83
static inline bool intel_context_is_parallel(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_context.h
85
return intel_context_is_child(ce) || intel_context_is_parent(ce);
sys/dev/pci/drm/i915/gt/intel_context.h
91
#define for_each_child(parent, ce)\
sys/dev/pci/drm/i915/gt/intel_context.h
92
list_for_each_entry(ce, &(parent)->parallel.child_list,\
sys/dev/pci/drm/i915/gt/intel_context.h
94
#define for_each_child_safe(parent, ce, cn)\
sys/dev/pci/drm/i915/gt/intel_context.h
95
list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list,\
sys/dev/pci/drm/i915/gt/intel_context_param.h
14
intel_context_set_watchdog_us(struct intel_context *ce, u64 timeout_us)
sys/dev/pci/drm/i915/gt/intel_context_param.h
16
ce->watchdog.timeout_us = timeout_us;
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
17
const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
27
offset = i915_ggtt_offset(ce->state) +
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
41
gen8_modify_rpcs(struct intel_context *ce, const struct intel_sseu sseu)
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
46
lockdep_assert_held(&ce->pin_mutex);
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
54
if (!intel_context_pin_if_active(ce))
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
57
rq = intel_engine_create_kernel_request(ce->engine);
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
64
ret = intel_context_prepare_remote_request(ce, rq);
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
66
ret = gen8_emit_rpcs_config(rq, ce, sseu);
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
70
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
75
intel_context_reconfigure_sseu(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
80
GEM_BUG_ON(GRAPHICS_VER(ce->engine->i915) < 8);
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
82
ret = intel_context_lock_pinned(ce);
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
87
if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
90
ret = gen8_modify_rpcs(ce, sseu);
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
92
ce->sseu = sseu;
sys/dev/pci/drm/i915/gt/intel_context_sseu.c
95
intel_context_unlock_pinned(ce);
sys/dev/pci/drm/i915/gt/intel_context_types.h
41
int (*alloc)(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context_types.h
43
void (*revoke)(struct intel_context *ce, struct i915_request *rq,
sys/dev/pci/drm/i915/gt/intel_context_types.h
46
void (*close)(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context_types.h
48
int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
sys/dev/pci/drm/i915/gt/intel_context_types.h
49
int (*pin)(struct intel_context *ce, void *vaddr);
sys/dev/pci/drm/i915/gt/intel_context_types.h
50
void (*unpin)(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context_types.h
51
void (*post_unpin)(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context_types.h
53
void (*cancel_request)(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_context_types.h
56
void (*enter)(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context_types.h
57
void (*exit)(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context_types.h
59
void (*sched_disable)(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context_types.h
61
void (*update_stats)(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context_types.h
63
void (*reset)(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_context_types.h
93
#define intel_context_inflight(ce) \
sys/dev/pci/drm/i915/gt/intel_context_types.h
94
__intel_context_inflight(READ_ONCE((ce)->inflight))
sys/dev/pci/drm/i915/gt/intel_context_types.h
95
#define intel_context_inflight_count(ce) \
sys/dev/pci/drm/i915/gt/intel_context_types.h
96
__intel_context_inflight_count(READ_ONCE((ce)->inflight))
sys/dev/pci/drm/i915/gt/intel_engine.h
276
struct intel_context **ce, struct i915_request **rq);
sys/dev/pci/drm/i915/gt/intel_engine.h
287
void intel_engine_destroy_pinned_context(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_engine.h
358
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_engine.h
360
engine->hung_ce = ce;
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1312
static int measure_breadcrumb_dw(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1314
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1326
frame->rq.context = ce;
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1327
rcu_assign_pointer(frame->rq.timeline, ce->timeline);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1328
frame->rq.hwsp_seqno = ce->timeline->hwsp_seqno;
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1338
mutex_lock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1344
mutex_unlock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1360
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1363
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1364
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1365
return ce;
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1367
__set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1368
ce->timeline = page_pack_bits(NULL, hwsp);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1369
ce->ring = NULL;
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1370
ce->ring_size = ring_size;
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1372
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1373
ce->vm = i915_vm_get(vm);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1375
err = intel_context_pin(ce); /* perma-pin so it is always available */
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1377
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1381
list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1389
lockdep_set_class_and_name(&ce->timeline->mutex, key, name);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1391
return ce;
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1394
void intel_engine_destroy_pinned_context(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1396
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1399
GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1402
list_del(&ce->timeline->engine_link);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1405
list_del(&ce->pinned_contexts_link);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1406
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1407
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1447
struct intel_context *ce, *bce = NULL;
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1460
ce = create_kernel_context(engine);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1461
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1462
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1477
ret = measure_breadcrumb_dw(ce);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1482
engine->kernel_context = ce;
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
1491
intel_engine_destroy_pinned_context(ce);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
2524
struct intel_context **ce, struct i915_request **rq)
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
2528
*ce = intel_engine_get_hung_context(engine);
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
2529
if (*ce) {
sys/dev/pci/drm/i915/gt/intel_engine_cs.c
2532
*rq = intel_context_get_active_request(*ce);
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
141
struct intel_context *ce = engine->kernel_context;
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
213
if (!mutex_trylock(&ce->timeline->mutex)) {
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
223
rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
230
mutex_unlock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
278
struct intel_context *ce = engine->kernel_context;
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
281
lockdep_assert_held(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
285
rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN);
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
317
struct intel_context *ce = engine->kernel_context;
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
336
err = mutex_lock_interruptible(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
351
mutex_unlock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
360
struct intel_context *ce = engine->kernel_context;
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
370
if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
372
mutex_unlock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
383
struct intel_context *ce = engine->kernel_context;
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
393
if (mutex_lock_interruptible(&ce->timeline->mutex)) {
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
398
rq = heartbeat_create(ce, GFP_KERNEL);
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
408
mutex_unlock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
68
heartbeat_create(struct intel_context *ce, gfp_t gfp)
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
72
intel_context_enter(ce);
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
73
rq = __i915_request_create(ce, gfp);
sys/dev/pci/drm/i915/gt/intel_engine_heartbeat.c
74
intel_context_exit(ce);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
153
struct intel_context *ce = engine->kernel_context;
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
174
GEM_BUG_ON(!intel_context_is_barrier(ce));
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
175
GEM_BUG_ON(ce->timeline->hwsp_ggtt != engine->status_page.vma);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
209
set_bit(CONTEXT_IS_PARKING, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
210
GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
212
rq = __i915_request_create(ce, GFP_NOWAIT);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
237
__queue_and_release_pm(rq, ce->timeline, engine);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
241
clear_bit(CONTEXT_IS_PARKING, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
313
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
315
list_for_each_entry(ce, &engine->pinned_contexts_list,
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
318
if (ce == engine->kernel_context)
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
321
dbg_poison_ce(ce);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
322
ce->ops->reset(ce);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
35
static void dbg_poison_ce(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
40
if (ce->state) {
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
41
struct drm_i915_gem_object *obj = ce->state->obj;
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
42
int type = intel_gt_coherent_map_type(ce->engine->gt, obj, true);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
62
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
69
ce = engine->kernel_context;
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
70
if (ce) {
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
71
GEM_BUG_ON(test_bit(CONTEXT_VALID_BIT, &ce->flags));
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
74
while (unlikely(intel_context_inflight(ce)))
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
78
dbg_poison_ce(ce);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
81
ce->ops->reset(ce);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
83
CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n",
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
84
ce->timeline->seqno,
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
85
READ_ONCE(*ce->timeline->hwsp_seqno),
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
86
ce->ring->emit);
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
87
GEM_BUG_ON(ce->timeline->seqno !=
sys/dev/pci/drm/i915/gt/intel_engine_pm.c
88
READ_ONCE(*ce->timeline->hwsp_seqno));
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2606
__execlists_context_pre_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2612
err = lrc_pre_pin(ce, engine, ww, vaddr);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2616
if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags)) {
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2617
lrc_init_state(ce, engine, *vaddr);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2619
__i915_gem_object_flush_map(ce->state->obj, 0, engine->context_size);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2625
static int execlists_context_pre_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2629
return __execlists_context_pre_pin(ce, ce->engine, ww, vaddr);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2632
static int execlists_context_pin(struct intel_context *ce, void *vaddr)
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2634
return lrc_pin(ce, ce->engine, vaddr);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2637
static int execlists_context_alloc(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2639
return lrc_alloc(ce, ce->engine);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2642
static void execlists_context_cancel_request(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2660
struct intel_context *parent = NULL, *ce, *err;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2666
ce = intel_context_create(engines[i]);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2667
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2668
err = ce;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2673
parent = ce;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2675
intel_context_bind_parent_child(parent, ce);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2681
for_each_child(parent, ce)
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
2682
intel_context_set_nopreempt(ce);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3029
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3042
ce = rq->context;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3043
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3047
head = intel_ring_wrap(ce->ring, rq->tail);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3055
GEM_BUG_ON(i915_active_is_idle(&ce->active));
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3057
rq = active_request(ce->timeline, rq);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3058
head = intel_ring_wrap(ce->ring, rq->head);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3059
GEM_BUG_ON(head == ce->ring->tail);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3099
head, ce->ring->tail);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3100
lrc_reset_regs(ce, engine);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3101
ce->lrc.lrca = lrc_update_regs(ce, engine, head);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3715
static int virtual_context_alloc(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3717
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3719
return lrc_alloc(ce, ve->siblings[0]);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3722
static int virtual_context_pre_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3726
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3729
return __execlists_context_pre_pin(ce, ve->siblings[0], ww, vaddr);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3732
static int virtual_context_pin(struct intel_context *ce, void *vaddr)
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3734
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3736
return lrc_pin(ce, ve->siblings[0], vaddr);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3739
static void virtual_context_enter(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3741
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3747
intel_timeline_enter(ce->timeline);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3750
static void virtual_context_exit(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3752
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
3755
intel_timeline_exit(ce->timeline);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
432
struct intel_context * const ce = rq->context;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
457
head = __active_request(ce->timeline, rq, -EIO)->head;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
458
head = intel_ring_wrap(ce->ring, head);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
461
lrc_init_regs(ce, engine, true);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
464
ce->lrc.lrca = lrc_update_regs(ce, engine, head);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
476
struct intel_context * const ce = rq->context;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
478
intel_context_get(ce);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
480
if (unlikely(intel_context_is_closed(ce) &&
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
482
intel_context_set_exiting(ce);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
484
if (unlikely(!intel_context_is_schedulable(ce) || bad_request(rq)))
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
488
lrc_check_regs(ce, engine, "before");
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
490
if (ce->tag) {
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
492
GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
493
ce->lrc.ccid = ce->tag;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
500
ce->lrc.ccid = tag << (XEHP_SW_CTX_ID_SHIFT - 32);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
510
ce->lrc.ccid = (1 + tag) << (GEN11_SW_CTX_ID_SHIFT - 32);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
515
ce->lrc.ccid |= engine->execlists.ccid;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
523
CE_TRACE(ce, "schedule-in, ccid:%x\n", ce->lrc.ccid);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
530
struct intel_context * const ce = rq->context;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
536
old = ce->inflight;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
539
WRITE_ONCE(ce->inflight, ptr_inc(old));
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
541
GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
558
static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
560
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
570
if (!list_empty(&ce->signals))
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
571
intel_context_remove_breadcrumbs(ce, engine->breadcrumbs);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
588
struct intel_context * const ce)
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
599
CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
600
GEM_BUG_ON(ce->inflight != engine);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
603
lrc_check_regs(ce, engine, "after");
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
609
if (intel_timeline_is_last(ce->timeline, rq) &&
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
611
intel_engine_add_retire(engine, ce->timeline);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
613
ccid = ce->lrc.ccid;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
642
if (ce->engine != engine)
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
643
kick_siblings(rq, ce);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
645
WRITE_ONCE(ce->inflight, NULL);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
646
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
651
struct intel_context * const ce = rq->context;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
655
GEM_BUG_ON(!ce->inflight);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
656
ce->inflight = ptr_dec(ce->inflight);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
657
if (!__intel_context_inflight_count(ce->inflight))
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
658
__execlists_schedule_out(rq, ce);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
675
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
679
desc = ce->lrc.desc;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
700
GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
705
ce->lrc_reg_state[CTX_RING_TAIL] = tail;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
720
ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
783
struct intel_context *ce = NULL;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
811
if (ce == rq->context) {
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
814
ce->timeline->fence_context,
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
818
ce = rq->context;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
820
if (ccid == ce->lrc.ccid) {
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
823
ccid, ce->timeline->fence_context,
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
827
ccid = ce->lrc.ccid;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
842
ce->timeline->fence_context,
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
857
ce->timeline->fence_context,
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
869
if (i915_active_is_idle(&ce->active) &&
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
870
!intel_context_is_barrier(ce)) {
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
873
ce->timeline->fence_context,
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
879
if (!i915_vma_is_pinned(ce->state)) {
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
882
ce->timeline->fence_context,
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
888
if (!i915_vma_is_pinned(ce->ring->vma)) {
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
891
ce->timeline->fence_context,
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
903
return ce;
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
942
static bool ctx_single_port_submission(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_execlists_submission.c
945
intel_context_force_single_submission(ce));
sys/dev/pci/drm/i915/gt/intel_ggtt.c
339
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
345
ce = gt->engine[BCS0]->bind_context;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
346
GEM_BUG_ON(!ce);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
358
intel_engine_pm_get(ce->engine);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
360
return ce;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
363
static void gen8_ggtt_bind_put_ce(struct intel_context *ce, intel_wakeref_t wakeref)
sys/dev/pci/drm/i915/gt/intel_ggtt.c
365
intel_engine_pm_put(ce->engine);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
366
intel_gt_pm_put(ce->engine->gt, wakeref);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
378
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_ggtt.c
385
ce = gen8_ggtt_bind_get_ce(ggtt, &wakeref);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
386
if (!ce)
sys/dev/pci/drm/i915/gt/intel_ggtt.c
401
if (mutex_lock_interruptible(&ce->timeline->mutex))
sys/dev/pci/drm/i915/gt/intel_ggtt.c
404
intel_context_enter(ce);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
405
rq = __i915_request_create(ce, GFP_NOWAIT | GFP_ATOMIC);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
406
intel_context_exit(ce);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
409
mutex_unlock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
449
mutex_unlock(&ce->timeline->mutex);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
461
gen8_ggtt_bind_put_ce(ce, wakeref);
sys/dev/pci/drm/i915/gt/intel_ggtt.c
467
gen8_ggtt_bind_put_ce(ce, wakeref);
sys/dev/pci/drm/i915/gt/intel_gt.c
532
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_gt.c
538
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/intel_gt.c
539
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/intel_gt.c
540
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/intel_gt.c
544
err = intel_renderstate_init(&so, ce);
sys/dev/pci/drm/i915/gt/intel_gt.c
548
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/intel_gt.c
566
intel_renderstate_fini(&so, ce);
sys/dev/pci/drm/i915/gt/intel_gt.c
569
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/intel_gt.c
620
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_gt.c
627
ce = rq->context;
sys/dev/pci/drm/i915/gt/intel_gt.c
629
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1000
static u32 *context_wabb(const struct intel_context *ce, bool per_ctx)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1004
GEM_BUG_ON(!ce->wa_bb_page);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1006
ptr = ce->lrc_reg_state;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1008
ptr += context_wa_bb_offset(ce);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1014
void lrc_init_state(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1022
if (ce->default_state) {
sys/dev/pci/drm/i915/gt/intel_lrc.c
1024
shmem_read(ce->default_state, 0, state, engine->context_size);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1026
uao_read(ce->default_state, 0, state, engine->context_size);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1028
__set_bit(CONTEXT_VALID_BIT, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1036
if (ce->wa_bb_page)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1037
memset(state + context_wa_bb_offset(ce), 0, PAGE_SIZE);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1043
__lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1046
u32 lrc_indirect_bb(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1048
return i915_ggtt_offset(ce->state) + context_wa_bb_offset(ce);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1051
static u32 *setup_predicate_disable_wa(const struct intel_context *ce, u32 *cs)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1055
*cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1065
*cs++ = lrc_indirect_bb(ce) + DG2_PREDICATE_RESULT_WA;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1076
__lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1088
ce->wa_bb_page = context_size / PAGE_SIZE;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1093
if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
sys/dev/pci/drm/i915/gt/intel_lrc.c
1094
ce->parallel.guc.parent_page = context_size / PAGE_SIZE;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1124
pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1126
struct intel_timeline *tl = fetch_and_zero(&ce->timeline);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1131
int lrc_alloc(struct intel_context *ce, struct intel_engine_cs *engine)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1137
GEM_BUG_ON(ce->state);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1139
if (!intel_context_has_own_state(ce))
sys/dev/pci/drm/i915/gt/intel_lrc.c
1140
ce->default_state = engine->default_state;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1142
vma = __lrc_alloc_state(ce, engine);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1146
ring = intel_engine_create_ring(engine, ce->ring_size);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1152
if (!page_mask_bits(ce->timeline)) {
sys/dev/pci/drm/i915/gt/intel_lrc.c
1159
if (unlikely(ce->timeline))
sys/dev/pci/drm/i915/gt/intel_lrc.c
1160
tl = pinned_timeline(ce, engine);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1168
ce->timeline = tl;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1171
ce->ring = ring;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1172
ce->state = vma;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1183
void lrc_reset(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1185
GEM_BUG_ON(!intel_context_is_pinned(ce));
sys/dev/pci/drm/i915/gt/intel_lrc.c
1187
intel_ring_reset(ce->ring, ce->ring->emit);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1190
lrc_init_regs(ce, ce->engine, true);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1191
ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1195
lrc_pre_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1200
GEM_BUG_ON(!ce->state);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1201
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
sys/dev/pci/drm/i915/gt/intel_lrc.c
1203
*vaddr = i915_gem_object_pin_map(ce->state->obj,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1204
intel_gt_coherent_map_type(ce->engine->gt,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1205
ce->state->obj,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1213
lrc_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1217
ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1219
if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags))
sys/dev/pci/drm/i915/gt/intel_lrc.c
1220
lrc_init_state(ce, engine, vaddr);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1222
ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1226
void lrc_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1228
if (unlikely(ce->parallel.last_rq)) {
sys/dev/pci/drm/i915/gt/intel_lrc.c
1229
i915_request_put(ce->parallel.last_rq);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1230
ce->parallel.last_rq = NULL;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1232
check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1233
ce->engine);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1236
void lrc_post_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1238
i915_gem_object_unpin_map(ce->state->obj);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1241
void lrc_fini(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1243
if (!ce->state)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1246
intel_ring_put(fetch_and_zero(&ce->ring));
sys/dev/pci/drm/i915/gt/intel_lrc.c
1247
i915_vma_put(fetch_and_zero(&ce->state));
sys/dev/pci/drm/i915/gt/intel_lrc.c
1252
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1254
GEM_BUG_ON(!i915_active_is_idle(&ce->active));
sys/dev/pci/drm/i915/gt/intel_lrc.c
1255
GEM_BUG_ON(intel_context_is_pinned(ce));
sys/dev/pci/drm/i915/gt/intel_lrc.c
1257
lrc_fini(ce);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1259
intel_context_fini(ce);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1260
intel_context_free(ce);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1264
gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1270
*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
sys/dev/pci/drm/i915/gt/intel_lrc.c
1290
gen12_emit_restore_scratch(const struct intel_context *ce, u32 *cs)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1292
GEM_BUG_ON(lrc_ring_gpr0(ce->engine) == -1);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1298
*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
sys/dev/pci/drm/i915/gt/intel_lrc.c
1299
(lrc_ring_gpr0(ce->engine) + 1) * sizeof(u32);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1306
gen12_emit_cmd_buf_wa(const struct intel_context *ce, u32 *cs)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1308
GEM_BUG_ON(lrc_ring_cmd_buf_cctl(ce->engine) == -1);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1314
*cs++ = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET +
sys/dev/pci/drm/i915/gt/intel_lrc.c
1315
(lrc_ring_cmd_buf_cctl(ce->engine) + 1) * sizeof(u32);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1354
gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1356
cs = gen12_emit_timestamp_wa(ce, cs);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1357
cs = gen12_emit_cmd_buf_wa(ce, cs);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1358
cs = gen12_emit_restore_scratch(ce, cs);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1361
if (IS_DG2_G11(ce->engine->i915))
sys/dev/pci/drm/i915/gt/intel_lrc.c
1364
cs = gen12_emit_aux_table_inv(ce->engine, cs);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1367
if (IS_GFX_GT_IP_RANGE(ce->engine->gt, IP_VER(12, 0), IP_VER(12, 10)))
sys/dev/pci/drm/i915/gt/intel_lrc.c
1371
if (IS_GFX_GT_IP_STEP(ce->engine->gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
sys/dev/pci/drm/i915/gt/intel_lrc.c
1372
IS_GFX_GT_IP_STEP(ce->engine->gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
sys/dev/pci/drm/i915/gt/intel_lrc.c
1373
IS_DG2(ce->engine->i915))
sys/dev/pci/drm/i915/gt/intel_lrc.c
1380
gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1382
cs = gen12_emit_timestamp_wa(ce, cs);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1383
cs = gen12_emit_restore_scratch(ce, cs);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1386
if (IS_DG2_G11(ce->engine->i915))
sys/dev/pci/drm/i915/gt/intel_lrc.c
1387
if (ce->engine->class == COMPUTE_CLASS)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1392
return gen12_emit_aux_table_inv(ce->engine, cs);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1395
static u32 *xehp_emit_fastcolor_blt_wabb(const struct intel_context *ce, u32 *cs)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1397
struct intel_gt *gt = ce->engine->gt;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1423
*cs++ = lower_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
sys/dev/pci/drm/i915/gt/intel_lrc.c
1424
*cs++ = upper_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
sys/dev/pci/drm/i915/gt/intel_lrc.c
1440
xehp_emit_per_ctx_bb(const struct intel_context *ce, u32 *cs)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1443
if (NEEDS_FASTCOLOR_BLT_WABB(ce->engine))
sys/dev/pci/drm/i915/gt/intel_lrc.c
1444
cs = xehp_emit_fastcolor_blt_wabb(ce, cs);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1450
setup_per_ctx_bb(const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1455
u32 * const start = context_wabb(ce, true);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1458
cs = emit(ce, start);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1464
lrc_setup_bb_per_ctx(ce->lrc_reg_state, engine,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1465
lrc_indirect_bb(ce) + PAGE_SIZE);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1469
setup_indirect_ctx_bb(const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1473
u32 * const start = context_wabb(ce, false);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1476
cs = emit(ce, start);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1482
setup_predicate_disable_wa(ce, start + DG2_PREDICATE_RESULT_BB / sizeof(*start));
sys/dev/pci/drm/i915/gt/intel_lrc.c
1484
lrc_setup_indirect_ctx(ce->lrc_reg_state, engine,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1485
lrc_indirect_bb(ce),
sys/dev/pci/drm/i915/gt/intel_lrc.c
1523
static u32 lrc_descriptor(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1528
if (i915_vm_is_4lvl(ce->vm))
sys/dev/pci/drm/i915/gt/intel_lrc.c
1533
if (GRAPHICS_VER(ce->vm->i915) == 8)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1536
return i915_ggtt_offset(ce->state) | desc;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1539
u32 lrc_update_regs(const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1543
struct intel_ring *ring = ce->ring;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1544
u32 *regs = ce->lrc_reg_state;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1557
intel_sseu_make_rpcs(engine->gt, &ce->sseu);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1559
i915_oa_init_reg_state(ce, engine);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1562
if (ce->wa_bb_page) {
sys/dev/pci/drm/i915/gt/intel_lrc.c
1563
u32 *(*fn)(const struct intel_context *ce, u32 *cs);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1566
if (ce->engine->class == RENDER_CLASS)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1571
setup_indirect_ctx_bb(ce, engine, fn);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1572
setup_per_ctx_bb(ce, engine, xehp_emit_per_ctx_bb);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1575
return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1578
void lrc_update_offsets(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1581
set_offsets(ce->lrc_reg_state, reg_offsets(engine), engine, false);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1584
void lrc_check_regs(const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.c
1588
const struct intel_ring *ring = ce->ring;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1589
u32 *regs = ce->lrc_reg_state;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1948
static u32 lrc_get_runtime(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1956
return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1959
void lrc_update_runtime(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_lrc.c
1961
struct intel_context_stats *stats = &ce->stats;
sys/dev/pci/drm/i915/gt/intel_lrc.c
1966
stats->runtime.last = lrc_get_runtime(ce);
sys/dev/pci/drm/i915/gt/intel_lrc.c
1972
CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
sys/dev/pci/drm/i915/gt/intel_lrc.c
816
static bool ctx_needs_runalone(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_lrc.c
827
if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 70) &&
sys/dev/pci/drm/i915/gt/intel_lrc.c
828
(ce->engine->class == COMPUTE_CLASS || ce->engine->class == RENDER_CLASS)) {
sys/dev/pci/drm/i915/gt/intel_lrc.c
830
gem_ctx = rcu_dereference(ce->gem_context);
sys/dev/pci/drm/i915/gt/intel_lrc.c
840
const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.c
855
if (ctx_needs_runalone(ce))
sys/dev/pci/drm/i915/gt/intel_lrc.c
859
regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
sys/dev/pci/drm/i915/gt/intel_lrc.c
923
const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.c
943
init_common_regs(regs, ce, engine, inhibit);
sys/dev/pci/drm/i915/gt/intel_lrc.c
944
init_ppgtt_regs(regs, vm_alias(ce->vm));
sys/dev/pci/drm/i915/gt/intel_lrc.c
951
void lrc_init_regs(const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.c
955
__lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit);
sys/dev/pci/drm/i915/gt/intel_lrc.c
958
void lrc_reset_regs(const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.c
961
__reset_stop_ring(ce->lrc_reg_state, engine);
sys/dev/pci/drm/i915/gt/intel_lrc.c
989
static u32 context_wa_bb_offset(const struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_lrc.c
991
return PAGE_SIZE * ce->wa_bb_page;
sys/dev/pci/drm/i915/gt/intel_lrc.h
116
static inline void lrc_runtime_start(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_lrc.h
118
struct intel_context_stats *stats = &ce->stats;
sys/dev/pci/drm/i915/gt/intel_lrc.h
120
if (intel_context_is_barrier(ce))
sys/dev/pci/drm/i915/gt/intel_lrc.h
129
static inline void lrc_runtime_stop(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_lrc.h
131
struct intel_context_stats *stats = &ce->stats;
sys/dev/pci/drm/i915/gt/intel_lrc.h
136
lrc_update_runtime(ce);
sys/dev/pci/drm/i915/gt/intel_lrc.h
143
u32 lrc_indirect_bb(const struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_lrc.h
36
int lrc_alloc(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.h
38
void lrc_reset(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_lrc.h
39
void lrc_fini(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_lrc.h
43
lrc_pre_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.h
48
lrc_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.h
51
void lrc_unpin(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_lrc.h
52
void lrc_post_unpin(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_lrc.h
54
void lrc_init_state(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.h
58
void lrc_init_regs(const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.h
61
void lrc_reset_regs(const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.h
64
u32 lrc_update_regs(const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.h
67
void lrc_update_offsets(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.h
70
void lrc_check_regs(const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_lrc.h
74
void lrc_update_runtime(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1001
GEM_BUG_ON(ce->ring->size < SZ_64K);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1010
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1089
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_migrate.c
1096
ce = intel_migrate_create_context(m);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1097
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/intel_migrate.c
1098
ce = intel_context_get(m->context);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1099
GEM_BUG_ON(IS_ERR(ce));
sys/dev/pci/drm/i915/gt/intel_migrate.c
1101
err = intel_context_pin_ww(ce, ww);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1105
err = intel_context_migrate_copy(ce, deps,
sys/dev/pci/drm/i915/gt/intel_migrate.c
1110
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1112
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1126
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_migrate.c
1133
ce = intel_migrate_create_context(m);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1134
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/intel_migrate.c
1135
ce = intel_context_get(m->context);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1136
GEM_BUG_ON(IS_ERR(ce));
sys/dev/pci/drm/i915/gt/intel_migrate.c
1138
err = intel_context_pin_ww(ce, ww);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1142
err = intel_context_migrate_clear(ce, deps, sg, pat_index,
sys/dev/pci/drm/i915/gt/intel_migrate.c
1145
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1147
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1153
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_migrate.c
1155
ce = fetch_and_zero(&m->context);
sys/dev/pci/drm/i915/gt/intel_migrate.c
1156
if (!ce)
sys/dev/pci/drm/i915/gt/intel_migrate.c
1159
intel_engine_destroy_pinned_context(ce);
sys/dev/pci/drm/i915/gt/intel_migrate.c
250
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_migrate.c
260
ce = intel_engine_create_pinned_context(engine, vm, SZ_512K,
sys/dev/pci/drm/i915/gt/intel_migrate.c
264
return ce;
sys/dev/pci/drm/i915/gt/intel_migrate.c
269
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_migrate.c
273
ce = pinned_context(gt);
sys/dev/pci/drm/i915/gt/intel_migrate.c
274
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/intel_migrate.c
275
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/intel_migrate.c
277
m->context = ce;
sys/dev/pci/drm/i915/gt/intel_migrate.c
304
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/intel_migrate.c
314
ce = __migrate_engines(m->context->engine->gt);
sys/dev/pci/drm/i915/gt/intel_migrate.c
315
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/intel_migrate.c
316
return ce;
sys/dev/pci/drm/i915/gt/intel_migrate.c
318
ce->ring = NULL;
sys/dev/pci/drm/i915/gt/intel_migrate.c
319
ce->ring_size = SZ_256K;
sys/dev/pci/drm/i915/gt/intel_migrate.c
321
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/gt/intel_migrate.c
322
ce->vm = i915_vm_get(m->context->vm);
sys/dev/pci/drm/i915/gt/intel_migrate.c
324
return ce;
sys/dev/pci/drm/i915/gt/intel_migrate.c
678
intel_context_migrate_copy(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_migrate.c
689
struct drm_i915_private *i915 = ce->engine->i915;
sys/dev/pci/drm/i915/gt/intel_migrate.c
699
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
sys/dev/pci/drm/i915/gt/intel_migrate.c
700
GEM_BUG_ON(IS_DGFX(ce->engine->i915) && (!src_is_lmem && !dst_is_lmem));
sys/dev/pci/drm/i915/gt/intel_migrate.c
703
GEM_BUG_ON(ce->ring->size < SZ_64K);
sys/dev/pci/drm/i915/gt/intel_migrate.c
741
if (HAS_64K_PAGES(ce->engine->i915)) {
sys/dev/pci/drm/i915/gt/intel_migrate.c
753
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/intel_migrate.c
984
intel_context_migrate_clear(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_migrate.c
992
struct drm_i915_private *i915 = ce->engine->i915;
sys/dev/pci/drm/i915/gt/intel_migrate.c
998
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
sys/dev/pci/drm/i915/gt/intel_migrate.h
35
int intel_context_migrate_copy(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_migrate.h
55
intel_context_migrate_clear(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_renderstate.c
143
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_renderstate.c
145
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/intel_renderstate.c
169
err = intel_context_pin_ww(ce, &so->ww);
sys/dev/pci/drm/i915/gt/intel_renderstate.c
194
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_renderstate.c
240
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_renderstate.c
247
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_renderstate.h
46
struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_renderstate.h
50
struct intel_context *ce);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
1015
*residuals = intel_context_get(ce);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
485
static void __ring_context_fini(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
487
i915_vma_put(ce->state);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
492
struct intel_context *ce = container_of(ref, typeof(*ce), ref);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
494
GEM_BUG_ON(intel_context_is_pinned(ce));
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
496
if (ce->state)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
497
__ring_context_fini(ce);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
499
intel_context_fini(ce);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
500
intel_context_free(ce);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
503
static int ring_context_init_default_state(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
506
struct drm_i915_gem_object *obj = ce->state->obj;
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
514
shmem_read(ce->default_state, 0, vaddr, ce->engine->context_size);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
516
uao_read(ce->default_state, 0, vaddr, ce->engine->context_size);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
522
__set_bit(CONTEXT_VALID_BIT, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
526
static int ring_context_pre_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
533
if (ce->default_state &&
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
534
!test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
535
err = ring_context_init_default_state(ce, ww);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
540
vm = vm_alias(ce->vm);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
547
static void __context_unpin_ppgtt(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
551
vm = vm_alias(ce->vm);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
556
static void ring_context_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
560
static void ring_context_post_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
562
__context_unpin_ppgtt(ce);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
608
static int ring_context_alloc(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
610
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
612
if (!intel_context_has_own_state(ce))
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
613
ce->default_state = engine->default_state;
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
617
ce->ring = engine->legacy.ring;
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
619
GEM_BUG_ON(ce->state);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
627
ce->state = vma;
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
630
ce->timeline = intel_timeline_get(engine->legacy.timeline);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
635
static int ring_context_pin(struct intel_context *ce, void *unused)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
640
static void ring_context_reset(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
642
intel_ring_reset(ce->ring, ce->ring->emit);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
643
clear_bit(CONTEXT_VALID_BIT, &ce->flags);
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
646
static void ring_context_revoke(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
659
if (rq->context == ce) {
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
665
static void ring_context_cancel_request(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
732
struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
808
*cs++ = i915_ggtt_offset(ce->state) | flags;
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
959
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
965
if (engine->wa_ctx.vma && ce != engine->kernel_context) {
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
966
if (engine->wa_ctx.vma->private != ce &&
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
976
ret = switch_mm(rq, vm_alias(ce->vm));
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
980
if (ce->state) {
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
990
if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
sys/dev/pci/drm/i915/gt/intel_ring_submission.c
995
ret = mi_set_context(rq, ce, flags);
sys/dev/pci/drm/i915/gt/intel_workarounds.c
3027
static int engine_wa_list_verify(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/intel_workarounds.c
3042
vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
sys/dev/pci/drm/i915/gt/intel_workarounds.c
3047
intel_engine_pm_get(ce->engine);
sys/dev/pci/drm/i915/gt/intel_workarounds.c
3052
err = intel_context_pin_ww(ce, &ww);
sys/dev/pci/drm/i915/gt/intel_workarounds.c
3061
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/intel_workarounds.c
3106
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/intel_workarounds.c
3114
intel_engine_pm_put(ce->engine);
sys/dev/pci/drm/i915/gt/mock_engine.c
139
static void mock_context_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/mock_engine.c
143
static void mock_context_post_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/mock_engine.c
145
i915_vma_unpin(ce->ring->vma);
sys/dev/pci/drm/i915/gt/mock_engine.c
150
struct intel_context *ce = container_of(ref, typeof(*ce), ref);
sys/dev/pci/drm/i915/gt/mock_engine.c
152
GEM_BUG_ON(intel_context_is_pinned(ce));
sys/dev/pci/drm/i915/gt/mock_engine.c
154
if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
sys/dev/pci/drm/i915/gt/mock_engine.c
155
mock_ring_free(ce->ring);
sys/dev/pci/drm/i915/gt/mock_engine.c
156
mock_timeline_unpin(ce->timeline);
sys/dev/pci/drm/i915/gt/mock_engine.c
159
intel_context_fini(ce);
sys/dev/pci/drm/i915/gt/mock_engine.c
160
intel_context_free(ce);
sys/dev/pci/drm/i915/gt/mock_engine.c
163
static int mock_context_alloc(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/mock_engine.c
167
ce->ring = mock_ring(ce->engine);
sys/dev/pci/drm/i915/gt/mock_engine.c
168
if (!ce->ring)
sys/dev/pci/drm/i915/gt/mock_engine.c
171
ce->timeline = intel_timeline_create(ce->engine->gt);
sys/dev/pci/drm/i915/gt/mock_engine.c
172
if (IS_ERR(ce->timeline)) {
sys/dev/pci/drm/i915/gt/mock_engine.c
173
kfree(ce->engine);
sys/dev/pci/drm/i915/gt/mock_engine.c
174
return PTR_ERR(ce->timeline);
sys/dev/pci/drm/i915/gt/mock_engine.c
177
err = mock_timeline_pin(ce->timeline);
sys/dev/pci/drm/i915/gt/mock_engine.c
179
intel_timeline_put(ce->timeline);
sys/dev/pci/drm/i915/gt/mock_engine.c
180
ce->timeline = NULL;
sys/dev/pci/drm/i915/gt/mock_engine.c
187
static int mock_context_pre_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/mock_engine.c
190
return i915_vma_pin_ww(ce->ring->vma, ww, 0, 0, PIN_GLOBAL | PIN_HIGH);
sys/dev/pci/drm/i915/gt/mock_engine.c
193
static int mock_context_pin(struct intel_context *ce, void *unused)
sys/dev/pci/drm/i915/gt/mock_engine.c
198
static void mock_context_reset(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/mock_engine.c
395
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/mock_engine.c
412
ce = create_kernel_context(engine);
sys/dev/pci/drm/i915/gt/mock_engine.c
413
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/mock_engine.c
417
engine->status_page.vma = ce->timeline->hwsp_ggtt;
sys/dev/pci/drm/i915/gt/mock_engine.c
419
engine->kernel_context = ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1049
create_rewinder(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1054
i915_ggtt_offset(ce->engine->status_page.vma) +
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1060
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1131
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1155
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1156
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1157
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1161
rq[A1] = create_rewinder(ce, NULL, slot, X);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1163
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1167
rq[A2] = create_rewinder(ce, NULL, slot, Y);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1168
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1179
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1180
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1181
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1185
rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1186
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
124
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
127
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
128
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
129
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
133
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1429
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1436
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1437
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1438
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1447
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1448
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1468
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1469
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1470
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1474
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1475
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
155
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1720
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1723
ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1724
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1725
return ERR_CAST(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1727
rq = igt_spinner_create_request(spin, ce, arb);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
1728
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
181
struct intel_context *ce[2] = {};
sys/dev/pci/drm/i915/gt/selftest_execlists.c
198
for (n = 0; n < ARRAY_SIZE(ce); n++) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
222
ce[n] = tmp;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
224
GEM_BUG_ON(!ce[1]->ring->size);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
225
intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
226
lrc_update_regs(ce[1], engine, ce[1]->ring->head);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
228
rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
236
GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
243
rq[1] = i915_request_create(ce[1]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2705
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2711
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2712
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2713
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2721
vma = i915_vma_instance(obj, ce->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2761
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2780
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
279
rq[0] = i915_request_create(ce[0]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2792
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2800
struct intel_context *ce[2] = {};
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2809
for (n = 0; n < ARRAY_SIZE(ce); n++) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2830
ce[n] = tmp;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2833
rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2852
while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2855
tmp = intel_context_create_request(ce[0]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2869
ce[0]->ring->size,
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2870
ce[0]->ring->tail,
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2871
ce[0]->ring->emit,
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2876
rq = intel_context_create_request(ce[1]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2896
ce[0]->ring->tail, ce[0]->ring->emit,
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2897
ce[1]->ring->tail, ce[1]->ring->emit);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2902
for (n = 0; n < ARRAY_SIZE(ce); n++) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2903
if (IS_ERR_OR_NULL(ce[n]))
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2906
intel_context_unpin(ce[n]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
2907
intel_context_put(ce[n]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
296
for (n = 0; n < ARRAY_SIZE(ce); n++) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
297
if (IS_ERR_OR_NULL(ce[n]))
sys/dev/pci/drm/i915/gt/selftest_execlists.c
300
intel_context_unpin(ce[n]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
301
intel_context_put(ce[n]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3152
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3156
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3157
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3158
return ERR_CAST(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3160
vma = i915_vma_instance(global->obj, ce->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3176
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
3203
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
343
struct intel_context *ce[2] = {};
sys/dev/pci/drm/i915/gt/selftest_execlists.c
360
for (n = 0; n < ARRAY_SIZE(ce); n++) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
379
ce[n] = tmp;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
383
rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
402
while (intel_ring_direction(ce[0]->ring,
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4026
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
404
ce[0]->ring->tail) <= 0) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4040
ce = intel_context_create(siblings[n]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4041
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4042
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4046
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4047
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4056
ce = intel_engine_create_virtual(siblings, nsibling, 0);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4057
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4058
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4062
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4063
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
407
tmp = intel_context_create_request(ce[0]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4093
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4108
ce = intel_engine_create_virtual(siblings, nsibling, 0);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4109
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4110
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4114
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4115
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4125
ce = intel_context_create(siblings[n]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4126
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4127
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4131
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
4132
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
421
ce[0]->ring->size,
sys/dev/pci/drm/i915/gt/selftest_execlists.c
422
ce[0]->ring->tail,
sys/dev/pci/drm/i915/gt/selftest_execlists.c
423
ce[0]->ring->emit,
sys/dev/pci/drm/i915/gt/selftest_execlists.c
425
GEM_BUG_ON(intel_ring_direction(ce[0]->ring,
sys/dev/pci/drm/i915/gt/selftest_execlists.c
427
ce[0]->ring->tail) <= 0);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
431
rq = intel_context_create_request(ce[1]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
451
ce[0]->ring->tail, ce[0]->ring->emit,
sys/dev/pci/drm/i915/gt/selftest_execlists.c
452
ce[1]->ring->tail, ce[1]->ring->emit);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
457
for (n = 0; n < ARRAY_SIZE(ce); n++) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
458
if (IS_ERR_OR_NULL(ce[n]))
sys/dev/pci/drm/i915/gt/selftest_execlists.c
461
intel_context_unpin(ce[n]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
462
intel_context_put(ce[n]);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
492
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
502
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
503
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
504
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
508
err = intel_context_pin(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
510
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
515
err = i915_active_acquire(&ce->active);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
517
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
518
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
521
ring = ce->ring;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
529
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
532
GEM_BUG_ON(intel_context_is_pinned(ce));
sys/dev/pci/drm/i915/gt/selftest_execlists.c
533
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
534
i915_active_release(&ce->active); /* e.g. async retire */
sys/dev/pci/drm/i915/gt/selftest_execlists.c
535
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
600
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
603
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
604
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
605
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
611
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
664
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
719
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
722
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
723
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_execlists.c
724
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
728
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
729
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
859
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_execlists.c
863
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
864
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_execlists.c
865
return ERR_CAST(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
867
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_execlists.c
883
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
364
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
367
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
368
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
369
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
377
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
388
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
438
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
450
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
451
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
452
pr_err("[%s] Create context failed: %pe!\n", engine->name, ce);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
453
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
476
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
524
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
558
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
566
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
567
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
568
pr_err("[%s] Create context failed: %pe!\n", engine->name, ce);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
569
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
597
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
673
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
897
struct intel_context *ce[ARRAY_SIZE(rq)];
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
901
for (count = 0; count < ARRAY_SIZE(ce); count++) {
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
902
ce[count] = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
903
if (IS_ERR(ce[count])) {
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
904
arg->result = PTR_ERR(ce[count]);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
908
intel_context_put(ce[count]);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
919
new = intel_context_create_request(ce[idx]);
sys/dev/pci/drm/i915/gt/selftest_hangcheck.c
956
intel_context_put(ce[count]);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1030
ce->engine->name);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1051
shmem_unpin_map(ce->engine->default_state, defaults);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1060
record_registers(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1070
b_before = store_context(ce, before);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1074
b_after = store_context(ce, after);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1080
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
109
static int context_flush(struct intel_context *ce, long timeout)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1117
*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1144
static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
115
rq = intel_engine_create_kernel_request(ce->engine);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1150
batch = create_user_vma(ce->vm, SZ_64K);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1160
defaults = shmem_pin_map(ce->engine->default_state);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1189
ce->engine->name);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
119
fence = i915_active_fence_get(&ce->timeline->last_request);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1199
*cs++ = safe_poison(hw[dw] & get_lri_mask(ce->engine,
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1209
shmem_unpin_map(ce->engine->default_state, defaults);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1217
static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1224
batch = load_context(ce, poison);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1228
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1250
*cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1273
struct intel_context *ce,
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1303
lrc = i915_gem_object_pin_map_unlocked(ce->state->obj,
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1305
ce->state->obj,
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1313
defaults = shmem_pin_map(ce->engine->default_state);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1372
shmem_unpin_map(ce->engine->default_state, defaults);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1374
i915_gem_object_unpin_map(ce->state->obj);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1567
static int wabb_ctx_submit_req(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1572
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1591
emit_wabb_ctx_canary(const struct intel_context *ce,
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1598
*cs++ = i915_ggtt_offset(ce->state) +
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1599
context_wa_bb_offset(ce) +
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1608
emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1610
return emit_wabb_ctx_canary(ce, cs, false);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1614
emit_per_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1616
return emit_wabb_ctx_canary(ce, cs, true);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1620
wabb_ctx_setup(struct intel_context *ce, bool per_ctx)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1622
u32 *cs = context_wabb(ce, per_ctx);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1627
setup_per_ctx_bb(ce, ce->engine, emit_per_ctx_bb_canary);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1629
setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1632
static bool check_ring_start(struct intel_context *ce, bool per_ctx)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1634
const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1635
LRC_STATE_OFFSET + context_wa_bb_offset(ce) +
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1638
if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1643
ce->lrc_reg_state[CTX_RING_START]);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1648
static int wabb_ctx_check(struct intel_context *ce, bool per_ctx)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1652
err = wabb_ctx_submit_req(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1656
if (!check_ring_start(ce, per_ctx))
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1769
static struct i915_request *garbage(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1775
err = intel_context_pin(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1780
ce->lrc_reg_state,
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1781
ce->engine->context_size -
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1784
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1795
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1801
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1805
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1806
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1807
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1809
hang = garbage(ce, prng);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1821
intel_context_set_banned(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1843
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1887
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1892
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1893
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1894
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1896
ce->stats.runtime.num_underflow = 0;
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1897
ce->stats.runtime.max_underflow = 0;
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1903
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1931
intel_context_get_total_runtime_ns(ce),
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1932
intel_context_get_avg_runtime_ns(ce));
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1935
if (ce->stats.runtime.num_underflow) {
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1938
ce->stats.runtime.num_underflow,
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1939
ce->stats.runtime.max_underflow);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
1947
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
406
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_lrc.c
419
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
420
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_lrc.c
421
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
427
err = intel_context_pin_ww(ce, &ww);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
431
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
449
expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
464
expected[RING_TAIL_IDX] = ce->ring->tail;
sys/dev/pci/drm/i915/gt/selftest_lrc.c
491
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
499
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
533
static int gpr_make_dirty(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
539
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
551
*cs++ = CS_GPR(ce->engine, n);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
565
__gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
568
i915_ggtt_offset(ce->engine->status_page.vma) +
sys/dev/pci/drm/i915/gt/selftest_lrc.c
575
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
598
*cs++ = CS_GPR(ce->engine, n);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
620
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_lrc.c
633
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
634
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_lrc.c
635
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
637
rq = __gpr_read(ce, scratch, slot);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
693
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
738
create_timestamp(struct intel_context *ce, void *slot, int idx)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
741
i915_ggtt_offset(ce->engine->status_page.vma) +
sys/dev/pci/drm/i915/gt/selftest_lrc.c
747
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
789
struct intel_context *ce[2];
sys/dev/pci/drm/i915/gt/selftest_lrc.c
79
static int emit_semaphore_signal(struct intel_context *ce, void *slot)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
805
arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison;
sys/dev/pci/drm/i915/gt/selftest_lrc.c
806
rq = create_timestamp(arg->ce[0], slot, 1);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
815
arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef;
sys/dev/pci/drm/i915/gt/selftest_lrc.c
816
err = emit_semaphore_signal(arg->ce[1], slot);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
82
i915_ggtt_offset(ce->engine->status_page.vma) +
sys/dev/pci/drm/i915/gt/selftest_lrc.c
825
err = context_flush(arg->ce[0], HZ / 2);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
836
timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
87
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
884
for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
sys/dev/pci/drm/i915/gt/selftest_lrc.c
899
data.ce[i] = tmp;
sys/dev/pci/drm/i915/gt/selftest_lrc.c
916
for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
sys/dev/pci/drm/i915/gt/selftest_lrc.c
917
if (!data.ce[i])
sys/dev/pci/drm/i915/gt/selftest_lrc.c
920
intel_context_unpin(data.ce[i]);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
921
intel_context_put(data.ce[i]);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
973
store_context(struct intel_context *ce, struct i915_vma *scratch)
sys/dev/pci/drm/i915/gt/selftest_lrc.c
979
batch = create_user_vma(ce->vm, SZ_64K);
sys/dev/pci/drm/i915/gt/selftest_lrc.c
989
defaults = shmem_pin_map(ce->engine->default_state);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
137
static int intel_context_copy_ccs(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
151
GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
154
GEM_BUG_ON(ce->ring->size < SZ_64K);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
157
if (HAS_64K_PAGES(ce->engine->i915))
sys/dev/pci/drm/i915/gt/selftest_migrate.c
163
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
230
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_migrate.c
237
ce = intel_migrate_create_context(m);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
238
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_migrate.c
239
ce = intel_context_get(m->context);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
240
GEM_BUG_ON(IS_ERR(ce));
sys/dev/pci/drm/i915/gt/selftest_migrate.c
242
err = intel_context_pin_ww(ce, ww);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
246
err = intel_context_copy_ccs(ce, deps, sg, pat_index,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
249
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
251
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
552
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_migrate.c
578
ce = intel_migrate_create_context(migrate);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
579
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_migrate.c
580
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
584
ce->ring_size = SZ_4K; /* Not too big */
sys/dev/pci/drm/i915/gt/selftest_migrate.c
586
err = intel_context_pin(ce);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
590
rq = igt_spinner_create_request(&st.spin, ce, MI_ARB_CHECK);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
616
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
666
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
668
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_migrate.c
843
static int __perf_clear_blt(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
859
err = intel_context_migrate_clear(ce, NULL, sg, pat_index,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
877
ce->engine->name, sz >> 10,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
919
static int __perf_copy_blt(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
938
err = intel_context_migrate_copy(ce, NULL,
sys/dev/pci/drm/i915/gt/selftest_migrate.c
960
ce->engine->name, sz >> 10,
sys/dev/pci/drm/i915/gt/selftest_mocs.c
218
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/selftest_mocs.c
228
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
238
if (!err && ce->engine->class == RENDER_CLASS)
sys/dev/pci/drm/i915/gt/selftest_mocs.c
250
err = check_mocs_table(ce->engine, arg->mocs, &vaddr);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
251
if (!err && ce->engine->class == RENDER_CLASS)
sys/dev/pci/drm/i915/gt/selftest_mocs.c
252
err = check_l3cc_table(ce->engine, arg->l3cc, &vaddr);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
26
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_mocs.c
28
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
29
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_mocs.c
30
return ce;
sys/dev/pci/drm/i915/gt/selftest_mocs.c
301
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_mocs.c
303
ce = mocs_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
304
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_mocs.c
305
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
309
err = check_mocs_engine(&mocs, ce);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
310
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
319
static int active_engine_reset(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/selftest_mocs.c
327
err = igt_spinner_init(&spin, ce->engine->gt);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
33
ce->ring_size = SZ_16K;
sys/dev/pci/drm/i915/gt/selftest_mocs.c
331
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
339
err = intel_engine_reset(ce->engine, reason);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
35
return ce;
sys/dev/pci/drm/i915/gt/selftest_mocs.c
352
struct intel_context *ce, bool using_guc)
sys/dev/pci/drm/i915/gt/selftest_mocs.c
354
struct intel_gt *gt = ce->engine->gt;
sys/dev/pci/drm/i915/gt/selftest_mocs.c
359
err = intel_engine_reset(ce->engine, "mocs");
sys/dev/pci/drm/i915/gt/selftest_mocs.c
363
err = check_mocs_engine(mocs, ce);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
368
err = active_engine_reset(ce, "mocs", using_guc);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
372
err = check_mocs_engine(mocs, ce);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
378
intel_gt_reset(gt, ce->engine->mask, "mocs");
sys/dev/pci/drm/i915/gt/selftest_mocs.c
380
err = check_mocs_engine(mocs, ce);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
406
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_mocs.c
414
ce = mocs_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
415
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_mocs.c
416
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
422
err = __live_mocs_reset(&mocs, ce, using_guc);
sys/dev/pci/drm/i915/gt/selftest_mocs.c
425
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_rc6.c
156
static const u32 *__live_rc6_ctx(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/selftest_rc6.c
163
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_rc6.c
179
*cs++ = ce->timeline->hwsp_offset + 8;
sys/dev/pci/drm/i915/gt/selftest_rc6.c
240
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_rc6.c
246
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_rc6.c
247
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_rc6.c
248
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_rc6.c
253
res = __live_rc6_ctx(ce);
sys/dev/pci/drm/i915/gt/selftest_rc6.c
255
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_reset.c
59
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_reset.c
68
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_reset.c
69
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_reset.c
70
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_reset.c
73
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
sys/dev/pci/drm/i915/gt/selftest_reset.c
74
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
149
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
152
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
153
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
154
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
158
err = context_sync(ce);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
162
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
176
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
179
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
180
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
181
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
185
err = context_sync(ce);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
193
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
70
static int context_sync(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
75
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
91
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
94
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
95
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
96
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
98
err = context_sync(ce);
sys/dev/pci/drm/i915/gt/selftest_ring_submission.c
99
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1020
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1044
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1045
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1046
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1050
ce->timeline = intel_timeline_get(tl);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1053
err = intel_context_pin(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1055
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1066
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1069
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1070
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1079
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1080
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1093
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1094
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1107
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1108
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1116
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1117
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1192
struct intel_context *ce = engine->kernel_context;
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1193
struct intel_timeline *tl = ce->timeline;
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1210
this = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1272
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1275
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1276
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1277
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1279
err = intel_context_alloc_state(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1283
tl = ce->timeline;
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1287
err = intel_context_pin(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1297
this = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1330
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
1334
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
877
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_timeline.c
879
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
880
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_timeline.c
881
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
883
ce->ring_size = ringsz;
sys/dev/pci/drm/i915/gt/selftest_timeline.c
884
w->rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
885
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
958
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/selftest_timeline.c
959
struct intel_timeline *tl = ce->timeline;
sys/dev/pci/drm/i915/gt/selftest_timeline.c
964
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_timeline.c
973
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
124
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
143
if (ce->engine->class == OTHER_CLASS)
sys/dev/pci/drm/i915/gt/selftest_tlb.c
151
ce->engine->name, va->obj->mm.region->name ?: "smem",
sys/dev/pci/drm/i915/gt/selftest_tlb.c
168
ce->vm->insert_entries(ce->vm, &vb_res, pat_index, pte_flags);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
171
tlbinv(ce->vm, addr & -length, length);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
175
ce->engine->name);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
298
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_tlb.c
301
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
302
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_tlb.c
303
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
307
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
308
ce->vm = i915_vm_get(&ppgtt->vm);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
311
err = intel_context_pin_ww(ce, &ww);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
32
pte_tlbinv(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/selftest_tlb.c
324
err = pte_tlbinv(ce, va, va,
sys/dev/pci/drm/i915/gt/selftest_tlb.c
332
err = pte_tlbinv(ce, va, vb,
sys/dev/pci/drm/i915/gt/selftest_tlb.c
344
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
346
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
41
i915_gem_get_pat_index(ce->vm->i915, I915_CACHE_NONE);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
50
batch = i915_gem_object_create_internal(ce->vm->i915, 4096);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
54
vma = i915_vma_instance(batch, ce->vm, NULL);
sys/dev/pci/drm/i915/gt/selftest_tlb.c
67
addr = igt_random_offset(prng, addr, min(ce->vm->total, BIT_ULL(48)),
sys/dev/pci/drm/i915/gt/selftest_tlb.c
95
ce->engine->name, va->obj->mm.region->name ?: "smem",
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
101
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1093
struct intel_context *ce[2];
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1101
ce[0] = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1102
if (IS_ERR(ce[0])) {
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1103
err = PTR_ERR(ce[0]);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1106
ce[1] = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1107
if (IS_ERR(ce[1])) {
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1108
err = PTR_ERR(ce[1]);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1109
intel_context_put(ce[0]);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1114
err = read_whitelisted_registers(ce[0], client[0].scratch[0]);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1119
err = scrub_whitelisted_registers(ce[0]);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1124
err = read_whitelisted_registers(ce[1], client[1].scratch[0]);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1137
err = read_whitelisted_registers(ce[0], client[0].scratch[1]);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1147
intel_context_put(ce[1]);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1148
intel_context_put(ce[0]);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1176
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1178
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1179
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1182
ok &= engine_wa_list_verify(ce,
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1186
ok &= engine_wa_list_verify(ce,
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1190
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1241
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1272
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1273
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1274
ret = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1302
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
1345
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
135
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
201
static int check_whitelist(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
203
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
210
results = read_nonprivs(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
273
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
276
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
277
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
278
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
280
*rq = igt_spinner_create_request(spin, ce, MI_NOOP);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
281
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
301
struct intel_context *ce, *tmp;
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
310
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
311
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
312
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
318
err = check_whitelist(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
349
err = check_whitelist(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
361
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
362
ce = tmp;
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
364
err = check_whitelist(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
374
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
474
static int check_dirty_whitelist(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
502
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
509
scratch = __vm_create_scratch_for_read_pinned(ce->vm, sz);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
513
batch = create_batch(ce->vm);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
544
err = intel_context_pin_ww(ce, &ww);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
619
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
735
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
768
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
774
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
775
if (IS_ERR(ce))
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
776
return PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
778
err = check_dirty_whitelist(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
779
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
842
static int read_whitelisted_registers(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
845
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
850
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
886
static int scrub_whitelisted_registers(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
888
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
894
batch = create_batch(ce->vm);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
922
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/selftest_workarounds.c
99
read_nonprivs(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_gsc_fw.c
284
struct intel_context *ce = gsc->ce;
sys/dev/pci/drm/i915/gt/uc/intel_gsc_fw.c
288
if (!ce)
sys/dev/pci/drm/i915/gt/uc/intel_gsc_fw.c
291
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/uc/intel_gsc_fw.c
295
if (ce->engine->emit_init_breadcrumb) {
sys/dev/pci/drm/i915/gt/uc/intel_gsc_fw.c
296
err = ce->engine->emit_init_breadcrumb(rq);
sys/dev/pci/drm/i915/gt/uc/intel_gsc_fw.c
305
err = ce->engine->emit_flush(rq, 0);
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc.c
215
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc.c
226
ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc.c
229
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc.c
231
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc.c
235
gsc->ce = ce;
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc.c
266
if (gsc->ce)
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc.c
267
intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce));
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc.h
41
struct intel_context *ce; /* for submission to GSC FW via GSC engine */
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
139
struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
156
err = intel_context_pin_ww(ce, &ww);
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
160
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
186
err = ce->engine->emit_flush(rq, 0);
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
215
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
48
struct intel_context *ce = gsc->ce;
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
58
if (!ce)
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
61
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
65
if (ce->engine->emit_init_breadcrumb) {
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
66
err = ce->engine->emit_init_breadcrumb(rq);
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.c
76
err = ce->engine->emit_flush(rq, 0);
sys/dev/pci/drm/i915/gt/uc/intel_gsc_uc_heci_cmd_submit.h
90
struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc.h
533
void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);
sys/dev/pci/drm/i915/gt/uc/intel_guc_capture.c
1533
struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_capture.c
1539
if (!gt || !ce || !engine)
sys/dev/pci/drm/i915/gt/uc/intel_guc_capture.c
1554
n->guc_id == ce->guc_id.id &&
sys/dev/pci/drm/i915/gt/uc/intel_guc_capture.c
1555
(n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
sys/dev/pci/drm/i915/gt/uc/intel_guc_capture.c
1564
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_capture.c
1569
if (!gt || !ee || !ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_capture.c
1586
n->guc_id == ce->guc_id.id &&
sys/dev/pci/drm/i915/gt/uc/intel_guc_capture.c
1587
(n->lrca & CTX_GTT_ADDRESS_MASK) == (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_capture.c
1597
ce->guc_id.id, ce->lrc.lrca);
sys/dev/pci/drm/i915/gt/uc/intel_guc_capture.h
23
struct intel_context *ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_capture.h
24
bool intel_guc_capture_is_matching_engine(struct intel_gt *gt, struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1071
static void __guc_context_destroy(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1072
static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1073
static void guc_signal_context_fence(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1074
static void guc_cancel_context_requests(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1075
static void guc_blocked_fence_complete(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1079
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1084
xa_for_each(&guc->context_lookup, index, ce) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1090
bool do_put = kref_get_unless_zero(&ce->ref);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1094
if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1095
(cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1097
intel_context_sched_disable_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1100
spin_lock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1109
destroyed = context_destroyed(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1110
pending_enable = context_pending_enable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1111
pending_disable = context_pending_disable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1112
deregister = context_wait_for_deregister_to_register(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1113
banned = context_banned(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1114
init_sched_state(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1116
spin_unlock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1121
guc_signal_context_fence(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1124
release_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1125
__guc_context_destroy(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1128
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1133
guc_signal_context_fence(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1135
guc_cancel_context_requests(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1136
intel_engine_signal_breadcrumbs(ce->engine);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1138
intel_context_sched_disable_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1141
spin_lock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1142
guc_blocked_fence_complete(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1143
spin_unlock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1145
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1149
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1501
static void __guc_context_update_stats(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1503
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1507
lrc_update_runtime(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1511
static void guc_context_update_stats(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1513
if (!intel_context_pin_if_active(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1516
__guc_context_update_stats(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1517
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1526
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1574
xa_for_each(&guc->context_lookup, index, ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1575
guc_context_update_stats(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1767
__context_to_physical_engine(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1769
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1777
static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1779
struct intel_engine_cs *engine = __context_to_physical_engine(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1781
if (!intel_context_is_schedulable(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1784
GEM_BUG_ON(!intel_context_is_pinned(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1795
lrc_init_regs(ce, engine, true);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1798
lrc_update_regs(ce, engine, head);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
181
static inline void init_sched_state(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1822
__unwind_incomplete_requests(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1828
ce->engine->sched_engine;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
183
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1832
spin_lock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1834
&ce->guc_state.requests,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
184
ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1853
spin_unlock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1857
static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1863
int i, number_children = ce->parallel.number_children;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1864
struct intel_context *parent = ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1866
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1868
intel_context_get(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1875
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1876
clr_context_enabled(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1877
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1884
if (!intel_context_is_pinned(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1888
rq = intel_context_get_active_request(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1890
head = ce->ring->tail;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1895
guilty = stalled & ce->engine->mask;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1897
GEM_BUG_ON(i915_active_is_idle(&ce->active));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1898
head = intel_ring_wrap(ce->ring, rq->head);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1903
guc_reset_state(ce, head, guilty);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1906
ce = list_next_entry(ce, parallel.child_link);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1929
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1939
xa_for_each(&guc->context_lookup, index, ce) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1940
if (!kref_get_unless_zero(&ce->ref))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1945
if (intel_context_is_pinned(ce) &&
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1946
!intel_context_is_child(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1947
__guc_reset_context(ce, stalled);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1949
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1959
static void guc_cancel_context_requests(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1961
struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1967
spin_lock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1968
list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
1970
spin_unlock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
198
static bool sched_state_is_init(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
200
return !(ce->guc_state.sched_state & ~SCHED_STATE_VALID_INIT);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2027
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2032
xa_for_each(&guc->context_lookup, index, ce) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2033
if (!kref_get_unless_zero(&ce->ref))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2038
if (intel_context_is_pinned(ce) &&
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2039
!intel_context_is_child(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
204
context_wait_for_deregister_to_register(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2040
guc_cancel_context_requests(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2042
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
206
return ce->guc_state.sched_state &
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
211
set_context_wait_for_deregister_to_register(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
213
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
214
ce->guc_state.sched_state |=
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
219
clr_context_wait_for_deregister_to_register(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
221
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
222
ce->guc_state.sched_state &=
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2241
struct intel_context *ce = request_to_scheduling_context(rq);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2245
!ctx_id_mapped(guc, ce->guc_id.id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2265
static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2269
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
227
context_destroyed(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2271
if (intel_context_is_parent(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2274
order_base_2(ce->parallel.number_children
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2284
if (!intel_context_is_parent(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2287
ce->guc_id.id = ret;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
229
return ce->guc_state.sched_state & SCHED_STATE_DESTROYED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2291
static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2293
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2295
if (!context_guc_id_invalid(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2296
if (intel_context_is_parent(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2298
ce->guc_id.id,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2299
order_base_2(ce->parallel.number_children
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2304
ce->guc_id.id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2306
clr_ctx_id_mapping(guc, ce->guc_id.id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2307
set_context_guc_id_invalid(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2309
if (!list_empty(&ce->guc_id.link))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2310
list_del_init(&ce->guc_id.link);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2313
static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2318
__release_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2322
static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2327
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2328
GEM_BUG_ON(intel_context_is_parent(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
233
set_context_destroyed(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2341
ce->guc_id.id = cn->guc_id.id;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
235
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2359
static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
236
ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2364
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2366
ret = new_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2368
if (intel_context_is_parent(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2371
ret = steal_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2376
if (intel_context_is_parent(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2380
for_each_child(ce, child)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2381
child->guc_id.id = ce->guc_id.id + i++;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2388
static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2393
GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2398
might_lock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
240
clr_context_destroyed(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2400
if (context_guc_id_invalid(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2401
ret = assign_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2406
if (!list_empty(&ce->guc_id.link))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2407
list_del_init(&ce->guc_id.link);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2408
atomic_inc(&ce->guc_id.ref);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
242
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2424
ce->engine->props.timeslice_duration_ms <<
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
243
ce->guc_state.sched_state &= ~SCHED_STATE_DESTROYED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2438
static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2442
GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2443
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2445
if (unlikely(context_guc_id_invalid(ce) ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2446
intel_context_is_parent(ce)))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2450
if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2451
!atomic_read(&ce->guc_id.ref))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2452
list_add_tail(&ce->guc_id.link,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2458
struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
246
static inline bool context_pending_disable(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2467
GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2471
action[len++] = ce->parallel.number_children + 1;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2473
for_each_child(ce, child) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
248
return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2482
struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2491
GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2503
action[len++] = ce->parallel.number_children + 1;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2508
for_each_child(ce, child) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
251
static inline void set_context_pending_disable(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
253
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
254
ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2562
static void prepare_context_registration_info_v69(struct intel_context *ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2563
static void prepare_context_registration_info_v70(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2567
register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
257
static inline void clr_context_pending_disable(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2570
ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2572
prepare_context_registration_info_v69(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2574
if (intel_context_is_parent(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2575
return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2578
return __guc_action_register_context_v69(guc, ce->guc_id.id,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2583
register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2587
prepare_context_registration_info_v70(ce, &info);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2589
if (intel_context_is_parent(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
259
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2590
return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2595
static int register_context(struct intel_context *ce, bool loop)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2597
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
260
ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2600
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2601
trace_intel_context_register(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2604
ret = register_context_v70(guc, ce, loop);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2606
ret = register_context_v69(guc, ce, loop);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2611
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2612
set_context_registered(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2613
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2616
guc_context_policy_init_v70(ce, loop);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
263
static inline bool context_banned(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2635
static int deregister_context(struct intel_context *ce, u32 guc_id)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2637
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2639
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2640
trace_intel_context_deregister(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2645
static inline void clear_children_join_go_memory(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2647
struct parent_scratch *ps = __get_parent_scratch(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
265
return ce->guc_state.sched_state & SCHED_STATE_BANNED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2651
for (i = 0; i < ce->parallel.number_children + 1; ++i)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2655
static inline u32 get_children_go_value(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2657
return __get_parent_scratch(ce)->go.semaphore;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2660
static inline u32 get_children_join_value(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2663
return __get_parent_scratch(ce)->join[child_index].semaphore;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
268
static inline void set_context_banned(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
270
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
271
ce->guc_state.sched_state |= SCHED_STATE_BANNED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2714
static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2716
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2733
if (ce->flags & BIT(CONTEXT_LOW_LATENCY))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2736
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2738
__guc_context_policy_add_priority(&policy, ce->guc_state.prio);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
274
static inline void clr_context_banned(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2748
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2750
set_context_policy_required(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2752
clr_context_policy_required(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2753
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
276
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
277
ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2795
static void prepare_context_registration_info_v69(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2797
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2799
u32 ctx_id = ce->guc_id.id;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
280
static inline bool context_enabled(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2810
i915_gem_object_is_lmem(ce->ring->vma->obj));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2816
desc->hw_context_desc = ce->lrc.lrca;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2817
desc->priority = ce->guc_state.prio;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
282
return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2825
if (intel_context_is_parent(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2828
ce->parallel.guc.wqi_tail = 0;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2829
ce->parallel.guc.wqi_head = 0;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2831
desc->process_desc = i915_ggtt_offset(ce->state) +
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2832
__get_parent_scratch_offset(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2833
desc->wq_addr = i915_ggtt_offset(ce->state) +
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2834
__get_wq_offset(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2837
pdesc = __get_process_desc_v69(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2839
pdesc->stage_id = ce->guc_id.id;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2844
ce->parallel.guc.wq_head = &pdesc->head;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2845
ce->parallel.guc.wq_tail = &pdesc->tail;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2846
ce->parallel.guc.wq_status = &pdesc->wq_status;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2848
for_each_child(ce, child) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
285
static inline void set_context_enabled(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2854
desc->priority = ce->guc_state.prio;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2859
clear_children_join_go_memory(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2863
static void prepare_context_registration_info_v70(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2866
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2868
u32 ctx_id = ce->guc_id.id;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
287
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2877
i915_gem_object_is_lmem(ce->ring->vma->obj));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
288
ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2887
info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2888
info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2890
info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2897
if (intel_context_is_parent(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2901
ce->parallel.guc.wqi_tail = 0;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2902
ce->parallel.guc.wqi_head = 0;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2904
wq_desc_offset = (u64)i915_ggtt_offset(ce->state) +
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2905
__get_parent_scratch_offset(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2906
wq_base_offset = (u64)i915_ggtt_offset(ce->state) +
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2907
__get_wq_offset(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
291
static inline void clr_context_enabled(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2914
wq_desc = __get_wq_desc_v70(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2918
ce->parallel.guc.wq_head = &wq_desc->head;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2919
ce->parallel.guc.wq_tail = &wq_desc->tail;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2920
ce->parallel.guc.wq_status = &wq_desc->wq_status;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2922
clear_children_join_go_memory(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2926
static int try_context_registration(struct intel_context *ce, bool loop)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2928
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
293
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2932
u32 ctx_id = ce->guc_id.id;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2936
GEM_BUG_ON(!sched_state_is_init(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
294
ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2941
set_ctx_id_mapping(guc, ctx_id, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2955
trace_intel_context_steal_guc_id(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2959
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2962
set_context_wait_for_deregister_to_register(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2963
intel_context_get(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2965
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
297
static inline bool context_pending_enable(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2976
ret = deregister_context(ce, ce->guc_id.id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2981
ret = register_context(ce, loop);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
299
return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2993
static int __guc_context_pre_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
2998
return lrc_pre_pin(ce, engine, ww, vaddr);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3001
static int __guc_context_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3005
if (i915_ggtt_offset(ce->state) !=
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3006
(ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3007
set_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3014
return lrc_pin(ce, engine, vaddr);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3017
static int guc_context_pre_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
302
static inline void set_context_pending_enable(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3021
return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3024
static int guc_context_pin(struct intel_context *ce, void *vaddr)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3026
int ret = __guc_context_pin(ce, ce->engine, vaddr);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3028
if (likely(!ret && !intel_context_is_barrier(ce)))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3029
intel_engine_pm_get(ce->engine);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3034
static void guc_context_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3036
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3038
__guc_context_update_stats(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3039
unpin_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
304
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3040
lrc_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3042
if (likely(!intel_context_is_barrier(ce)))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3043
intel_engine_pm_put_async(ce->engine);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3046
static void guc_context_post_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3048
lrc_post_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
305
ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3052
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3056
ce->guc_id.id,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3060
trace_intel_context_sched_enable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3067
struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3078
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3079
trace_intel_context_sched_disable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
308
static inline void clr_context_pending_enable(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3085
static void guc_blocked_fence_complete(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3087
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3089
if (!i915_sw_fence_done(&ce->guc_state.blocked))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3090
i915_sw_fence_complete(&ce->guc_state.blocked);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3093
static void guc_blocked_fence_reinit(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3095
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3096
GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
310
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3103
i915_sw_fence_fini(&ce->guc_state.blocked);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3104
i915_sw_fence_reinit(&ce->guc_state.blocked);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3105
i915_sw_fence_await(&ce->guc_state.blocked);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3106
i915_sw_fence_commit(&ce->guc_state.blocked);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3109
static u16 prep_context_pending_disable(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
311
ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3111
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3113
set_context_pending_disable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3114
clr_context_enabled(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3115
guc_blocked_fence_reinit(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3116
intel_context_get(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3118
return ce->guc_id.id;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3121
static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3123
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3125
struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3130
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3132
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3134
incr_context_blocked(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3136
enabled = context_enabled(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3139
clr_context_enabled(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
314
static inline bool context_registered(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3140
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3141
return &ce->guc_state.blocked;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3148
atomic_add(2, &ce->pin_count);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3150
guc_id = prep_context_pending_disable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3152
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3155
__guc_context_sched_disable(guc, ce, guc_id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3157
return &ce->guc_state.blocked;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
316
return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3167
static bool context_cant_unblock(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3169
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3171
return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3172
context_guc_id_invalid(ce) ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3173
!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3174
!intel_context_is_pinned(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3177
static void guc_context_unblock(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3179
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3181
struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3185
GEM_BUG_ON(context_enabled(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3186
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3188
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
319
static inline void set_context_registered(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3191
context_cant_unblock(ce))) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3195
set_context_pending_enable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3196
set_context_enabled(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3197
intel_context_get(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3200
decr_context_blocked(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3202
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3206
__guc_context_sched_enable(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
321
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3210
static void guc_context_cancel_request(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3219
intel_context_get(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
322
ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3224
guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3229
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
325
static inline void clr_context_registered(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3255
guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3258
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3260
&ce->engine->gt->i915->runtime_pm;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3264
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3268
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3269
set_context_banned(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
327
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3272
(!context_enabled(ce) && !context_pending_disable(ce))) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3273
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3275
guc_cancel_context_requests(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3276
intel_engine_signal_breadcrumbs(ce->engine);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3277
} else if (!context_pending_disable(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
328
ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3284
atomic_add(2, &ce->pin_count);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3286
guc_id = prep_context_pending_disable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3287
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3297
__guc_context_sched_disable(guc, ce, guc_id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3300
if (!context_guc_id_invalid(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3303
ce->guc_id.id,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3305
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3309
static void do_sched_disable(struct intel_guc *guc, struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
331
static inline bool context_policy_required(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3311
__releases(ce->guc_state.lock)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3313
struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3317
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3318
guc_id = prep_context_pending_disable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3320
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3323
__guc_context_sched_disable(guc, ce, guc_id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3327
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3329
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
333
return ce->guc_state.sched_state & SCHED_STATE_POLICY_REQUIRED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3330
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3332
if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3333
!ctx_id_mapped(guc, ce->guc_id.id)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3334
clr_context_enabled(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3338
return !context_enabled(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3343
struct intel_context *ce =
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3344
container_of(wrk, typeof(*ce), guc_state.sched_disable_delay_work.work);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3345
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3348
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3350
if (bypass_sched_disable(guc, ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3351
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3352
intel_context_sched_disable_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3354
do_sched_disable(guc, ce, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3358
static bool guc_id_pressure(struct intel_guc *guc, struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
336
static inline void set_context_policy_required(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3364
if (intel_context_is_parent(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3374
static void guc_context_sched_disable(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3376
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
338
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3380
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3382
if (bypass_sched_disable(guc, ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3383
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3384
intel_context_sched_disable_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3385
} else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) &&
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3387
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3389
&ce->guc_state.sched_disable_delay_work,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
339
ce->guc_state.sched_state |= SCHED_STATE_POLICY_REQUIRED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3392
do_sched_disable(guc, ce, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3396
static void guc_context_close(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3400
if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3401
cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3402
__delay_sched_disable(&ce->guc_state.sched_disable_delay_work.work);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3404
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3405
set_context_close_done(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3406
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3409
static inline int guc_lrc_desc_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3411
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3418
GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3419
GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
342
static inline void clr_context_policy_required(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3420
GEM_BUG_ON(context_enabled(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3423
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3431
set_context_destroyed(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3432
clr_context_registered(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3434
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3437
release_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3438
__guc_context_destroy(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
344
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
345
ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3453
ret = deregister_context(ce, ce->guc_id.id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3456
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3457
pending_destroyed = context_destroyed(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3459
set_context_registered(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3460
clr_context_destroyed(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3462
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3474
static void __guc_context_destroy(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3476
GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3477
ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3478
ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3479
ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
348
static inline bool context_close_done(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3481
lrc_fini(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3482
intel_context_fini(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3484
if (intel_engine_is_virtual(ce->engine)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3486
container_of(ce, typeof(*ve), context);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3493
intel_context_free(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3499
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
350
return ce->guc_state.sched_state & SCHED_STATE_CLOSED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3507
ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3510
if (ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3511
list_del_init(&ce->destroyed_link);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3514
if (!ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3517
release_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3518
__guc_context_destroy(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3524
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3529
ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
353
static inline void set_context_close_done(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3532
if (ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3533
list_del_init(&ce->destroyed_link);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3536
if (!ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3539
if (guc_lrc_desc_unpin(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3548
list_add_tail(&ce->destroyed_link,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
355
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
356
ce->guc_state.sched_state |= SCHED_STATE_CLOSED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3582
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3583
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
359
static inline u32 context_blocked(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3593
destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3594
!ctx_id_mapped(guc, ce->guc_id.id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3596
if (!list_empty(&ce->guc_id.link))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3597
list_del_init(&ce->guc_id.link);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3598
list_add_tail(&ce->destroyed_link,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3601
__release_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3605
__guc_context_destroy(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
361
return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3617
static int guc_context_alloc(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3619
return lrc_alloc(ce, ce->engine);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3623
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3628
__guc_context_policy_start_klv(&policy, ce->guc_id.id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3629
__guc_context_policy_add_priority(&policy, ce->guc_state.prio);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3634
ce->guc_id.id,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3635
ce->guc_state.prio,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3643
struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3648
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
365
static inline void incr_context_blocked(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3650
if (ce->guc_state.prio == prio || submission_disabled(guc) ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3651
!context_registered(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3652
ce->guc_state.prio = prio;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3656
ce->guc_state.prio = prio;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3657
__guc_context_set_prio(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3659
trace_intel_context_set_prio(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
367
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3674
static inline void add_context_inflight_prio(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3677
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3678
GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3680
++ce->guc_state.prio_count[guc_prio];
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3683
GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3686
static inline void sub_context_inflight_prio(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3689
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
369
ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3690
GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3693
GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3695
--ce->guc_state.prio_count[guc_prio];
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3698
static inline void update_context_prio(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3700
struct intel_guc *guc = &ce->engine->gt->uc.guc;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3706
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3708
for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3709
if (ce->guc_state.prio_count[i]) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
371
GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3710
guc_context_set_prio(guc, ce, i);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3724
struct intel_context *ce = request_to_scheduling_context(rq);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3727
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3730
spin_lock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3731
list_move_tail(&rq->sched.link, &ce->guc_state.requests);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3735
add_context_inflight_prio(ce, rq->guc_prio);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3737
sub_context_inflight_prio(ce, rq->guc_prio);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3739
add_context_inflight_prio(ce, rq->guc_prio);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
374
static inline void decr_context_blocked(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3741
update_context_prio(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3743
spin_unlock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3746
static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3748
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3752
sub_context_inflight_prio(ce, rq->guc_prio);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3753
update_context_prio(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
376
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3760
struct intel_context *ce = request_to_scheduling_context(rq);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3762
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3764
spin_lock_irq(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3772
guc_prio_fini(rq, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3774
spin_unlock_irq(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3776
atomic_dec(&ce->guc_id.ref);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
378
GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
380
ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3817
static void __guc_signal_context_fence(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3821
lockdep_assert_held(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3823
if (!list_empty(&ce->guc_state.fences))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3824
trace_intel_context_fence_release(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3830
list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3836
INIT_LIST_HEAD(&ce->guc_state.fences);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3839
static void guc_signal_context_fence(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3843
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3845
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3846
clr_context_wait_for_deregister_to_register(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3847
__guc_signal_context_fence(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3848
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3851
static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3853
return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3854
!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3855
!submission_disabled(ce_to_guc(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3858
static void guc_context_init(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3864
ctx = rcu_dereference(ce->gem_context);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3869
ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3871
INIT_DELAYED_WORK(&ce->guc_state.sched_disable_delay_work,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3874
set_bit(CONTEXT_GUC_INIT, &ce->flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3879
struct intel_context *ce = request_to_scheduling_context(rq);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3880
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
389
static inline bool context_guc_id_invalid(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3908
if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3909
guc_context_init(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
391
return ce->guc_id.id == GUC_INVALID_CONTEXT_ID;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3925
if (cancel_delayed_work_sync(&ce->guc_state.sched_disable_delay_work))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3926
intel_context_sched_disable_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3927
else if (intel_context_is_closed(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3928
if (wait_for(context_close_done(ce), 1500))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
394
static inline void set_context_guc_id_invalid(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3947
if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3950
ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3953
if (context_needs_register(ce, !!ret)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3954
ret = try_context_registration(ce, true);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
396
ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3960
atomic_dec(&ce->guc_id.ref);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3961
unpin_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3966
clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3976
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3977
if (context_wait_for_deregister_to_register(ce) ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3978
context_pending_disable(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3982
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3984
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3989
static int guc_virtual_context_pre_pin(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
399
static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3993
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3995
return __guc_context_pre_pin(ce, engine, ww, vaddr);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
3998
static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4000
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4001
int ret = __guc_context_pin(ce, engine, vaddr);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4002
intel_engine_mask_t tmp, mask = ce->engine->mask;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4005
for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
401
return gt_to_guc(ce->engine->gt);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4011
static void guc_virtual_context_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4013
intel_engine_mask_t tmp, mask = ce->engine->mask;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4015
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4017
GEM_BUG_ON(context_enabled(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4018
GEM_BUG_ON(intel_context_is_barrier(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4020
unpin_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4021
lrc_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4023
for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4027
static void guc_virtual_context_enter(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4029
intel_engine_mask_t tmp, mask = ce->engine->mask;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4032
for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4035
intel_timeline_enter(ce->timeline);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4038
static void guc_virtual_context_exit(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4040
intel_engine_mask_t tmp, mask = ce->engine->mask;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4043
for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4046
intel_timeline_exit(ce->timeline);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4049
static int guc_virtual_context_alloc(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4051
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4053
return lrc_alloc(ce, engine);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4082
static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4084
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4085
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4088
GEM_BUG_ON(!intel_context_is_parent(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4089
GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4091
ret = pin_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4095
return __guc_context_pin(ce, engine, vaddr);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4098
static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4100
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4102
GEM_BUG_ON(!intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4103
GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4105
__intel_context_pin(ce->parallel.parent);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4106
return __guc_context_pin(ce, engine, vaddr);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4109
static void guc_parent_context_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4111
struct intel_guc *guc = ce_to_guc(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4113
GEM_BUG_ON(context_enabled(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4114
GEM_BUG_ON(intel_context_is_barrier(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4115
GEM_BUG_ON(!intel_context_is_parent(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4116
GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4118
unpin_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4119
lrc_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4122
static void guc_child_context_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4124
GEM_BUG_ON(context_enabled(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4125
GEM_BUG_ON(intel_context_is_barrier(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4126
GEM_BUG_ON(!intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4127
GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4129
lrc_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4132
static void guc_child_context_post_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4134
GEM_BUG_ON(!intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4135
GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4136
GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4138
lrc_post_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4139
intel_context_unpin(ce->parallel.parent);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4144
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4146
__guc_context_destroy(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4222
struct intel_context *parent = NULL, *ce, *err;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4235
ce = intel_engine_create_virtual(siblings, num_siblings,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4237
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4238
err = ERR_CAST(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4243
parent = ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4246
ce->ops = &virtual_child_context_ops;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4247
intel_context_bind_parent_child(parent, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4259
for_each_child(parent, ce) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4260
ce->engine->emit_bb_start =
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4262
ce->engine->emit_fini_breadcrumb =
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4264
ce->engine->emit_fini_breadcrumb_dw = 16;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4336
struct intel_context *ce = request_to_scheduling_context(rq);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4343
spin_lock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4352
sub_context_inflight_prio(ce, rq->guc_prio);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4355
add_context_inflight_prio(ce, rq->guc_prio);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4356
update_context_prio(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4359
spin_unlock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4364
struct intel_context *ce = request_to_scheduling_context(rq);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4366
spin_lock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4367
guc_prio_fini(rq, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4368
spin_unlock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4453
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4464
if (context_guc_id_invalid(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4465
ret = pin_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4471
if (!test_bit(CONTEXT_GUC_INIT, &ce->flags))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4472
guc_context_init(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4474
ret = try_context_registration(ce, true);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4476
unpin_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
449
static u32 __get_parent_scratch_offset(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4505
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4507
list_for_each_entry(ce, &engine->pinned_contexts_list,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4509
int ret = guc_kernel_context_pin(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
451
GEM_BUG_ON(!ce->parallel.guc.parent_page);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
453
return ce->parallel.guc.parent_page * PAGE_SIZE;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
456
static u32 __get_wq_offset(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
460
return __get_parent_scratch_offset(ce) + WQ_OFFSET;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
464
__get_parent_scratch(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
475
(ce->lrc_reg_state +
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
476
((__get_parent_scratch_offset(ce) -
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
481
__get_process_desc_v69(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
483
struct parent_scratch *ps = __get_parent_scratch(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4884
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
489
__get_wq_desc_v70(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4891
ce = __get_context(guc, ctx_id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4892
if (unlikely(!ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4897
if (unlikely(intel_context_is_child(ce))) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
4902
return ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
491
struct parent_scratch *ps = __get_parent_scratch(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
496
static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
504
CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5057
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
506
ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5066
ce = g2h_context_lookup(guc, ctx_id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5067
if (unlikely(!ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5070
trace_intel_context_deregister_done(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5073
if (unlikely(ce->drop_deregister)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5074
ce->drop_deregister = false;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5079
if (context_wait_for_deregister_to_register(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5081
&ce->engine->gt->i915->runtime_pm;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5089
register_context(ce, true);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5090
guc_signal_context_fence(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5091
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5092
} else if (context_destroyed(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5095
release_guc_id(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5096
__guc_context_destroy(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5108
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5118
ce = g2h_context_lookup(guc, ctx_id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5119
if (unlikely(!ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5122
if (unlikely(context_destroyed(ce) ||
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5123
(!context_pending_enable(ce) &&
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5124
!context_pending_disable(ce)))) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5126
ce->guc_state.sched_state, ctx_id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
513
return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5130
trace_intel_context_sched_done(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5132
if (context_pending_enable(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5134
if (unlikely(ce->drop_schedule_enable)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5135
ce->drop_schedule_enable = false;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5140
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5141
clr_context_pending_enable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5142
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5143
} else if (context_pending_disable(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5147
if (unlikely(ce->drop_schedule_disable)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5148
ce->drop_schedule_disable = false;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5160
intel_context_sched_disable_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5162
spin_lock_irqsave(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5163
banned = context_banned(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5164
clr_context_banned(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5165
clr_context_pending_disable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5166
__guc_signal_context_fence(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5167
guc_blocked_fence_complete(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5168
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5171
guc_cancel_context_requests(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5172
intel_engine_signal_breadcrumbs(ce->engine);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5177
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
518
struct intel_context *ce = xa_load(&guc->context_lookup, id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5183
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5190
if (intel_engine_is_virtual(ce->engine)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5192
intel_engine_mask_t tmp, virtual_mask = ce->engine->mask;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5195
for_each_engine_masked(e, ce->engine->gt, virtual_mask, tmp) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5196
bool match = intel_guc_capture_is_matching_engine(gt, ce, e);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5199
intel_engine_set_hung_context(e, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5208
ce->guc_id.id, ce->engine->name);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5212
intel_engine_set_hung_context(ce->engine, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5213
engine_mask = ce->engine->mask;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5214
i915_increase_reset_engine_count(&i915->gpu_error, ce->engine);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
522
return ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5221
static void guc_context_replay(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5223
struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5225
__guc_reset_context(ce, ce->engine->mask);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5230
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5232
bool capture = intel_context_is_schedulable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5234
trace_intel_context_reset(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5238
ce->guc_id.id, ce->engine->name,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5239
str_yes_no(intel_context_is_exiting(ce)),
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5240
str_yes_no(intel_context_is_banned(ce)));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5243
capture_error_state(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5244
guc_context_replay(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5251
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5269
ce = g2h_context_lookup(guc, ctx_id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5270
if (ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5271
intel_context_get(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5274
if (unlikely(!ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5277
guc_handle_context_reset(guc, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5278
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5393
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5403
xa_for_each(&guc->context_lookup, index, ce) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5406
if (!kref_get_unless_zero(&ce->ref))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5411
if (!intel_context_is_pinned(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5414
if (intel_engine_is_virtual(ce->engine)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5415
if (!(ce->engine->mask & engine->mask))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5418
if (ce->engine != engine)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5423
spin_lock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5424
list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5431
spin_unlock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5434
intel_engine_set_hung_context(engine, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5437
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5443
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5455
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5464
xa_for_each(&guc->context_lookup, index, ce) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5465
if (!kref_get_unless_zero(&ce->ref))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5470
if (!intel_context_is_pinned(ce))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5473
if (intel_engine_is_virtual(ce->engine)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5474
if (!(ce->engine->mask & engine->mask))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5477
if (ce->engine != engine)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5481
spin_lock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5482
intel_engine_dump_active_requests(&ce->guc_state.requests,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5484
spin_unlock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5487
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5527
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5531
drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5536
i, ce->guc_state.prio_count[i]);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5542
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5544
drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5545
drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5546
if (intel_context_pin_if_active(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5548
ce->ring->head,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5549
ce->lrc_reg_state[CTX_RING_HEAD]);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5551
ce->ring->tail,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5552
ce->lrc_reg_state[CTX_RING_TAIL]);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5553
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5556
ce->ring->head);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5558
ce->ring->tail);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5561
atomic_read(&ce->pin_count));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5563
atomic_read(&ce->guc_id.ref));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5565
ce->guc_state.sched_state);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5571
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5576
xa_for_each(&guc->context_lookup, index, ce) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5577
GEM_BUG_ON(intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5579
guc_log_context(p, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5580
guc_log_context_priority(p, ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5582
if (intel_context_is_parent(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5586
ce->parallel.number_children);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5588
if (ce->parallel.guc.wq_status) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5590
READ_ONCE(*ce->parallel.guc.wq_head));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5592
READ_ONCE(*ce->parallel.guc.wq_tail));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5594
READ_ONCE(*ce->parallel.guc.wq_status));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5597
if (ce->engine->emit_bb_start ==
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5602
get_children_go_value(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5603
for (i = 0; i < ce->parallel.number_children; ++i)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5605
get_children_join_value(ce, i));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5608
for_each_child(ce, child)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5615
static inline u32 get_children_go_addr(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5617
GEM_BUG_ON(!intel_context_is_parent(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5619
return i915_ggtt_offset(ce->state) +
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5620
__get_parent_scratch_offset(ce) +
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5624
static inline u32 get_children_join_addr(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5627
GEM_BUG_ON(!intel_context_is_parent(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5629
return i915_ggtt_offset(ce->state) +
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5630
__get_parent_scratch_offset(ce) +
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5642
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5646
GEM_BUG_ON(!intel_context_is_parent(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5648
cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5653
for (i = 0; i < ce->parallel.number_children; ++i) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5659
*cs++ = get_children_join_addr(ce, i);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5670
get_children_go_addr(ce),
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5689
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5690
struct intel_context *parent = intel_context_to_parent(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5693
GEM_BUG_ON(!intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5703
ce->parallel.child_index),
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5733
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5736
GEM_BUG_ON(!intel_context_is_parent(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5739
for (i = 0; i < ce->parallel.number_children; ++i) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5745
*cs++ = get_children_join_addr(ce, i);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5756
get_children_go_addr(ce),
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5783
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5787
GEM_BUG_ON(!intel_context_is_parent(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5795
(ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5796
cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
580
struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5816
ce->engine->emit_fini_breadcrumb_dw != cs);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5827
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5828
struct intel_context *parent = intel_context_to_parent(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5830
GEM_BUG_ON(!intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5840
ce->parallel.child_index),
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5859
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5863
GEM_BUG_ON(!intel_context_is_child(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5871
(ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5872
cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
589
__xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
5892
ce->engine->emit_fini_breadcrumb_dw != cs);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
693
static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
694
static int try_context_registration(struct intel_context *ce, bool loop);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
699
struct intel_context *ce = request_to_scheduling_context(rq);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
711
if (unlikely(!intel_context_is_schedulable(ce))) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
713
intel_engine_signal_breadcrumbs(ce->engine);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
717
GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
718
GEM_BUG_ON(context_guc_id_invalid(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
720
if (context_policy_required(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
721
err = guc_context_policy_init_v70(ce, false);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
726
spin_lock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
733
if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce)))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
736
enabled = context_enabled(ce) || context_blocked(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
740
action[len++] = ce->guc_id.id;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
742
set_context_pending_enable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
743
intel_context_get(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
747
action[len++] = ce->guc_id.id;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
752
trace_intel_context_sched_enable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
754
set_context_enabled(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
763
if (intel_context_is_parent(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
768
clr_context_pending_enable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
769
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
775
spin_unlock(&ce->guc_state.lock);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
814
static u32 wq_space_until_wrap(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
816
return (WQ_SIZE - ce->parallel.guc.wqi_tail);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
819
static void write_wqi(struct intel_context *ce, u32 wqi_size)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
826
intel_guc_write_barrier(ce_to_guc(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
828
ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
830
WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
833
static int guc_wq_noop_append(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
835
u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
836
u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
845
ce->parallel.guc.wqi_tail = 0;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
852
struct intel_context *ce = request_to_scheduling_context(rq);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
854
unsigned int wqi_size = (ce->parallel.number_children + 4) *
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
861
GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
862
GEM_BUG_ON(context_guc_id_invalid(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
863
GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
864
GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
867
if (wqi_size > wq_space_until_wrap(ce)) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
868
ret = guc_wq_noop_append(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
873
wqi = get_wq_pointer(ce, wqi_size);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
881
*wqi++ = ce->lrc.lrca;
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
882
*wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) |
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
883
FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64));
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
885
for_each_child(ce, child)
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
888
write_wqi(ce, wqi_size);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
896
struct intel_context *ce = request_to_scheduling_context(rq);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
899
if (unlikely(!intel_context_is_schedulable(ce)))
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
913
struct intel_context *ce = request_to_scheduling_context(rq);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
924
!intel_context_is_schedulable(ce);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
990
struct intel_context *ce = request_to_scheduling_context(last);
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
992
if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
993
intel_context_is_schedulable(ce))) {
sys/dev/pci/drm/i915/gt/uc/intel_guc_submission.c
994
ret = try_context_registration(ce, false);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
151
struct intel_context **ce;
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
156
ce = kcalloc(GUC_MAX_CONTEXT_ID, sizeof(*ce), GFP_KERNEL);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
157
if (!ce) {
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
168
ce[context_index] = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
169
if (IS_ERR(ce[context_index])) {
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
170
ret = PTR_ERR(ce[context_index]);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
171
guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
172
ce[context_index] = NULL;
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
180
spin_rq = igt_spinner_create_request(&spin, ce[context_index],
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
195
ce[++context_index] = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
196
if (IS_ERR(ce[context_index])) {
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
197
ret = PTR_ERR(ce[context_index]);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
198
guc_err(guc, "Failed to create context: %pe\n", ce[context_index]);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
199
ce[context_index--] = NULL;
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
203
rq = nop_user_request(ce[context_index], spin_rq);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
23
static struct i915_request *nop_user_request(struct intel_context *ce,
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
240
rq = nop_user_request(ce[context_index], NULL);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
279
for (; context_index >= 0 && ce[context_index]; --context_index)
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
280
intel_context_put(ce[context_index]);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
283
kfree(ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
29
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
313
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
326
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
327
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
328
ret = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
329
gt_err(gt, "Failed to create spinner request: %pe\n", ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
340
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
341
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
57
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
67
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
68
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
69
ret = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
70
gt_err(gt, "Failed to create context %d: %pe\n", i, ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
76
ce->drop_schedule_enable = true;
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
79
ce->drop_schedule_disable = true;
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
82
ce->drop_deregister = true;
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
86
rq = nop_user_request(ce, NULL);
sys/dev/pci/drm/i915/gt/uc/selftest_guc.c
87
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc_hangcheck.c
34
struct intel_context *ce;
sys/dev/pci/drm/i915/gt/uc/selftest_guc_hangcheck.c
55
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gt/uc/selftest_guc_hangcheck.c
56
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gt/uc/selftest_guc_hangcheck.c
57
ret = PTR_ERR(ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc_hangcheck.c
58
gt_err(gt, "Failed to create spinner request: %pe\n", ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc_hangcheck.c
77
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
sys/dev/pci/drm/i915/gt/uc/selftest_guc_hangcheck.c
78
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc_multi_lrc.c
54
static void multi_lrc_context_unpin(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/selftest_guc_multi_lrc.c
58
GEM_BUG_ON(!intel_context_is_parent(ce));
sys/dev/pci/drm/i915/gt/uc/selftest_guc_multi_lrc.c
60
for_each_child(ce, child)
sys/dev/pci/drm/i915/gt/uc/selftest_guc_multi_lrc.c
62
intel_context_unpin(ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc_multi_lrc.c
65
static void multi_lrc_context_put(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/selftest_guc_multi_lrc.c
67
GEM_BUG_ON(!intel_context_is_parent(ce));
sys/dev/pci/drm/i915/gt/uc/selftest_guc_multi_lrc.c
73
intel_context_put(ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc_multi_lrc.c
77
multi_lrc_nop_request(struct intel_context *ce)
sys/dev/pci/drm/i915/gt/uc/selftest_guc_multi_lrc.c
83
GEM_BUG_ON(!intel_context_is_parent(ce));
sys/dev/pci/drm/i915/gt/uc/selftest_guc_multi_lrc.c
85
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/gt/uc/selftest_guc_multi_lrc.c
92
for_each_child(ce, child) {
sys/dev/pci/drm/i915/gt/uc/selftest_guc_multi_lrc.c
97
if (++i == ce->parallel.number_children)
sys/dev/pci/drm/i915/gvt/cmd_parser.c
3168
struct intel_context *ce = vgpu->submission.shadow[ring_id];
sys/dev/pci/drm/i915/gvt/cmd_parser.c
3171
GEM_BUG_ON(atomic_read(&ce->pin_count) < 0);
sys/dev/pci/drm/i915/gvt/cmd_parser.c
3178
if (is_inhibit_context(ce))
sys/dev/pci/drm/i915/gvt/cmd_parser.c
3181
gma_start = i915_ggtt_offset(ce->state) + LRC_STATE_PN*PAGE_SIZE;
sys/dev/pci/drm/i915/gvt/cmd_parser.c
3193
s.rb_va = ce->lrc_reg_state;
sys/dev/pci/drm/i915/gvt/mmio_context.c
472
bool is_inhibit_context(struct intel_context *ce)
sys/dev/pci/drm/i915/gvt/mmio_context.c
474
const u32 *reg_state = ce->lrc_reg_state;
sys/dev/pci/drm/i915/gvt/mmio_context.h
55
bool is_inhibit_context(struct intel_context *ce);
sys/dev/pci/drm/i915/gvt/scheduler.c
1399
struct intel_context *ce;
sys/dev/pci/drm/i915/gvt/scheduler.c
1404
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/gvt/scheduler.c
1405
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/gvt/scheduler.c
1406
ret = PTR_ERR(ce);
sys/dev/pci/drm/i915/gvt/scheduler.c
1410
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/gvt/scheduler.c
1411
ce->vm = i915_vm_get(&ppgtt->vm);
sys/dev/pci/drm/i915/gvt/scheduler.c
1412
intel_context_set_single_submission(ce);
sys/dev/pci/drm/i915/gvt/scheduler.c
1416
ce->ring_size = SZ_2M;
sys/dev/pci/drm/i915/gvt/scheduler.c
1418
s->shadow[i] = ce;
sys/dev/pci/drm/i915/gvt/scheduler.c
345
shadow_context_descriptor_update(struct intel_context *ce,
sys/dev/pci/drm/i915/gvt/scheduler.c
348
u64 desc = ce->lrc.desc;
sys/dev/pci/drm/i915/gvt/scheduler.c
358
ce->lrc.desc = desc;
sys/dev/pci/drm/i915/gvt/scheduler.c
435
struct intel_context *ce)
sys/dev/pci/drm/i915/gvt/scheduler.c
438
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
sys/dev/pci/drm/i915/i915_drm_client.c
122
struct intel_context *ce;
sys/dev/pci/drm/i915/i915_drm_client.c
125
for_each_gem_engine(ce, rcu_dereference(ctx->engines), it) {
sys/dev/pci/drm/i915/i915_drm_client.c
126
if (ce->engine->uabi_class != class)
sys/dev/pci/drm/i915/i915_drm_client.c
129
total += intel_context_get_total_runtime_ns(ce);
sys/dev/pci/drm/i915/i915_drm_client.c
212
struct intel_context *ce)
sys/dev/pci/drm/i915/i915_drm_client.c
214
if (ce->state)
sys/dev/pci/drm/i915/i915_drm_client.c
215
i915_drm_client_add_object(client, ce->state->obj);
sys/dev/pci/drm/i915/i915_drm_client.c
217
if (ce->ring != ce->engine->legacy.ring && ce->ring->vma)
sys/dev/pci/drm/i915/i915_drm_client.c
218
i915_drm_client_add_object(client, ce->ring->vma->obj);
sys/dev/pci/drm/i915/i915_drm_client.h
73
struct intel_context *ce);
sys/dev/pci/drm/i915/i915_drm_client.h
87
struct intel_context *ce)
sys/dev/pci/drm/i915/i915_gpu_error.c
1508
struct intel_context *ce)
sys/dev/pci/drm/i915/i915_gpu_error.c
1515
ctx = rcu_dereference(ce->gem_context);
sys/dev/pci/drm/i915/i915_gpu_error.c
1535
e->hwsp_seqno = (ce->timeline && ce->timeline->hwsp_seqno) ?
sys/dev/pci/drm/i915/i915_gpu_error.c
1536
*ce->timeline->hwsp_seqno : ~0U;
sys/dev/pci/drm/i915/i915_gpu_error.c
1538
e->total_runtime = intel_context_get_total_runtime_ns(ce);
sys/dev/pci/drm/i915/i915_gpu_error.c
1539
e->avg_runtime = intel_context_get_avg_runtime_ns(ce);
sys/dev/pci/drm/i915/i915_gpu_error.c
1676
struct intel_context *ce,
sys/dev/pci/drm/i915/i915_gpu_error.c
1681
ee->simulated |= record_context(&ee->context, ce);
sys/dev/pci/drm/i915/i915_gpu_error.c
1690
vma = capture_vma(vma, ce->ring->vma, "ring", gfp);
sys/dev/pci/drm/i915/i915_gpu_error.c
1691
vma = capture_vma(vma, ce->state, "HW context", gfp);
sys/dev/pci/drm/i915/i915_gpu_error.c
1758
struct intel_context *ce = NULL;
sys/dev/pci/drm/i915/i915_gpu_error.c
1765
intel_engine_get_hung_entity(engine, &ce, &rq);
sys/dev/pci/drm/i915/i915_gpu_error.c
1772
if (ce)
sys/dev/pci/drm/i915/i915_gpu_error.c
1775
engine->name, rq->fence.context, rq->fence.seqno, ce->guc_id.id);
sys/dev/pci/drm/i915/i915_gpu_error.c
1785
} else if (ce) {
sys/dev/pci/drm/i915/i915_gpu_error.c
1786
capture = engine_coredump_add_context(ee, ce, ATOMIC_MAYFAIL);
sys/dev/pci/drm/i915/i915_gpu_error.c
1793
intel_guc_capture_get_matching_node(engine->gt, ee, ce);
sys/dev/pci/drm/i915/i915_perf.c
1297
struct intel_context *ce;
sys/dev/pci/drm/i915/i915_perf.c
1301
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
sys/dev/pci/drm/i915/i915_perf.c
1302
if (ce->engine != stream->engine) /* first match! */
sys/dev/pci/drm/i915/i915_perf.c
1319
err = intel_context_pin_ww(ce, &ww);
sys/dev/pci/drm/i915/i915_perf.c
1330
stream->pinned_ctx = ce;
sys/dev/pci/drm/i915/i915_perf.c
1358
__read_reg(struct intel_context *ce, i915_reg_t reg, u32 ggtt_offset)
sys/dev/pci/drm/i915/i915_perf.c
1363
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/i915_perf.c
1381
gen12_guc_sw_ctx_id(struct intel_context *ce, u32 *ctx_id)
sys/dev/pci/drm/i915/i915_perf.c
1387
scratch = __vm_create_scratch_for_read_pinned(&ce->engine->gt->ggtt->vm, 4);
sys/dev/pci/drm/i915/i915_perf.c
1395
err = __read_reg(ce, RING_EXECLIST_STATUS_HI(ce->engine->mmio_base),
sys/dev/pci/drm/i915/i915_perf.c
1474
static u32 oa_context_image_offset(struct intel_context *ce, u32 reg)
sys/dev/pci/drm/i915/i915_perf.c
1476
u32 offset, len = (ce->engine->context_size - PAGE_SIZE) / 4;
sys/dev/pci/drm/i915/i915_perf.c
1477
u32 *state = ce->lrc_reg_state;
sys/dev/pci/drm/i915/i915_perf.c
1479
if (drm_WARN_ON(&ce->engine->i915->drm, !state))
sys/dev/pci/drm/i915/i915_perf.c
1488
drm_WARN_ON(&ce->engine->i915->drm,
sys/dev/pci/drm/i915/i915_perf.c
1501
static int set_oa_ctx_ctrl_offset(struct intel_context *ce)
sys/dev/pci/drm/i915/i915_perf.c
1503
i915_reg_t reg = GEN12_OACTXCONTROL(ce->engine->mmio_base);
sys/dev/pci/drm/i915/i915_perf.c
1504
struct i915_perf *perf = &ce->engine->i915->perf;
sys/dev/pci/drm/i915/i915_perf.c
1511
offset = oa_context_image_offset(ce, i915_mmio_reg_offset(reg));
sys/dev/pci/drm/i915/i915_perf.c
1514
drm_dbg(&ce->engine->i915->drm,
sys/dev/pci/drm/i915/i915_perf.c
1516
ce->engine->name, offset);
sys/dev/pci/drm/i915/i915_perf.c
1539
struct intel_context *ce;
sys/dev/pci/drm/i915/i915_perf.c
1542
ce = oa_pin_context(stream);
sys/dev/pci/drm/i915/i915_perf.c
1543
if (IS_ERR(ce))
sys/dev/pci/drm/i915/i915_perf.c
1544
return PTR_ERR(ce);
sys/dev/pci/drm/i915/i915_perf.c
1552
ret = set_oa_ctx_ctrl_offset(ce);
sys/dev/pci/drm/i915/i915_perf.c
1554
intel_context_unpin(ce);
sys/dev/pci/drm/i915/i915_perf.c
1562
switch (GRAPHICS_VER(ce->engine->i915)) {
sys/dev/pci/drm/i915/i915_perf.c
1568
stream->specific_ctx_id = i915_ggtt_offset(ce->state);
sys/dev/pci/drm/i915/i915_perf.c
1575
if (intel_engine_uses_guc(ce->engine)) {
sys/dev/pci/drm/i915/i915_perf.c
1586
stream->specific_ctx_id = ce->lrc.lrca >> 12;
sys/dev/pci/drm/i915/i915_perf.c
1607
MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
sys/dev/pci/drm/i915/i915_perf.c
1610
ce->tag = stream->specific_ctx_id;
sys/dev/pci/drm/i915/i915_perf.c
1629
struct intel_context *ce;
sys/dev/pci/drm/i915/i915_perf.c
1631
ce = fetch_and_zero(&stream->pinned_ctx);
sys/dev/pci/drm/i915/i915_perf.c
1632
if (ce) {
sys/dev/pci/drm/i915/i915_perf.c
1633
ce->tag = 0; /* recomputed on next submission after parking */
sys/dev/pci/drm/i915/i915_perf.c
1634
intel_context_unpin(ce);
sys/dev/pci/drm/i915/i915_perf.c
2329
struct intel_context *ce,
sys/dev/pci/drm/i915/i915_perf.c
2351
intel_engine_pm_get(ce->engine);
sys/dev/pci/drm/i915/i915_perf.c
2352
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/i915_perf.c
2353
intel_engine_pm_put(ce->engine);
sys/dev/pci/drm/i915/i915_perf.c
2469
gen8_update_reg_state_unlocked(const struct intel_context *ce,
sys/dev/pci/drm/i915/i915_perf.c
2484
u32 *reg_state = ce->lrc_reg_state;
sys/dev/pci/drm/i915/i915_perf.c
2505
struct intel_context *ce,
sys/dev/pci/drm/i915/i915_perf.c
2515
offset = i915_ggtt_offset(ce->state) + LRC_STATE_OFFSET;
sys/dev/pci/drm/i915/i915_perf.c
2530
struct intel_context *ce,
sys/dev/pci/drm/i915/i915_perf.c
2553
static int gen8_modify_context(struct intel_context *ce,
sys/dev/pci/drm/i915/i915_perf.c
2559
rq = intel_engine_create_kernel_request(ce->engine);
sys/dev/pci/drm/i915/i915_perf.c
2564
err = intel_context_prepare_remote_request(ce, rq);
sys/dev/pci/drm/i915/i915_perf.c
2566
err = gen8_store_flex(rq, ce, flex, count);
sys/dev/pci/drm/i915/i915_perf.c
2573
gen8_modify_self(struct intel_context *ce,
sys/dev/pci/drm/i915/i915_perf.c
2580
intel_engine_pm_get(ce->engine);
sys/dev/pci/drm/i915/i915_perf.c
2581
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/i915_perf.c
2582
intel_engine_pm_put(ce->engine);
sys/dev/pci/drm/i915/i915_perf.c
2592
err = gen8_load_flex(rq, ce, flex, count);
sys/dev/pci/drm/i915/i915_perf.c
2606
struct intel_context *ce;
sys/dev/pci/drm/i915/i915_perf.c
2609
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
sys/dev/pci/drm/i915/i915_perf.c
2610
GEM_BUG_ON(ce == ce->engine->kernel_context);
sys/dev/pci/drm/i915/i915_perf.c
2612
if (ce->engine->class != RENDER_CLASS)
sys/dev/pci/drm/i915/i915_perf.c
2616
if (!intel_context_pin_if_active(ce))
sys/dev/pci/drm/i915/i915_perf.c
2619
flex->value = intel_sseu_make_rpcs(ce->engine->gt, &ce->sseu);
sys/dev/pci/drm/i915/i915_perf.c
2620
err = gen8_modify_context(ce, flex, count);
sys/dev/pci/drm/i915/i915_perf.c
2622
intel_context_unpin(ce);
sys/dev/pci/drm/i915/i915_perf.c
2635
struct intel_context *ce = stream->pinned_ctx;
sys/dev/pci/drm/i915/i915_perf.c
2657
RING_CONTEXT_CONTROL(ce->engine->mmio_base),
sys/dev/pci/drm/i915/i915_perf.c
2667
err = intel_context_lock_pinned(ce);
sys/dev/pci/drm/i915/i915_perf.c
2671
err = gen8_modify_context(ce, regs_context,
sys/dev/pci/drm/i915/i915_perf.c
2673
intel_context_unlock_pinned(ce);
sys/dev/pci/drm/i915/i915_perf.c
2678
return gen8_modify_self(ce, regs_lri, ARRAY_SIZE(regs_lri), active);
sys/dev/pci/drm/i915/i915_perf.c
2761
struct intel_context *ce = engine->kernel_context;
sys/dev/pci/drm/i915/i915_perf.c
2766
regs[0].value = intel_sseu_make_rpcs(engine->gt, &ce->sseu);
sys/dev/pci/drm/i915/i915_perf.c
2768
err = gen8_modify_self(ce, regs, num_regs, active);
sys/dev/pci/drm/i915/i915_perf.c
3429
void i915_oa_init_reg_state(const struct intel_context *ce,
sys/dev/pci/drm/i915/i915_perf.c
3440
gen8_update_reg_state_unlocked(ce, stream);
sys/dev/pci/drm/i915/i915_perf.h
36
void i915_oa_init_reg_state(const struct intel_context *ce,
sys/dev/pci/drm/i915/i915_request.c
1018
rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
sys/dev/pci/drm/i915/i915_request.c
1025
rq->context = ce;
sys/dev/pci/drm/i915/i915_request.c
1026
rq->engine = ce->engine;
sys/dev/pci/drm/i915/i915_request.c
1027
rq->ring = ce->ring;
sys/dev/pci/drm/i915/i915_request.c
1028
rq->execution_mask = ce->engine->mask;
sys/dev/pci/drm/i915/i915_request.c
1029
rq->i915 = ce->engine->i915;
sys/dev/pci/drm/i915/i915_request.c
1088
intel_context_mark_active(ce);
sys/dev/pci/drm/i915/i915_request.c
1094
ce->ring->emit = rq->head;
sys/dev/pci/drm/i915/i915_request.c
1107
intel_context_unpin(ce);
sys/dev/pci/drm/i915/i915_request.c
1112
i915_request_create(struct intel_context *ce)
sys/dev/pci/drm/i915/i915_request.c
1117
tl = intel_context_timeline_lock(ce);
sys/dev/pci/drm/i915/i915_request.c
1126
intel_context_enter(ce);
sys/dev/pci/drm/i915/i915_request.c
1127
rq = __i915_request_create(ce, GFP_KERNEL);
sys/dev/pci/drm/i915/i915_request.c
1128
intel_context_exit(ce); /* active reference transferred to request */
sys/dev/pci/drm/i915/i915_request.c
345
struct intel_context *ce = rq->context;
sys/dev/pci/drm/i915/i915_request.c
347
if (!ce->watchdog.timeout_us)
sys/dev/pci/drm/i915/i915_request.c
354
ns_to_ktime(ce->watchdog.timeout_us *
sys/dev/pci/drm/i915/i915_request.c
967
__i915_request_create(struct intel_context *ce, gfp_t gfp)
sys/dev/pci/drm/i915/i915_request.c
969
struct intel_timeline *tl = ce->timeline;
sys/dev/pci/drm/i915/i915_request.c
977
__intel_context_pin(ce);
sys/dev/pci/drm/i915/i915_request.h
379
__i915_request_create(struct intel_context *ce, gfp_t gfp);
sys/dev/pci/drm/i915/i915_request.h
381
i915_request_create(struct intel_context *ce);
sys/dev/pci/drm/i915/pxp/intel_pxp.c
100
ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
sys/dev/pci/drm/i915/pxp/intel_pxp.c
103
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/pxp/intel_pxp.c
105
return PTR_ERR(ce);
sys/dev/pci/drm/i915/pxp/intel_pxp.c
108
pxp->ce = ce;
sys/dev/pci/drm/i915/pxp/intel_pxp.c
115
if (pxp->ce)
sys/dev/pci/drm/i915/pxp/intel_pxp.c
116
intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce));
sys/dev/pci/drm/i915/pxp/intel_pxp.c
501
struct intel_context *ce;
sys/dev/pci/drm/i915/pxp/intel_pxp.c
522
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
sys/dev/pci/drm/i915/pxp/intel_pxp.c
523
intel_context_ban(ce, NULL);
sys/dev/pci/drm/i915/pxp/intel_pxp.c
57
return IS_ENABLED(CONFIG_DRM_I915_PXP) && pxp && pxp->ce;
sys/dev/pci/drm/i915/pxp/intel_pxp.c
88
struct intel_context *ce;
sys/dev/pci/drm/i915/pxp/intel_pxp_cmd.c
100
struct intel_context *ce = pxp->ce;
sys/dev/pci/drm/i915/pxp/intel_pxp_cmd.c
107
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/pxp/intel_pxp_cmd.c
111
if (ce->engine->emit_init_breadcrumb) {
sys/dev/pci/drm/i915/pxp/intel_pxp_cmd.c
112
err = ce->engine->emit_init_breadcrumb(rq);
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
114
exec_res->ce, &pkt, exec_res->bb_vaddr,
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
307
if (exec_res->ce)
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
308
intel_context_put(exec_res->ce);
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
374
struct intel_context *ce;
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
401
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
402
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
404
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
408
i915_vm_put(ce->vm);
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
409
ce->vm = i915_vm_get(pxp->ctrl_gt->vm);
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
410
exec_res->ce = ce;
sys/dev/pci/drm/i915/pxp/intel_pxp_gsccs.c
68
if (!exec_res->ce)
sys/dev/pci/drm/i915/pxp/intel_pxp_types.h
49
struct intel_context *ce; /* context for gsc command submission */
sys/dev/pci/drm/i915/pxp/intel_pxp_types.h
76
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_gem.c
24
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_gem.c
27
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
sys/dev/pci/drm/i915/selftests/i915_gem.c
30
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
456
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
459
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
460
if (IS_ERR(ce))
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
465
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/selftests/i915_gem_evict.c
467
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_perf.c
293
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_perf.c
309
ce = intel_context_create(stream->engine);
sys/dev/pci/drm/i915/selftests/i915_perf.c
310
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/selftests/i915_perf.c
311
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/selftests/i915_perf.c
316
scratch = __px_vaddr(ce->vm->scratch[0]);
sys/dev/pci/drm/i915/selftests/i915_perf.c
319
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/selftests/i915_perf.c
414
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
1888
static int switch_to_kernel_sync(struct intel_context *ce, int err)
sys/dev/pci/drm/i915/selftests/i915_request.c
1893
rq = intel_engine_create_kernel_request(ce->engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
1897
fence = i915_active_fence_get(&ce->timeline->last_request);
sys/dev/pci/drm/i915/selftests/i915_request.c
1909
while (!err && !intel_engine_is_idle(ce->engine))
sys/dev/pci/drm/i915/selftests/i915_request.c
1910
intel_engine_flush_submission(ce->engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
1926
struct intel_context *ce[] __counted_by(nengines);
sys/dev/pci/drm/i915/selftests/i915_request.c
1959
static u32 *emit_timestamp_store(u32 *cs, struct intel_context *ce, u32 offset)
sys/dev/pci/drm/i915/selftests/i915_request.c
1962
*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP((ce->engine->mmio_base)));
sys/dev/pci/drm/i915/selftests/i915_request.c
2003
static u32 *hwsp_scratch(const struct intel_context *ce)
sys/dev/pci/drm/i915/selftests/i915_request.c
2005
return memset32(ce->engine->status_page.addr + 1000, 0, 21);
sys/dev/pci/drm/i915/selftests/i915_request.c
2008
static u32 hwsp_offset(const struct intel_context *ce, u32 *dw)
sys/dev/pci/drm/i915/selftests/i915_request.c
2010
return (i915_ggtt_offset(ce->engine->status_page.vma) +
sys/dev/pci/drm/i915/selftests/i915_request.c
2014
static int measure_semaphore_response(struct intel_context *ce)
sys/dev/pci/drm/i915/selftests/i915_request.c
2016
u32 *sema = hwsp_scratch(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2017
const u32 offset = hwsp_offset(ce, sema);
sys/dev/pci/drm/i915/selftests/i915_request.c
2037
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2051
cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
sys/dev/pci/drm/i915/selftests/i915_request.c
2065
cycles = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
sys/dev/pci/drm/i915/selftests/i915_request.c
2079
ce->engine->name, cycles >> TF_BIAS,
sys/dev/pci/drm/i915/selftests/i915_request.c
208
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_request.c
2080
cycles_to_ns(ce->engine, cycles));
sys/dev/pci/drm/i915/selftests/i915_request.c
2082
return intel_gt_wait_for_idle(ce->engine->gt, HZ);
sys/dev/pci/drm/i915/selftests/i915_request.c
2085
intel_gt_set_wedged(ce->engine->gt);
sys/dev/pci/drm/i915/selftests/i915_request.c
2089
static int measure_idle_dispatch(struct intel_context *ce)
sys/dev/pci/drm/i915/selftests/i915_request.c
2091
u32 *sema = hwsp_scratch(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2092
const u32 offset = hwsp_offset(ce, sema);
sys/dev/pci/drm/i915/selftests/i915_request.c
2112
err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
sys/dev/pci/drm/i915/selftests/i915_request.c
2116
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2129
cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
sys/dev/pci/drm/i915/selftests/i915_request.c
2135
elapsed[i] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
sys/dev/pci/drm/i915/selftests/i915_request.c
2141
err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
sys/dev/pci/drm/i915/selftests/i915_request.c
2150
ce->engine->name, cycles >> TF_BIAS,
sys/dev/pci/drm/i915/selftests/i915_request.c
2151
cycles_to_ns(ce->engine, cycles));
sys/dev/pci/drm/i915/selftests/i915_request.c
2153
return intel_gt_wait_for_idle(ce->engine->gt, HZ);
sys/dev/pci/drm/i915/selftests/i915_request.c
2156
intel_gt_set_wedged(ce->engine->gt);
sys/dev/pci/drm/i915/selftests/i915_request.c
2160
static int measure_busy_dispatch(struct intel_context *ce)
sys/dev/pci/drm/i915/selftests/i915_request.c
2162
u32 *sema = hwsp_scratch(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2163
const u32 offset = hwsp_offset(ce, sema);
sys/dev/pci/drm/i915/selftests/i915_request.c
217
ce = i915_gem_context_get_engine(ctx[0], RCS0);
sys/dev/pci/drm/i915/selftests/i915_request.c
218
GEM_BUG_ON(IS_ERR(ce));
sys/dev/pci/drm/i915/selftests/i915_request.c
2184
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
219
request = mock_request(ce, 2 * HZ);
sys/dev/pci/drm/i915/selftests/i915_request.c
2199
cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
sys/dev/pci/drm/i915/selftests/i915_request.c
220
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2210
elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
sys/dev/pci/drm/i915/selftests/i915_request.c
2227
ce->engine->name, cycles >> TF_BIAS,
sys/dev/pci/drm/i915/selftests/i915_request.c
2228
cycles_to_ns(ce->engine, cycles));
sys/dev/pci/drm/i915/selftests/i915_request.c
2230
return intel_gt_wait_for_idle(ce->engine->gt, HZ);
sys/dev/pci/drm/i915/selftests/i915_request.c
2233
intel_gt_set_wedged(ce->engine->gt);
sys/dev/pci/drm/i915/selftests/i915_request.c
2263
static int measure_inter_request(struct intel_context *ce)
sys/dev/pci/drm/i915/selftests/i915_request.c
2265
u32 *sema = hwsp_scratch(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2266
const u32 offset = hwsp_offset(ce, sema);
sys/dev/pci/drm/i915/selftests/i915_request.c
2285
err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0);
sys/dev/pci/drm/i915/selftests/i915_request.c
2295
intel_engine_flush_submission(ce->engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
2300
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2321
cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
sys/dev/pci/drm/i915/selftests/i915_request.c
2327
intel_engine_flush_submission(ce->engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
2331
err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
sys/dev/pci/drm/i915/selftests/i915_request.c
2340
ce->engine->name, cycles >> TF_BIAS,
sys/dev/pci/drm/i915/selftests/i915_request.c
2341
cycles_to_ns(ce->engine, cycles));
sys/dev/pci/drm/i915/selftests/i915_request.c
2343
return intel_gt_wait_for_idle(ce->engine->gt, HZ);
sys/dev/pci/drm/i915/selftests/i915_request.c
235
ce = i915_gem_context_get_engine(ctx[1], RCS0);
sys/dev/pci/drm/i915/selftests/i915_request.c
2350
intel_gt_set_wedged(ce->engine->gt);
sys/dev/pci/drm/i915/selftests/i915_request.c
2354
static int measure_context_switch(struct intel_context *ce)
sys/dev/pci/drm/i915/selftests/i915_request.c
2356
u32 *sema = hwsp_scratch(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2357
const u32 offset = hwsp_offset(ce, sema);
sys/dev/pci/drm/i915/selftests/i915_request.c
236
GEM_BUG_ON(IS_ERR(ce));
sys/dev/pci/drm/i915/selftests/i915_request.c
237
vip = mock_request(ce, 0);
sys/dev/pci/drm/i915/selftests/i915_request.c
2376
err = plug(ce->engine, sema, MI_SEMAPHORE_SAD_NEQ_SDD, 0);
sys/dev/pci/drm/i915/selftests/i915_request.c
238
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2382
ce, ce->engine->kernel_context
sys/dev/pci/drm/i915/selftests/i915_request.c
2411
cs = emit_timestamp_store(cs, ce, addr);
sys/dev/pci/drm/i915/selftests/i915_request.c
2423
intel_engine_flush_submission(ce->engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
2426
err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
sys/dev/pci/drm/i915/selftests/i915_request.c
2435
ce->engine->name, cycles >> TF_BIAS,
sys/dev/pci/drm/i915/selftests/i915_request.c
2436
cycles_to_ns(ce->engine, cycles));
sys/dev/pci/drm/i915/selftests/i915_request.c
2438
return intel_gt_wait_for_idle(ce->engine->gt, HZ);
sys/dev/pci/drm/i915/selftests/i915_request.c
2444
intel_gt_set_wedged(ce->engine->gt);
sys/dev/pci/drm/i915/selftests/i915_request.c
2448
static int measure_preemption(struct intel_context *ce)
sys/dev/pci/drm/i915/selftests/i915_request.c
2450
u32 *sema = hwsp_scratch(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2451
const u32 offset = hwsp_offset(ce, sema);
sys/dev/pci/drm/i915/selftests/i915_request.c
2473
if (!intel_engine_has_preemption(ce->engine))
sys/dev/pci/drm/i915/selftests/i915_request.c
2480
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2495
cs = emit_timestamp_store(cs, ce, addr + sizeof(u32));
sys/dev/pci/drm/i915/selftests/i915_request.c
2505
rq = i915_request_create(ce->engine->kernel_context);
sys/dev/pci/drm/i915/selftests/i915_request.c
2518
cs = emit_timestamp_store(cs, ce, addr);
sys/dev/pci/drm/i915/selftests/i915_request.c
2524
elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
sys/dev/pci/drm/i915/selftests/i915_request.c
2538
ce->engine->name, cycles >> TF_BIAS,
sys/dev/pci/drm/i915/selftests/i915_request.c
2539
cycles_to_ns(ce->engine, cycles));
sys/dev/pci/drm/i915/selftests/i915_request.c
2546
ce->engine->name, cycles >> TF_BIAS,
sys/dev/pci/drm/i915/selftests/i915_request.c
2547
cycles_to_ns(ce->engine, cycles));
sys/dev/pci/drm/i915/selftests/i915_request.c
2549
return intel_gt_wait_for_idle(ce->engine->gt, HZ);
sys/dev/pci/drm/i915/selftests/i915_request.c
2552
intel_gt_set_wedged(ce->engine->gt);
sys/dev/pci/drm/i915/selftests/i915_request.c
2568
static int measure_completion(struct intel_context *ce)
sys/dev/pci/drm/i915/selftests/i915_request.c
2570
u32 *sema = hwsp_scratch(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2571
const u32 offset = hwsp_offset(ce, sema);
sys/dev/pci/drm/i915/selftests/i915_request.c
2592
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2607
cs = emit_timestamp_store(cs, ce, offset + i * sizeof(u32));
sys/dev/pci/drm/i915/selftests/i915_request.c
2614
intel_engine_flush_submission(ce->engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
2625
elapsed[i - 1] = ENGINE_READ_FW(ce->engine, RING_TIMESTAMP);
sys/dev/pci/drm/i915/selftests/i915_request.c
2629
err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
sys/dev/pci/drm/i915/selftests/i915_request.c
2640
ce->engine->name, cycles >> TF_BIAS,
sys/dev/pci/drm/i915/selftests/i915_request.c
2641
cycles_to_ns(ce->engine, cycles));
sys/dev/pci/drm/i915/selftests/i915_request.c
2643
return intel_gt_wait_for_idle(ce->engine->gt, HZ);
sys/dev/pci/drm/i915/selftests/i915_request.c
2646
intel_gt_set_wedged(ce->engine->gt);
sys/dev/pci/drm/i915/selftests/i915_request.c
2680
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_request.c
2682
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
2683
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/selftests/i915_request.c
2684
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2688
err = intel_context_pin(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2690
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2698
err = measure_semaphore_response(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2700
err = measure_idle_dispatch(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2702
err = measure_busy_dispatch(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2704
err = measure_inter_request(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2706
err = measure_context_switch(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2708
err = measure_preemption(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2710
err = measure_completion(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2715
intel_context_unpin(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2716
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2740
rq = i915_request_create(ps->ce[idx]);
sys/dev/pci/drm/i915/selftests/i915_request.c
2774
rq = i915_request_create(ps->ce[idx]);
sys/dev/pci/drm/i915/selftests/i915_request.c
2808
rq = i915_request_create(ps->ce[idx]);
sys/dev/pci/drm/i915/selftests/i915_request.c
2843
ps = kzalloc(struct_size(ps, ce, nengines), GFP_KERNEL);
sys/dev/pci/drm/i915/selftests/i915_request.c
2856
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_request.c
2858
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
2859
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/selftests/i915_request.c
286
struct i915_request *(*request_alloc)(struct intel_context *ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2860
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2864
err = intel_context_pin(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2866
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2870
ps->ce[idx++] = ce;
sys/dev/pci/drm/i915/selftests/i915_request.c
2886
struct intel_context *ce = ps->ce[idx];
sys/dev/pci/drm/i915/selftests/i915_request.c
2888
p->engine = ps->ce[idx]->engine;
sys/dev/pci/drm/i915/selftests/i915_request.c
2896
p->runtime = -intel_context_get_total_runtime_ns(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
290
__mock_request_alloc(struct intel_context *ce)
sys/dev/pci/drm/i915/selftests/i915_request.c
2905
struct intel_context *ce = ps->ce[idx];
sys/dev/pci/drm/i915/selftests/i915_request.c
2917
err = switch_to_kernel_sync(ce, err);
sys/dev/pci/drm/i915/selftests/i915_request.c
2918
p->runtime += intel_context_get_total_runtime_ns(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
292
return mock_request(ce, 0);
sys/dev/pci/drm/i915/selftests/i915_request.c
2933
name, p->engine->name, ce->timeline->seqno,
sys/dev/pci/drm/i915/selftests/i915_request.c
2942
if (IS_ERR_OR_NULL(ps->ce[idx]))
sys/dev/pci/drm/i915/selftests/i915_request.c
2945
intel_context_unpin(ps->ce[idx]);
sys/dev/pci/drm/i915/selftests/i915_request.c
2946
intel_context_put(ps->ce[idx]);
sys/dev/pci/drm/i915/selftests/i915_request.c
296
__live_request_alloc(struct intel_context *ce)
sys/dev/pci/drm/i915/selftests/i915_request.c
2968
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_request.c
2974
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
2975
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/selftests/i915_request.c
2976
thread->result = PTR_ERR(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
298
return intel_context_create_request(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2980
err = intel_context_pin(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2982
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
2999
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3028
err = switch_to_kernel_sync(ce, err);
sys/dev/pci/drm/i915/selftests/i915_request.c
3029
p->runtime = intel_context_get_total_runtime_ns(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3032
intel_context_unpin(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3033
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3043
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_request.c
3049
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
3050
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/selftests/i915_request.c
3051
thread->result = PTR_ERR(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3055
err = intel_context_pin(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3057
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3074
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3105
err = switch_to_kernel_sync(ce, err);
sys/dev/pci/drm/i915/selftests/i915_request.c
3106
p->runtime = intel_context_get_total_runtime_ns(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3109
intel_context_unpin(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3110
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3119
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_request.c
3125
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
3126
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/selftests/i915_request.c
3127
thread->result = PTR_ERR(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3131
err = intel_context_pin(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3133
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3150
rq = i915_request_create(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3170
err = switch_to_kernel_sync(ce, err);
sys/dev/pci/drm/i915/selftests/i915_request.c
3171
p->runtime = intel_context_get_total_runtime_ns(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3174
intel_context_unpin(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
3175
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
367
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_request.c
369
ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx);
sys/dev/pci/drm/i915/selftests/i915_request.c
370
GEM_BUG_ON(IS_ERR(ce));
sys/dev/pci/drm/i915/selftests/i915_request.c
371
rq = t->request_alloc(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
372
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
644
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_request.c
652
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
653
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/selftests/i915_request.c
654
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
658
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
sys/dev/pci/drm/i915/selftests/i915_request.c
687
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
697
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_request.c
705
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
706
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/selftests/i915_request.c
707
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
711
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
sys/dev/pci/drm/i915/selftests/i915_request.c
748
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
758
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_request.c
766
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
767
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/selftests/i915_request.c
768
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
772
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
sys/dev/pci/drm/i915/selftests/i915_request.c
797
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
819
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/i915_request.c
835
ce = intel_context_create(engine);
sys/dev/pci/drm/i915/selftests/i915_request.c
836
if (IS_ERR(ce)) {
sys/dev/pci/drm/i915/selftests/i915_request.c
837
err = PTR_ERR(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
841
rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
sys/dev/pci/drm/i915/selftests/i915_request.c
860
nop = intel_context_create_request(ce);
sys/dev/pci/drm/i915/selftests/i915_request.c
904
intel_context_put(ce);
sys/dev/pci/drm/i915/selftests/igt_spinner.c
102
vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
sys/dev/pci/drm/i915/selftests/igt_spinner.c
125
struct intel_context *ce,
sys/dev/pci/drm/i915/selftests/igt_spinner.c
128
struct intel_engine_cs *engine = ce->engine;
sys/dev/pci/drm/i915/selftests/igt_spinner.c
135
GEM_BUG_ON(spin->gt != ce->vm->gt);
sys/dev/pci/drm/i915/selftests/igt_spinner.c
137
if (!intel_engine_can_store_dword(ce->engine))
sys/dev/pci/drm/i915/selftests/igt_spinner.c
141
err = igt_spinner_pin(spin, ce, NULL);
sys/dev/pci/drm/i915/selftests/igt_spinner.c
149
rq = intel_context_create_request(ce);
sys/dev/pci/drm/i915/selftests/igt_spinner.c
43
static void *igt_spinner_pin_obj(struct intel_context *ce,
sys/dev/pci/drm/i915/selftests/igt_spinner.c
51
*vma = i915_vma_instance(obj, ce->vm, NULL);
sys/dev/pci/drm/i915/selftests/igt_spinner.c
81
struct intel_context *ce,
sys/dev/pci/drm/i915/selftests/igt_spinner.c
86
if (spin->ce && WARN_ON(spin->ce != ce))
sys/dev/pci/drm/i915/selftests/igt_spinner.c
88
spin->ce = ce;
sys/dev/pci/drm/i915/selftests/igt_spinner.c
91
vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma);
sys/dev/pci/drm/i915/selftests/igt_spinner.h
23
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/igt_spinner.h
31
struct intel_context *ce,
sys/dev/pci/drm/i915/selftests/igt_spinner.h
37
struct intel_context *ce,
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
666
static int igt_gpu_write_dw(struct intel_context *ce,
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
671
return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32),
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
711
struct intel_context *ce;
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
724
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
726
if (!intel_engine_can_store_dword(ce->engine))
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
729
vm = ce->vm;
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
756
ce = engines->engines[order[i] % engines->num_engines];
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
758
if (!ce || !intel_engine_can_store_dword(ce->engine))
sys/dev/pci/drm/i915/selftests/intel_memory_region.c
761
err = igt_gpu_write_dw(ce, vma, dword, rng);
sys/dev/pci/drm/i915/selftests/mock_request.c
31
mock_request(struct intel_context *ce, unsigned long delay)
sys/dev/pci/drm/i915/selftests/mock_request.c
36
request = intel_context_create_request(ce);
sys/dev/pci/drm/i915/selftests/mock_request.h
33
mock_request(struct intel_context *ce, unsigned long delay);
sys/dev/pci/if_qwx_pci.c
1703
ce_pipe = &sc->ce.ce_pipe[i];
sys/dev/pci/if_qwx_pci.c
4141
struct qwx_ce_pipe *ce_pipe = &sc->ce.ce_pipe[i];
sys/dev/pci/if_qwz_pci.c
1544
ce_pipe = &sc->ce.ce_pipe[i];
sys/dev/pci/if_qwz_pci.c
4005
struct qwz_ce_pipe *ce_pipe = &sc->ce.ce_pipe[i];
sys/dev/pci/mfii.c
2579
struct mfii_sge *ce = NULL;
sys/dev/pci/mfii.c
2605
ce = nsge + space;
sys/dev/pci/mfii.c
2606
ce->sg_addr = htole64(ccb->ccb_sgl_dva);
sys/dev/pci/mfii.c
2607
ce->sg_len = htole32(ccb->ccb_sgl_len);
sys/dev/pci/mfii.c
2608
ce->sg_flags = sc->sc_iop->sge_flag_chain;
sys/dev/pci/mfii.c
2610
req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
sys/dev/pci/mfii.c
2614
if (nsge == ce)
sys/dev/pci/mpii.c
1810
struct mpii_evt_ir_cfg_element *ce;
sys/dev/pci/mpii.c
1824
ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);
sys/dev/pci/mpii.c
1826
for (i = 0; i < ccl->num_elements; i++, ce++) {
sys/dev/pci/mpii.c
1827
type = (lemtoh16(&ce->element_flags) &
sys/dev/pci/mpii.c
1832
switch (ce->reason_code) {
sys/dev/pci/mpii.c
1836
lemtoh16(&ce->vol_dev_handle))) {
sys/dev/pci/mpii.c
1839
lemtoh16(&ce->vol_dev_handle));
sys/dev/pci/mpii.c
1851
dev->dev_handle = lemtoh16(&ce->vol_dev_handle);
sys/dev/pci/mpii.c
1861
lemtoh16(&ce->vol_dev_handle))))
sys/dev/pci/mpii.c
1869
if (ce->reason_code ==
sys/dev/pci/mpii.c
1871
ce->reason_code ==
sys/dev/pci/mpii.c
1875
lemtoh16(&ce->phys_disk_dev_handle))))
sys/dev/pci/mpii.c
1884
if (ce->reason_code ==
sys/dev/pci/mpii.c
1888
lemtoh16(&ce->phys_disk_dev_handle))))
sys/kern/tty.c
1721
int cc, ce, obufcc = 0;
sys/kern/tty.c
1824
ce = cc;
sys/kern/tty.c
1826
ce = cc - scanc((u_int)cc, cp, char_type,
sys/kern/tty.c
1832
if (ce == 0) {
sys/kern/tty.c
1855
i = b_to_q(cp, ce, &tp->t_outq);
sys/kern/tty.c
1856
ce -= i;
sys/kern/tty.c
1857
tp->t_column += ce;
sys/kern/tty.c
1858
cp += ce, cc -= ce, tk_nout += ce;
sys/kern/tty.c
1859
tp->t_outcc += ce;
sys/scsi/ch.c
384
ch_exchange(struct ch_softc *sc, struct changer_exchange *ce)
sys/scsi/ch.c
394
if ((ce->ce_srctype > CHET_DT) || (ce->ce_fdsttype > CHET_DT) ||
sys/scsi/ch.c
395
(ce->ce_sdsttype > CHET_DT))
sys/scsi/ch.c
397
if ((ce->ce_srcunit > (sc->sc_counts[ce->ce_srctype] - 1)) ||
sys/scsi/ch.c
398
(ce->ce_fdstunit > (sc->sc_counts[ce->ce_fdsttype] - 1)) ||
sys/scsi/ch.c
399
(ce->ce_sdstunit > (sc->sc_counts[ce->ce_sdsttype] - 1)))
sys/scsi/ch.c
405
if (((sc->sc_exchangemask[ce->ce_srctype] &
sys/scsi/ch.c
406
(1 << ce->ce_fdsttype)) == 0) ||
sys/scsi/ch.c
407
((sc->sc_exchangemask[ce->ce_fdsttype] &
sys/scsi/ch.c
408
(1 << ce->ce_sdsttype)) == 0))
sys/scsi/ch.c
414
src = sc->sc_firsts[ce->ce_srctype] + ce->ce_srcunit;
sys/scsi/ch.c
415
dst1 = sc->sc_firsts[ce->ce_fdsttype] + ce->ce_fdstunit;
sys/scsi/ch.c
416
dst2 = sc->sc_firsts[ce->ce_sdsttype] + ce->ce_sdstunit;
sys/scsi/ch.c
434
if (ISSET(ce->ce_flags, CE_INVERT1))
sys/scsi/ch.c
436
if (ISSET(ce->ce_flags, CE_INVERT2))
usr.bin/deroff/deroff.c
1627
M(NBLK, 'c','e', ce), /* centered */
usr.bin/deroff/deroff.c
224
int ce(void);
usr.sbin/bgpd/bgpd.c
132
struct connect_elm *ce;
usr.sbin/bgpd/bgpd.c
1360
struct connect_elm *ce;
usr.sbin/bgpd/bgpd.c
1372
if ((ce = calloc(1, sizeof(*ce))) == NULL) {
usr.sbin/bgpd/bgpd.c
1377
if (pfkey_establish(&ce->auth_state, &r->auth,
usr.sbin/bgpd/bgpd.c
1381
ce->id = r->id;
usr.sbin/bgpd/bgpd.c
1382
ce->fd = socket(aid2af(r->remote_addr.aid),
usr.sbin/bgpd/bgpd.c
1384
if (ce->fd == -1) {
usr.sbin/bgpd/bgpd.c
1391
if (setsockopt(ce->fd, IPPROTO_IP, IP_TOS, &pre, sizeof(pre)) ==
usr.sbin/bgpd/bgpd.c
1398
if (setsockopt(ce->fd, IPPROTO_IPV6, IPV6_TCLASS, &pre,
usr.sbin/bgpd/bgpd.c
1406
if (setsockopt(ce->fd, IPPROTO_TCP, TCP_NODELAY, &nodelay,
usr.sbin/bgpd/bgpd.c
1412
if (tcp_md5_set(ce->fd, &r->auth, &r->remote_addr) == -1)
usr.sbin/bgpd/bgpd.c
1416
if (bind(ce->fd, sa, len) == -1) {
usr.sbin/bgpd/bgpd.c
1424
if (connect(ce->fd, sa, len) == -1) {
usr.sbin/bgpd/bgpd.c
1430
TAILQ_INSERT_TAIL(&connect_queue, ce, entry);
usr.sbin/bgpd/bgpd.c
1435
imsg_compose(ibuf_rtr, IMSG_SOCKET_SETUP, ce->id, 0, ce->fd, NULL, 0);
usr.sbin/bgpd/bgpd.c
1436
TAILQ_INSERT_TAIL(&socket_queue, ce, entry);
usr.sbin/bgpd/bgpd.c
1440
if (ce->fd != -1)
usr.sbin/bgpd/bgpd.c
1441
close(ce->fd);
usr.sbin/bgpd/bgpd.c
1442
free(ce);
usr.sbin/bgpd/bgpd.c
1449
struct connect_elm *ce;
usr.sbin/bgpd/bgpd.c
1453
TAILQ_FOREACH(ce, &connect_queue, entry) {
usr.sbin/bgpd/bgpd.c
1454
if (ce->fd == fd)
usr.sbin/bgpd/bgpd.c
1457
if (ce == NULL)
usr.sbin/bgpd/bgpd.c
1460
TAILQ_REMOVE(&connect_queue, ce, entry);
usr.sbin/bgpd/bgpd.c
1464
if (ce->id == r->id)
usr.sbin/bgpd/bgpd.c
1468
log_warnx("rtr id %d no longer exists", ce->id);
usr.sbin/bgpd/bgpd.c
1485
imsg_compose(ibuf_rtr, IMSG_SOCKET_SETUP, ce->id, 0, ce->fd, NULL, 0);
usr.sbin/bgpd/bgpd.c
1486
TAILQ_INSERT_TAIL(&socket_queue, ce, entry);
usr.sbin/bgpd/bgpd.c
1491
free(ce);
usr.sbin/bgpd/bgpd.c
1497
struct connect_elm *ce;
usr.sbin/bgpd/bgpd.c
1499
TAILQ_FOREACH(ce, &socket_queue, entry) {
usr.sbin/bgpd/bgpd.c
1500
if (ce->id == id) {
usr.sbin/bgpd/bgpd.c
1501
pfkey_remove(&ce->auth_state);
usr.sbin/bgpd/bgpd.c
1502
TAILQ_REMOVE(&socket_queue, ce, entry);
usr.sbin/bgpd/bgpd.c
1503
free(ce);
usr.sbin/bgpd/bgpd.c
355
TAILQ_FOREACH(ce, &connect_queue, entry) {
usr.sbin/bgpd/bgpd.c
356
pfd[npfd].fd = ce->fd;
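
Aside: the bgpd.c hits trace a connect_elm that moves between two TAILQs, sitting on connect_queue while the TCP connect is pending and on socket_queue once the fd has been handed off. A small self-contained sketch of that queue handling with <sys/queue.h> follows; the struct layout and the values are guesses for illustration, only the queue macro calls mirror the hits.

	#include <sys/queue.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct connect_elm {
		TAILQ_ENTRY(connect_elm) entry;
		uint32_t id;
		int fd;
	};

	TAILQ_HEAD(, connect_elm) connect_queue = TAILQ_HEAD_INITIALIZER(connect_queue);
	TAILQ_HEAD(, connect_elm) socket_queue  = TAILQ_HEAD_INITIALIZER(socket_queue);

	int main(void)
	{
		struct connect_elm *ce;

		if ((ce = calloc(1, sizeof(*ce))) == NULL)
			return 1;
		ce->id = 7;
		ce->fd = -1;

		/* connect() still in flight: park the element on connect_queue. */
		TAILQ_INSERT_TAIL(&connect_queue, ce, entry);

		/* Later, once the socket is set up, move it to socket_queue. */
		TAILQ_REMOVE(&connect_queue, ce, entry);
		TAILQ_INSERT_TAIL(&socket_queue, ce, entry);

		TAILQ_FOREACH(ce, &socket_queue, entry)
			printf("queued rtr id %u (fd %d)\n", ce->id, ce->fd);

		return 0;
	}
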
usr.sbin/nsd/difffile.c
1130
domain_type* ce = NULL; /* for speeding up has_data_below */
usr.sbin/nsd/difffile.c
1140
ce = rrset_zero_nonexist_check(domain, ce);
usr.sbin/nsd/difffile.c
264
rrset_zero_nonexist_check(domain_type* domain, domain_type* ce)
usr.sbin/nsd/difffile.c
275
if(p == ce || has_data_below(p))
usr.sbin/procmap/procmap.c
1008
struct cache_entry *ce;
usr.sbin/procmap/procmap.c
1017
ce = malloc(sizeof(struct cache_entry));
usr.sbin/procmap/procmap.c
1018
if (ce == NULL)
usr.sbin/procmap/procmap.c
1021
ce->ce_vp = ncp->nc_vp;
usr.sbin/procmap/procmap.c
1022
ce->ce_pvp = ncp->nc_dvp;
usr.sbin/procmap/procmap.c
1023
ce->ce_cid = ncp->nc_vpid;
usr.sbin/procmap/procmap.c
1024
ce->ce_pcid = ncp->nc_dvpid;
usr.sbin/procmap/procmap.c
1025
ce->ce_nlen = (unsigned)ncp->nc_nlen;
usr.sbin/procmap/procmap.c
1026
strlcpy(ce->ce_name, ncp->nc_name, sizeof(ce->ce_name));
usr.sbin/procmap/procmap.c
1028
LIST_INSERT_HEAD(&lcache, ce, ce_next);
usr.sbin/procmap/procmap.c
938
struct cache_entry *ce;
usr.sbin/procmap/procmap.c
953
LIST_FOREACH(ce, &lcache, ce_next)
usr.sbin/procmap/procmap.c
954
if (ce->ce_vp == P(&svp) && ce->ce_cid == cid)
usr.sbin/procmap/procmap.c
956
if (ce && ce->ce_vp == P(&svp) && ce->ce_cid == cid) {
usr.sbin/procmap/procmap.c
962
if (o - ce->ce_nlen <= buf)
usr.sbin/procmap/procmap.c
964
o -= ce->ce_nlen;
usr.sbin/procmap/procmap.c
965
memcpy(o, ce->ce_name, ce->ce_nlen);
usr.sbin/procmap/procmap.c
966
P(&svp) = ce->ce_pvp;
usr.sbin/procmap/procmap.c
967
cid = ce->ce_pcid;
usr.sbin/unbound/services/authzone.c
2408
struct auth_data* ce)
usr.sbin/unbound/services/authzone.c
2418
if(ce && nmlen == ce->namelen)
usr.sbin/unbound/services/authzone.c
2477
struct auth_data* node, int node_exact, struct auth_data** ce,
usr.sbin/unbound/services/authzone.c
2482
*ce = NULL;
usr.sbin/unbound/services/authzone.c
2489
*ce = n;
usr.sbin/unbound/services/authzone.c
2495
*ce = NULL;
usr.sbin/unbound/services/authzone.c
2510
*ce = n;
usr.sbin/unbound/services/authzone.c
2518
*ce = n;
usr.sbin/unbound/services/authzone.c
2523
if(*ce == NULL && !domain_has_only_nsec3(n)) {
usr.sbin/unbound/services/authzone.c
2526
*ce = n;
usr.sbin/unbound/services/authzone.c
3237
struct dns_msg* msg, struct auth_data* ce, struct auth_rrset* rrset)
usr.sbin/unbound/services/authzone.c
3241
log_assert(ce);
usr.sbin/unbound/services/authzone.c
3243
if(!msg_add_rrset_ns(z, region, msg, ce, rrset)) return 0;
usr.sbin/unbound/services/authzone.c
3245
if((ds=az_domain_rrset(ce, LDNS_RR_TYPE_DS))!=NULL) {
usr.sbin/unbound/services/authzone.c
3246
if(!msg_add_rrset_ns(z, region, msg, ce, ds)) return 0;
usr.sbin/unbound/services/authzone.c
3249
if((nsec=az_domain_rrset(ce, LDNS_RR_TYPE_NSEC))!=NULL) {
usr.sbin/unbound/services/authzone.c
3250
if(!msg_add_rrset_ns(z, region, msg, ce, nsec))
usr.sbin/unbound/services/authzone.c
3253
if(!az_add_nsec3_proof(z, region, msg, ce->name,
usr.sbin/unbound/services/authzone.c
3254
ce->namelen, msg->qinfo.qname,
usr.sbin/unbound/services/authzone.c
3267
struct regional* region, struct dns_msg* msg, struct auth_data* ce,
usr.sbin/unbound/services/authzone.c
3270
log_assert(ce);
usr.sbin/unbound/services/authzone.c
3272
if(!msg_add_rrset_an(z, region, msg, ce, rrset)) return 0;
usr.sbin/unbound/services/authzone.c
3274
msg, ce, rrset)) return 0;
usr.sbin/unbound/services/authzone.c
3290
struct regional* region, struct dns_msg* msg, struct auth_data* ce,
usr.sbin/unbound/services/authzone.c
3330
} else if(ce) {
usr.sbin/unbound/services/authzone.c
3351
struct dns_msg* msg, struct auth_data* ce, struct auth_data* node)
usr.sbin/unbound/services/authzone.c
3358
if(ce && !az_nsec_wildcard_denial(z, region, msg, ce->name,
usr.sbin/unbound/services/authzone.c
3359
ce->namelen)) return 0;
usr.sbin/unbound/services/authzone.c
3360
} else if(ce) {
usr.sbin/unbound/services/authzone.c
3361
if(!az_add_nsec3_proof(z, region, msg, ce->name,
usr.sbin/unbound/services/authzone.c
3362
ce->namelen, msg->qinfo.qname,
usr.sbin/unbound/services/authzone.c
3396
struct regional* region, struct dns_msg* msg, struct auth_data* ce,
usr.sbin/unbound/services/authzone.c
3403
if(ce && rrset && rrset->type == LDNS_RR_TYPE_NS) {
usr.sbin/unbound/services/authzone.c
3404
return az_generate_referral_answer(z, region, msg, ce, rrset);
usr.sbin/unbound/services/authzone.c
3406
if(ce && rrset && rrset->type == LDNS_RR_TYPE_DNAME) {
usr.sbin/unbound/services/authzone.c
3407
return az_generate_dname_answer(z, qinfo, region, msg, ce,
usr.sbin/unbound/services/authzone.c
3416
if((wildcard=az_find_wildcard(z, qinfo, ce)) != NULL) {
usr.sbin/unbound/services/authzone.c
3418
ce, wildcard, node);
usr.sbin/unbound/services/authzone.c
3421
return az_generate_nxdomain_answer(z, region, msg, ce, node);
usr.sbin/unbound/services/authzone.c
3429
struct auth_data* node, *ce;
usr.sbin/unbound/services/authzone.c
3443
node_exists = az_find_ce(z, qinfo, node, node_exact, &ce, &rrset);
usr.sbin/unbound/services/authzone.c
3457
if(ce)
usr.sbin/unbound/services/authzone.c
3458
sldns_wire2str_dname_buf(ce->name, ce->namelen,
usr.sbin/unbound/services/authzone.c
3476
ce, rrset, node);
usr.sbin/unbound/services/cache/rrset.c
249
struct ub_packed_rrset_key* rrset, uint8_t* ce, size_t ce_len,
usr.sbin/unbound/services/cache/rrset.c
263
memmove(wc_dname+2, ce, ce_len);
usr.sbin/unbound/services/cache/rrset.h
151
struct ub_packed_rrset_key* rrset, uint8_t* ce, size_t ce_len,
usr.sbin/unbound/services/rpz.c
1174
uint8_t* ce;
usr.sbin/unbound/services/rpz.c
1211
ce = dname_get_shared_topdomain(z->name, qname);
usr.sbin/unbound/services/rpz.c
1212
if(!ce /* should not happen */) {
usr.sbin/unbound/services/rpz.c
1219
ce_labs = dname_count_size_labels(ce, &ce_len);
usr.sbin/unbound/services/rpz.c
1229
memmove(wc+2, ce, ce_len);
usr.sbin/unbound/validator/val_neg.c
1388
uint8_t* ce = NULL;
usr.sbin/unbound/validator/val_neg.c
1433
if(!(ce = nsec_closest_encloser(qinfo->qname, nsec)))
usr.sbin/unbound/validator/val_neg.c
1435
dname_count_size_labels(ce, &ce_len);
usr.sbin/unbound/validator/val_neg.c
1439
if(!nodata_wc || query_dname_compare(nodata_wc, ce) != 0) {
usr.sbin/unbound/validator/val_neg.c
1448
memmove(wc_ce+2, ce, ce_len);
usr.sbin/unbound/validator/val_nsec.c
213
uint8_t* wc = NULL, *ce = NULL;
usr.sbin/unbound/validator/val_nsec.c
271
ce = nsec_closest_encloser(qinfo->qname,
usr.sbin/unbound/validator/val_nsec.c
275
if(wc && !ce)
usr.sbin/unbound/validator/val_nsec.c
277
else if(wc && ce) {
usr.sbin/unbound/validator/val_nsec.c
279
if(query_dname_compare(wc, ce) != 0)
usr.sbin/unbound/validator/val_nsec.c
330
uint8_t* ce = nsec->rk.dname;
usr.sbin/unbound/validator/val_nsec.c
332
dname_remove_label(&ce, &ce_len);
usr.sbin/unbound/validator/val_nsec.c
337
if(dname_strict_subdomain_c(qinfo->qname, ce)) {
usr.sbin/unbound/validator/val_nsec.c
352
*wc = ce;
usr.sbin/unbound/validator/val_nsec.c
368
uint8_t* ce = nm;
usr.sbin/unbound/validator/val_nsec.c
369
dname_remove_label(&ce, &ce_len);
usr.sbin/unbound/validator/val_nsec.c
370
if(dname_strict_subdomain_c(qinfo->qname, ce)) {
usr.sbin/unbound/validator/val_nsec.c
371
*wc = ce;
usr.sbin/unbound/validator/val_nsec.c
502
uint8_t* ce;
usr.sbin/unbound/validator/val_nsec.c
509
ce = nsec_closest_encloser(qinf->qname, nsec);
usr.sbin/unbound/validator/val_nsec.c
510
if(!ce)
usr.sbin/unbound/validator/val_nsec.c
512
if(query_dname_compare(wc, ce) != 0) {
usr.sbin/unbound/validator/val_nsec.c
525
uint8_t* ce = nsec_closest_encloser(qname, nsec);
usr.sbin/unbound/validator/val_nsec.c
529
if(!ce)
usr.sbin/unbound/validator/val_nsec.c
535
labs = dname_count_labels(qname) - dname_count_labels(ce);
usr.sbin/unbound/validator/val_nsec3.c
1012
int prove_does_not_exist, struct ce_response* ce, int* calculations)
usr.sbin/unbound/validator/val_nsec3.c
1017
memset(ce, 0, sizeof(*ce));
usr.sbin/unbound/validator/val_nsec3.c
1019
if(!nsec3_find_closest_encloser(env, flt, ct, qinfo, ce, calculations)) {
usr.sbin/unbound/validator/val_nsec3.c
1038
log_nametypeclass(VERB_ALGO, "ce candidate", ce->ce, 0, 0);
usr.sbin/unbound/validator/val_nsec3.c
1040
if(query_dname_compare(ce->ce, qinfo->qname) == 0) {
usr.sbin/unbound/validator/val_nsec3.c
1054
if(nsec3_has_type(ce->ce_rrset, ce->ce_rr, LDNS_RR_TYPE_NS) &&
usr.sbin/unbound/validator/val_nsec3.c
1055
!nsec3_has_type(ce->ce_rrset, ce->ce_rr, LDNS_RR_TYPE_SOA)) {
usr.sbin/unbound/validator/val_nsec3.c
1056
if(!nsec3_has_type(ce->ce_rrset, ce->ce_rr, LDNS_RR_TYPE_DS)) {
usr.sbin/unbound/validator/val_nsec3.c
1065
if(nsec3_has_type(ce->ce_rrset, ce->ce_rr, LDNS_RR_TYPE_DNAME)) {
usr.sbin/unbound/validator/val_nsec3.c
1072
next_closer(qinfo->qname, qinfo->qname_len, ce->ce, &nc, &nc_len);
usr.sbin/unbound/validator/val_nsec3.c
1074
&ce->nc_rrset, &ce->nc_rr, calculations)) {
usr.sbin/unbound/validator/val_nsec3.c
1098
nsec3_ce_wildcard(struct regional* region, uint8_t* ce, size_t celen,
usr.sbin/unbound/validator/val_nsec3.c
1111
memmove(nm+2, ce, celen);
usr.sbin/unbound/validator/val_nsec3.c
1121
struct ce_response ce;
usr.sbin/unbound/validator/val_nsec3.c
1131
sec = nsec3_prove_closest_encloser(env, flt, ct, qinfo, 1, &ce, calc);
usr.sbin/unbound/validator/val_nsec3.c
1144
log_nametypeclass(VERB_ALGO, "nsec3 nameerror: proven ce=", ce.ce,0,0);
usr.sbin/unbound/validator/val_nsec3.c
1148
log_assert(ce.ce);
usr.sbin/unbound/validator/val_nsec3.c
1149
wc = nsec3_ce_wildcard(ct->region, ce.ce, ce.ce_len, &wclen);
usr.sbin/unbound/validator/val_nsec3.c
1175
if(ce.nc_rrset && nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
usr.sbin/unbound/validator/val_nsec3.c
1213
struct ce_response ce;
usr.sbin/unbound/validator/val_nsec3.c
1281
sec = nsec3_prove_closest_encloser(env, flt, ct, qinfo, 1, &ce, calc);
usr.sbin/unbound/validator/val_nsec3.c
1297
log_assert(ce.ce);
usr.sbin/unbound/validator/val_nsec3.c
1298
wc = nsec3_ce_wildcard(ct->region, ce.ce, ce.ce_len, &wclen);
usr.sbin/unbound/validator/val_nsec3.c
1324
if(ce.nc_rrset && nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
usr.sbin/unbound/validator/val_nsec3.c
1348
if(!ce.nc_rrset) {
usr.sbin/unbound/validator/val_nsec3.c
1354
log_assert(ce.nc_rrset);
usr.sbin/unbound/validator/val_nsec3.c
1355
if(!nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
usr.sbin/unbound/validator/val_nsec3.c
1393
struct ce_response ce;
usr.sbin/unbound/validator/val_nsec3.c
1411
memset(&ce, 0, sizeof(ce));
usr.sbin/unbound/validator/val_nsec3.c
1412
ce.ce = wc;
usr.sbin/unbound/validator/val_nsec3.c
1413
ce.ce_len = wclen;
usr.sbin/unbound/validator/val_nsec3.c
1417
next_closer(qinfo->qname, qinfo->qname_len, ce.ce, &nc, &nc_len);
usr.sbin/unbound/validator/val_nsec3.c
1419
&ce.nc_rrset, &ce.nc_rr, calc)) {
usr.sbin/unbound/validator/val_nsec3.c
1438
if(ce.nc_rrset && nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
usr.sbin/unbound/validator/val_nsec3.c
1484
struct ce_response ce;
usr.sbin/unbound/validator/val_nsec3.c
1549
sec = nsec3_prove_closest_encloser(env, &flt, ct, qinfo, 1, &ce, &calc);
usr.sbin/unbound/validator/val_nsec3.c
1562
if(!ce.nc_rrset) {
usr.sbin/unbound/validator/val_nsec3.c
1572
log_assert(ce.nc_rrset);
usr.sbin/unbound/validator/val_nsec3.c
1573
if(!nsec3_has_optout(ce.nc_rrset, ce.nc_rr)) {
usr.sbin/unbound/validator/val_nsec3.c
93
uint8_t* ce;
usr.sbin/unbound/validator/val_nsec3.c
938
struct ce_response* ce, int* calculations)
usr.sbin/unbound/validator/val_nsec3.c
958
&ce->ce_rrset, &ce->ce_rr, calculations)) {
usr.sbin/unbound/validator/val_nsec3.c
959
ce->ce = nm;
usr.sbin/unbound/validator/val_nsec3.c
960
ce->ce_len = nmlen;
usr.sbin/unbound/validator/val_nsec3.c
980
next_closer(uint8_t* qname, size_t qnamelen, uint8_t* ce,
usr.sbin/unbound/validator/val_nsec3.c
983
int strip = dname_count_labels(qname) - dname_count_labels(ce) -1;
usr.sbin/unbound/validator/validator.c
1140
uint8_t* ce = NULL; /* for wildcard nodata responses. This is the
usr.sbin/unbound/validator/validator.c
1160
ce = nsec_closest_encloser(qchase->qname, s);
usr.sbin/unbound/validator/validator.c
1177
if(wc && !ce)
usr.sbin/unbound/validator/validator.c
1179
else if(wc && ce) {
usr.sbin/unbound/validator/validator.c
1180
if(query_dname_compare(wc, ce) != 0) {
usr.sbin/unbound/validator/validator.c
1251
uint8_t* ce;
usr.sbin/unbound/validator/validator.c
1262
ce = nsec_closest_encloser(qchase->qname, s);
usr.sbin/unbound/validator/validator.c
1263
ce_labs = dname_count_labels(ce);
usr.sbin/unbound/validator/validator.c
1664
uint8_t* ce = NULL; /* for wildcard nodata responses. This is the
usr.sbin/unbound/validator/validator.c
1691
ce = nsec_closest_encloser(qchase->qname, s);
usr.sbin/unbound/validator/validator.c
1722
if(wc && !ce)
usr.sbin/unbound/validator/validator.c
1724
else if(wc && ce) {
usr.sbin/unbound/validator/validator.c
1725
if(query_dname_compare(wc, ce) != 0) {