/* ctx_info: representative fragments from kernel code using this identifier. */
/* x86 APEI/MCE: report a CPER x86 processor context record through SMCA. */
int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id);
static inline int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
					     u64 lapic_id) { return -EINVAL; }
int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id);
static inline int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info,
					     u64 lapic_id) { return -EINVAL; }

int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id)
{
	return apei_smca_report_x86_error(ctx_info, lapic_id);
}

int apei_smca_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id)
{
	const u64 *i_mce = ((const u64 *)(ctx_info + 1)); /* MSRs follow the header */
	...
	num_regs = ctx_info->reg_arr_size >> 3;	/* 8 bytes per register */
	if ((ctx_info->msr_addr & MSR_AMD64_SMCA_MC0_STATUS) !=
	    MSR_AMD64_SMCA_MC0_STATUS)		/* must start at an MCx_STATUS MSR */
		return -EINVAL;
	m->bank = (ctx_info->msr_addr >> 4) & 0xFF; /* SMCA banks are 16 MSRs apart */
}
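/* The SMCA path above derives the register count and bank number straight from
 * the record header. A minimal standalone sketch of the same arithmetic, using
 * a mock header type and a mock MSR base constant (MOCK_SMCA_MC0_STATUS is a
 * stand-in for the real MSR_AMD64_SMCA_MC0_STATUS from asm/msr-index.h): */
#include <stdint.h>
#include <stdio.h>

/* Mock of the CPER IA context header fields used here, for illustration only. */
struct mock_ia_proc_ctx {
	uint16_t reg_ctx_type;
	uint16_t reg_arr_size;	/* bytes of register data after the header */
	uint32_t msr_addr;	/* first MSR captured in the array */
};

#define MOCK_SMCA_MC0_STATUS 0xC0002001u	/* stand-in constant */

int main(void)
{
	struct mock_ia_proc_ctx hdr = { .reg_arr_size = 48, .msr_addr = 0xC0002021 };

	/* Registers are u64s, so the count is the byte size divided by 8. */
	unsigned int num_regs = hdr.reg_arr_size >> 3;

	/* Records must start at a bank's MCx_STATUS MSR; banks sit 16 MSRs apart. */
	if ((hdr.msr_addr & MOCK_SMCA_MC0_STATUS) != MOCK_SMCA_MC0_STATUS) {
		fprintf(stderr, "not an SMCA status record\n");
		return 1;
	}
	unsigned int bank = (hdr.msr_addr >> 4) & 0xFF;

	printf("bank %u, %u registers\n", bank, num_regs);	/* bank 2, 6 regs */
	return 0;
}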
/* btintel_pcie (Intel Bluetooth PCIe driver): DMA context descriptor. */
			     struct ctx_info *ci)	/* passed into the setup helper */
	/* Four u16 ring indices per queue, plus the shared context structure: */
	total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
	struct ctx_info *ci;				/* local handle */
/* drivers/firmware/efi/cper-arm.c: print the ARM context-info structures. */
	struct cper_arm_ctx_info *ctx_info;

	ctx_info = (struct cper_arm_ctx_info *)err_info;
	for (i = 0; i < proc->context_info_num; i++) {
		int size = ALIGN(sizeof(*ctx_info) + ctx_info->size, 16);

		if (ctx_info->type > max_ctx_type) {
			printk("%sInvalid context type: %d (max: %d)\n",
			       newpfx, ctx_info->type, max_ctx_type);
			return;
		}
		printk("%sregister context type: %s\n", newpfx,
		       arm_reg_ctx_strs[ctx_info->type]);
		print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16, 4,
			       (ctx_info + 1), ctx_info->size, 0);
		/* Step to the next record; records are padded to 16 bytes. */
		ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + size);
	}
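/* Both the ARM and IA printers walk a packed array of variable-length records,
 * where each record's footprint is its header plus payload rounded up to 16
 * bytes. A self-contained sketch of that walk over a hypothetical buffer, with
 * a mock header type; ALIGN16 mirrors the kernel's ALIGN(x, 16): */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN16(x) (((x) + 15) & ~15UL)

/* Mock context-record header: a type tag and a payload size, as in CPER. */
struct mock_ctx {
	uint16_t type;
	uint16_t size;	/* payload bytes following the header */
};

int main(void)
{
	/* Hypothetical buffer holding two records back to back. */
	unsigned char buf[64] = {0};
	struct mock_ctx a = { .type = 0, .size = 8 };
	struct mock_ctx b = { .type = 1, .size = 4 };
	memcpy(buf, &a, sizeof(a));
	memcpy(buf + ALIGN16(sizeof(a) + a.size), &b, sizeof(b));

	unsigned char *p = buf, *end = buf + sizeof(buf);
	for (int i = 0; i < 2 && p + sizeof(struct mock_ctx) <= end; i++) {
		struct mock_ctx hdr;

		memcpy(&hdr, p, sizeof(hdr));	/* avoid unaligned access */
		printf("record %d: type %u, %u payload bytes\n",
		       i, (unsigned)hdr.type, (unsigned)hdr.size);
		/* Advance by header + payload, padded to a 16-byte boundary. */
		p += ALIGN16(sizeof(hdr) + hdr.size);
	}
	return 0;
}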
/* drivers/firmware/efi/cper.c: print IA32/X64 processor context records;
 * each record is padded to 16 bytes and walked in place. */
	struct cper_ia_proc_ctx *ctx_info;

	ctx_info = (struct cper_ia_proc_ctx *)err_info;
	for (...) {	/* one pass per context structure in the section */
		int size = ALIGN(sizeof(*ctx_info) + ctx_info->reg_arr_size, 16);
		int groupsize = 4;

		printk("%sRegister Context Type: %s\n", newpfx,
		       ctx_info->reg_ctx_type < ARRAY_SIZE(ia_reg_ctx_strs) ?
		       ia_reg_ctx_strs[ctx_info->reg_ctx_type] : "unknown");
		printk("%sRegister Array Size: 0x%04x\n", newpfx,
		       ctx_info->reg_arr_size);
		if (ctx_info->reg_ctx_type == CTX_TYPE_MSR) {
			groupsize = 8;	/* MSRs dump as 8-byte groups */
			printk("%sMSR Address: 0x%08x\n", newpfx,
			       ctx_info->msr_addr);
		}
		if (ctx_info->reg_ctx_type == CTX_TYPE_MMREG)
			printk("%sMM Register Address: 0x%016llx\n", newpfx,
			       ctx_info->mm_reg_addr);
		/* Hex-dump the raw registers unless the arch handler took them. */
		if (ctx_info->reg_ctx_type != CTX_TYPE_MSR ||
		    arch_apei_report_x86_error(ctx_info, proc->lapic_id))
			print_hex_dump(newpfx, "", DUMP_PREFIX_OFFSET, 16,
				       groupsize, (ctx_info + 1),
				       ctx_info->reg_arr_size, 0);
		ctx_info = (struct cper_ia_proc_ctx *)((long)ctx_info + size);
	}
/* vmwgfx: per-context validation metadata tracked during command submission. */
	struct vmw_ctx_validation_info *ctx_info;

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	/* Elsewhere, the validation code returns the per-resource private data
	 * through an out-pointer, and the most recent lookup is memoized: */
	... (void **)&ctx_info, NULL);
	... dirty, (void **)&ctx_info, ...);
	vmw_execbuf_rcache_update(rcache, res, ctx_info);
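/* The fragments above pair a slow lookup (vmw_execbuf_info_from_res) with a
 * one-entry result cache (vmw_execbuf_rcache_update) so repeated validation of
 * the same resource skips the walk. A hedged sketch of that memoization
 * pattern; all types and helpers here are invented stand-ins: */
#include <stddef.h>
#include <stdio.h>

struct res { int id; };
struct info { struct res *owner; int flags; };

/* One-entry cache of the last (resource -> info) lookup. */
struct rcache {
	const struct res *res;
	struct info *info;
	int valid;
};

static struct info *slow_lookup(struct res *r, struct info *table, size_t n)
{
	for (size_t i = 0; i < n; i++)	/* stand-in for the list walk */
		if (table[i].owner == r)
			return &table[i];
	return NULL;
}

static struct info *lookup(struct rcache *rc, struct res *r,
			   struct info *table, size_t n)
{
	if (rc->valid && rc->res == r)	/* hit: skip the walk entirely */
		return rc->info;
	struct info *inf = slow_lookup(r, table, n);
	if (inf) {			/* remember the result, as the driver does */
		rc->res = r;
		rc->info = inf;
		rc->valid = 1;
	}
	return inf;
}

int main(void)
{
	struct res r = { .id = 7 };
	struct info table[1] = { { .owner = &r, .flags = 3 } };
	struct rcache rc = {0};

	printf("first:  %p\n", (void *)lookup(&rc, &r, table, 1));
	printf("cached: %p\n", (void *)lookup(&rc, &r, table, 1));
	return 0;
}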
/* irdma: push TCP/iWARP connection state into the QP's host context. */
	struct irdma_qp_host_ctx_info *ctx_info;

	ctx_info = &iwqp->ctx_info;
	ctx_info->tcp_info = &iwqp->tcp_info;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	/* Mark which sub-structures the context writer should serialize... */
	ctx_info->tcp_info_valid = true;
	ctx_info->iwarp_info_valid = true;
	ctx_info->user_pri = cm_node->user_pri;
	irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
	/* ...and clear the flags again once the context has been written. */
	ctx_info->tcp_info_valid = false;
	ctx_info->iwarp_info_valid = false;
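/* The tcp_info_valid/iwarp_info_valid dance above is a transient-validity
 * idiom: the flags tell the context writer which sub-structures to serialize,
 * and are cleared immediately afterwards so a later, unrelated setctx call does
 * not re-serialize stale state. A minimal sketch, with mock types standing in
 * for the irdma structures and set_ctx() standing in for irdma_sc_qp_setctx(): */
#include <stdbool.h>
#include <stdio.h>

struct tcp_state { int seq; };
struct ctx_desc {
	struct tcp_state *tcp_info;
	bool tcp_info_valid;	/* consume tcp_info on the next write? */
};

/* Stand-in context writer: serializes only what is marked valid. */
static void set_ctx(const struct ctx_desc *ctx)
{
	if (ctx->tcp_info_valid)
		printf("writing tcp state, seq=%d\n", ctx->tcp_info->seq);
	else
		printf("tcp state skipped\n");
}

int main(void)
{
	struct tcp_state tcp = { .seq = 42 };
	struct ctx_desc ctx = { .tcp_info = &tcp };

	ctx.tcp_info_valid = true;	/* arm the flag for this write only */
	set_ctx(&ctx);
	ctx.tcp_info_valid = false;	/* disarm so later writes skip it */
	set_ctx(&ctx);
	return 0;
}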
/* irdma: record a fatal RQ error index in the QP context (AEQ handling). */
	struct irdma_qp_host_ctx_info *ctx_info = NULL;

	ctx_info = &iwqp->ctx_info;
	if (...) {	/* RoCE QP */
		/* With an SRQ the RQ index is meaningless, so suppress it. */
		ctx_info->roce_info->err_rq_idx_valid =
			ctx_info->srq_valid ? false : info->err_rq_idx_valid;
		if (ctx_info->roce_info->err_rq_idx_valid) {
			ctx_info->roce_info->err_rq_idx = info->wqe_idx;
			irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
						ctx_info);
		}
	} else {	/* iWARP QP */
		ctx_info->iwarp_info->err_rq_idx_valid = info->rq;
		ctx_info->iwarp_info->err_rq_idx = info->wqe_idx;
		ctx_info->tcp_info_valid = false;
		ctx_info->iwarp_info_valid = true;
		irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
				   ctx_info);
	}
/* irdma: initial QP context programming at create time. */
	iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
	ctx_info = &iwqp->ctx_info;
	ctx_info->srq_valid = srq_valid;
	ctx_info->srq_id = srq_id;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	if (...)	/* RoCE vs. iWARP transport */
		irdma_roce_fill_and_set_qpctx_info(iwqp, ctx_info);
	else
		irdma_iw_fill_and_set_qpctx_info(iwqp, ctx_info);
	/* Remote atomics are advertised only when enabled in the context: */
	if (iwqp->ctx_info.remote_atomics_en)
		...
/* irdma: reprogram the RoCE QP context on modify_qp. */
	struct irdma_qp_host_ctx_info *ctx_info;

	ctx_info = &iwqp->ctx_info;
	ctx_info->roce_info->pd_id = iwpd->sc_pd.pd_id;

	/* Re-derive the 802.1p user priority and move the QP to the new
	 * work-scheduler node. */
	dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
	ctx_info->user_pri = rt_tos2priority(udp_info->tos);
	ether_addr_copy(ctx_info->roce_info->mac_addr, ...);
	ctx_info->user_pri = irdma_roce_get_vlan_prio(sgid_attr,
						      ctx_info->user_pri);
	if (dev->ws_add(iwqp->sc_qp.vsi, ctx_info->user_pri))
		return -ENOMEM;
	iwqp->sc_qp.user_pri = ctx_info->user_pri;
	/* Fold the priority into the VLAN tag's PCP bits. */
	udp_info->vlan_tag = vlan_id |
			     ctx_info->user_pri << VLAN_PRIO_SHIFT;

	ctx_info->remote_atomics_en = true;
	ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
	ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
	ctx_info->rem_endpoint_idx = udp_info->arp_idx;
	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
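/* The RoCE path derives an 802.1p priority from the IP TOS byte and folds it
 * into the VLAN tag's PCP field. A standalone sketch of that tag arithmetic;
 * VLAN_PRIO_SHIFT is 13 in the kernel, and mock_tos2priority() is a simplified
 * stand-in for the kernel's rt_tos2priority() table lookup: */
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT 13	/* PCP occupies bits 15:13 of the VLAN TCI */

/* Simplified stand-in: take the three precedence bits of the TOS byte. */
static unsigned int mock_tos2priority(uint8_t tos)
{
	return tos >> 5;
}

int main(void)
{
	uint8_t tos = 0xB8;		/* DSCP EF; precedence bits = 5 */
	uint16_t vlan_id = 100;

	unsigned int prio = mock_tos2priority(tos);
	uint16_t vlan_tag = vlan_id | (prio << VLAN_PRIO_SHIFT);

	printf("prio %u, tag 0x%04x\n", prio, (unsigned)vlan_tag);
	return 0;
}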
/* irdma: reprogram the iWARP QP context on modify_qp. */
	struct irdma_qp_host_ctx_info *ctx_info;

	ctx_info = &iwqp->ctx_info;
	ctx_info->iwarp_info_valid = true;
	if (ctx_info->iwarp_info_valid) {
		ctx_info->send_cq_num = iwqp->iwscq->sc_cq.cq_uk.cq_id;
		ctx_info->rcv_cq_num = iwqp->iwrcq->sc_cq.cq_uk.cq_id;
		irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
	}
	ctx_info->rem_endpoint_idx = tcp_info->arp_idx;
	/* Completion context supplied by user space at QP creation: */
	iwqp->ctx_info.qp_compl_ctx = req.user_compl_ctx;
/* irdma: transport-specific context fill helpers called at QP create. */
static void irdma_roce_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
					       struct irdma_qp_host_ctx_info *ctx_info)
{
	ctx_info->roce_info = &iwqp->roce_info;
	ctx_info->udp_info = &iwqp->udp_info;
	irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
}

static void irdma_iw_fill_and_set_qpctx_info(struct irdma_qp *iwqp,
					     struct irdma_qp_host_ctx_info *ctx_info)
{
	ctx_info->iwarp_info = &iwqp->iwarp_info;
	ctx_info->iwarp_info_valid = true;
	irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va, ctx_info);
	ctx_info->iwarp_info_valid = false;
}
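/* irdma keeps one context descriptor but two fill helpers, and the create path
 * picks one based on the QP's transport. A sketch of that dispatch shape with
 * invented types; the real helpers program many more fields, and the transport
 * test would come from something like rdma_protocol_roce(): */
#include <stdio.h>

struct qp_ctx { const char *transport; int valid; };

static void fill_roce(struct qp_ctx *ctx)
{
	ctx->transport = "RoCE";	/* would also set roce_info/udp_info */
	printf("setctx_roce(%s)\n", ctx->transport);
}

static void fill_iwarp(struct qp_ctx *ctx)
{
	ctx->valid = 1;			/* mark iwarp_info valid for this write */
	ctx->transport = "iWARP";
	printf("setctx(%s)\n", ctx->transport);
	ctx->valid = 0;			/* clear again, as the driver does */
}

int main(void)
{
	struct qp_ctx ctx = {0};
	int is_roce = 1;		/* stand-in for the transport check */

	if (is_roce)
		fill_roce(&ctx);
	else
		fill_iwarp(&ctx);
	return 0;
}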
	struct irdma_qp_host_ctx_info *ctx_info;	/* pointer into the owning iwqp */
	struct irdma_qp_host_ctx_info ctx_info;		/* stack instance (other callers) */
/* Unrelated hit: some media drivers define a ctx_info() logging macro. */
	ctx_info(ctx, "V4L2 device registered as %s\n", ...);
/* cxgb4 cudbg: record which SGE context regions exist and their bounds. */
		       struct cudbg_region_info *ctx_info,	/* one slot per region */

		if (...) {	/* region present in the chip's memory map */
			ctx_info[i].exist = true;
			ctx_info[i].start = mem_desc.base;
			ctx_info[i].end = mem_desc.limit;
		} else {
			ctx_info[i].exist = false;
		}

	/* Free-list and congestion-manager regions are derived, not mapped. */
	ctx_info[CTXT_FLM].exist = true;
	ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;
	ctx_info[CTXT_CNM].exist = true;
	ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;
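/* cudbg's table marks each region with an exist flag plus [start, end] bounds,
 * and sizes the free-list region from a queue budget times a per-entry context
 * size. A standalone sketch; the enum, constants, and numbers are invented
 * stand-ins for CTXT_*, CUDBG_MAX_FL_QIDS, and SGE_CTXT_SIZE: */
#include <stdbool.h>
#include <stdio.h>

enum { REG_EGRESS, REG_INGRESS, REG_FLM, REG_CNM, REG_MAX };

struct region_info {
	bool exist;
	unsigned int start, end;
};

#define MOCK_MAX_FL_QIDS 1024	/* stand-in queue budget */
#define MOCK_CTXT_SIZE	 256	/* stand-in per-entry context size */

int main(void)
{
	struct region_info ctx_info[REG_MAX] = {0};
	unsigned int flq = 1;	/* halves the free-list queue budget */

	/* Mapped regions get their bounds from the memory map. */
	ctx_info[REG_EGRESS] = (struct region_info){ true, 0x0000, 0x7fff };

	/* Derived regions are sized from queue counts, not mapped addresses. */
	ctx_info[REG_FLM].exist = true;
	ctx_info[REG_FLM].end = (MOCK_MAX_FL_QIDS >> flq) * MOCK_CTXT_SIZE;
	ctx_info[REG_CNM].exist = true;
	ctx_info[REG_CNM].end = ctx_info[REG_FLM].end;

	for (int i = 0; i < REG_MAX; i++)
		printf("region %d: exist=%d [0x%x, 0x%x]\n", i,
		       (int)ctx_info[i].exist, ctx_info[i].start, ctx_info[i].end);
	return 0;
}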
/* drivers/ras: locate the vendor data that follows the ARM context records. */
	struct cper_arm_ctx_info *ctx_info;

	/* Context records start right after the error-info array. */
	ctx_info = (struct cper_arm_ctx_info *)(err_info + err->err_info_num);
	ctx_err = (u8 *)ctx_info;
	...
	if (sz + (long)ctx_info - (long)err >= err->section_length)
		...
	sz += ctx_info->size;
	ctx_info = (struct cper_arm_ctx_info *)((long)ctx_info + sz);
	/* Whatever remains past the last context record is vendor data. */
	ven_err_data = (u8 *)ctx_info;
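/* The scan above finds the vendor data by stepping past every context record
 * while refusing to walk off the section. A short self-contained sketch of the
 * same bounds-checked walk over a mock section; types and sizes are invented: */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN16(x) (((x) + 15) & ~15UL)

struct mock_ctx {	/* header of each variable-length context record */
	uint16_t type;
	uint16_t size;	/* payload bytes after the header */
};

int main(void)
{
	unsigned char section[48] = {0};
	unsigned int section_length = sizeof(section);
	unsigned int ctx_num = 1;

	struct mock_ctx rec = { .type = 0, .size = 8 };
	memcpy(section, &rec, sizeof(rec));
	section[16] = 0xAB;	/* first byte of the "vendor data" */

	unsigned char *p = section;
	for (unsigned int i = 0; i < ctx_num; i++) {
		struct mock_ctx hdr;

		memcpy(&hdr, p, sizeof(hdr));
		unsigned long step = ALIGN16(sizeof(hdr) + hdr.size);
		/* Refuse to walk past the advertised section length. */
		if ((unsigned long)(p - section) + step > section_length) {
			fprintf(stderr, "truncated section\n");
			return 1;
		}
		p += step;
	}
	/* Whatever remains after the last record is vendor-specific data. */
	printf("vendor data at offset %td, first byte 0x%02X\n",
	       p - section, *p);
	return 0;
}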