__offsetof
objsize = __offsetof(struct memstat_kmemzone, kz_zone) +
smeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
hmeta.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
_info.dls_size = __offsetof(struct dl_serinfo, dls_serpath);
if (valid_len < __offsetof(struct scsi_report_supported_opcodes_one,
if (valid_len < __offsetof(struct scsi_report_supported_opcodes_one,
if (length < __offsetof(struct scsi_per_res_cap, type_mask)) {
if (length >= __offsetof(struct scsi_per_res_cap, reserved))
unused = dp->d_reclen - __offsetof(struct direct, d_name);
/*
 * Placeholder "counter pointer" usable before per-CPU storage exists: it
 * encodes the byte offset of pc_early_dummy_counter within struct pcpu as
 * a (void *) value.  NOTE(review): consumers presumably rebase this offset
 * against a pcpu region once it is allocated — confirm at the use sites.
 * The expansion is fully parenthesized so the macro is safe in any
 * surrounding expression (CERT PRE02-C).
 */
#define EARLY_COUNTER ((void *)__offsetof(struct pcpu, pc_early_dummy_counter))
__offsetof(struct pcpu, name)
_Static_assert(__offsetof(struct vie, disp_bytes) == 22, "ABI");
_Static_assert(__offsetof(struct vie, scale) == 24, "ABI");
_Static_assert(__offsetof(struct vie, base_register) == 28, "ABI");
.offset = __offsetof(struct cpu_desc, field_name), \
__offsetof(struct ata_identify_log_pages,entries);
__offsetof(struct ata_identify_log_sup_cap,
needed_size = __offsetof(struct ata_zoned_info_log,
__offsetof(struct scsi_transportid_iscsi_device,
__offsetof(struct scsi_transportid_iscsi_port,
cdb->length = sizeof(*cdb) - __offsetof(struct ata_pass_32,
(lhs_end - __offsetof(struct scsi_vpd_id_descriptor, identifier));
(rhs_end - __offsetof(struct scsi_vpd_id_descriptor, identifier));
__offsetof(struct scsi_inquiry_data, additional_length) + 1)
__offsetof(struct scsi_vpd_device_id, desc_list)
__offsetof(struct scsi_vpd_id_descriptor, identifier)
__offsetof(struct ata_identify_log_pages,entries);
needed_size = __offsetof(struct ata_identify_log_sup_cap,
needed_size = __offsetof(struct ata_zoned_info_log,
needed_len = __offsetof(struct scsi_vpd_zoned_bdc,
__offsetof(struct sa_prot_state, prot_method),
__offsetof(struct sa_prot_state, pi_length),
__offsetof(struct sa_prot_state, lbp_w),
__offsetof(struct sa_prot_state, lbp_r),
__offsetof(struct sa_prot_state, rbdp),
__offsetof(struct scsi_control_data_prot_subpage, prot_bits)
__offsetof(struct scsi_control_data_prot_subpage, prot_method);
__offsetof(struct scsi_control_data_prot_subpage,
length_offset = __offsetof(struct scsi_density_data,
length_offset = __offsetof(struct scsi_medium_type_data,
__offsetof(struct scsi_medium_type_data,
__offsetof(struct scsi_medium_type_data,
__offsetof(struct scsi_vpd_extended_inquiry_data,
__offsetof(struct ieee80211_tdma_param, tdma_tstamp),
r = malloc(__offsetof(struct mp_ring, items[size]), mt, flags | M_ZERO);
__offsetof(struct scmi_smt_header, channel_status)
__offsetof(struct scmi_smt_header, length)
__offsetof(struct scmi_smt_header, msg_header)
__offsetof(struct ndis_rssprm_toeplitz, rss_ind[0]);
__offsetof(struct ndis_rssprm_toeplitz, rss_key[0]);
return (ofs - __offsetof(struct rndis_packet_msg, rm_dataoffset));
__offsetof(struct hn_rx_ring, hn_lro.lro_queued),
__offsetof(struct hn_rx_ring, hn_lro.lro_flushed),
__offsetof(struct hn_rx_ring, hn_lro_tried),
__offsetof(struct hn_rx_ring, hn_csum_ip),
__offsetof(struct hn_rx_ring, hn_csum_tcp),
__offsetof(struct hn_rx_ring, hn_csum_udp),
__offsetof(struct hn_rx_ring, hn_csum_trusted),
__offsetof(struct hn_rx_ring, hn_small_pkts),
__offsetof(struct hn_rx_ring, hn_ack_failed),
__offsetof(struct hn_tx_ring, hn_no_txdescs),
__offsetof(struct hn_tx_ring, hn_send_failed),
__offsetof(struct hn_tx_ring, hn_txdma_failed),
__offsetof(struct hn_tx_ring, hn_flush_failed),
__offsetof(struct hn_tx_ring, hn_tx_collapsed),
__offsetof(struct hn_tx_ring, hn_tx_chimney),
__offsetof(struct hn_tx_ring, hn_tx_chimney_tried),
__offsetof(struct hn_tx_ring, hn_direct_tx_size),
__offsetof(struct hn_tx_ring, hn_sched_tx),
__offsetof(struct vmbus_chanpkt_rxbuf, cp_rxbuf[count]))) {
__offsetof(struct rndis_pktinfo, rm_data[dlen])
__offsetof(struct ndis_offload_params, ndis_rsc_ip4)
__offsetof(struct ndis_rss_caps, ndis_pad)
__offsetof(struct ndis_rss_caps, ndis_nind)
__offsetof(struct ndis_rss_params, ndis_cpumaskoffset)
__offsetof(struct ndis_rssprm_toeplitz, rss_ind[nind])
__offsetof(struct ndis_offload, ndis_ipsecv2)
__offsetof(struct ndis_offload, ndis_rsc)
__offsetof(struct hv_vss_opt_msg, reserved));
__offsetof(struct hv_vss_opt_msg, reserved));
memset(reqp, 0, __offsetof(hv_vss_req_internal, callout));
__offsetof(struct vmbus_icmsg_negotiate, ic_ver[VMBUS_IC_VERCNT])
if (dlen < __offsetof(struct vmbus_icmsg_negotiate, ic_ver[cnt])) {
__offsetof(struct vmbus_icmsg_shutdown, ic_msg[0])
__offsetof(struct vmbus_icmsg_heartbeat, ic_rsvd[0])
dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0]));
hlen = __offsetof(struct vmbus_chanpkt_sglist, cp_gpa[sglen]);
hlen = __offsetof(struct vmbus_chanpkt_prplist,
iov[1].iov_len = __offsetof(struct vmbus_gpa_range, gpa_page[prp_cnt]);
elem_size = __offsetof(struct vmbus_chanpkt_prplist,
range_len = __offsetof(struct vmbus_gpa_range, gpa_page[page_count]);
reqsz = __offsetof(struct vmbus_chanmsg_gpadl_conn,
reqsz = __offsetof(struct vmbus_chanmsg_gpadl_subconn,
CTASSERT(__offsetof(struct vmbus_chanmsg_gpadl_conn,
CTASSERT(__offsetof(struct vmbus_chanmsg_gpadl_subconn,
__offsetof(struct mana_rxq, lro.lro_queued),
__offsetof(struct mana_rxq, lro.lro_flushed),
__offsetof(struct mana_rxq, lro.lro_bad_csum),
__offsetof(struct mana_rxq, lro_tried),
__offsetof(struct mana_rxq, lro_failed),
__offsetof(struct mana_rxq, lro.lro_ackcnt_lim),
__offsetof(struct mana_rxq, lro.lro_length_lim),
__offsetof(struct mana_rxq, lro.lro_cnt),
__offsetof(struct mana_txq, tso_pkts),
__offsetof(struct mana_txq, tso_bytes),
/*
 * Byte offset of member "fld" within the auto-generated layout struct
 * mlx5_ifc_<typ>_bits (struct tag formed by token pasting).  Used by the
 * mlx5 firmware-command accessors to locate fields inside command layouts.
 */
#define __mlx5_bit_off(typ, fld) __offsetof(struct mlx5_ifc_##typ##_bits, fld)
__offsetof(struct ipsec_accel_in_tag, xh), M_NOWAIT);
sizeof(*(ptr)) - __offsetof(__typeof(*(ptr)), field))
(__offsetof(struct mlx5e_rl_params, n) / sizeof(uint64_t))
__offsetof(struct mlx5e_priv, params_ethtool.n)
(__offsetof(struct mlx5_ib_congestion, field) - \
__offsetof(struct mlx5_ib_congestion, arg[0])) / sizeof(u64))
addr += __offsetof(struct xhci_hw_root, hwr_events[i]);
addr += __offsetof(struct xhci_hw_root, hwr_commands[i]);
addr += __offsetof(struct xhci_hw_root, hwr_commands[0]);
addr += __offsetof(struct xhci_dev_ctx_addr, qwSpBufPtr[0]);
addr += __offsetof(struct xhci_hw_root, hwr_events[0]);
addr += __offsetof(struct xhci_hw_root, hwr_commands[0]);
(uint32_t)__offsetof(struct rndis_packet_msg,
(uint32_t)__offsetof(struct rndis_packet_msg,
__offsetof(struct rndis_packet_msg,
__offsetof(struct pcpu, name)
bintime_off(dst, __offsetof(struct timehands, member)); \
getthmember(dst, sizeof(*dst), __offsetof(struct timehands, \
_Static_assert(__offsetof(struct umutex, m_spare[0]) ==
__offsetof(struct umutex32, m_spare[0]), "m_spare32");
uloc = (char *)td->td_exterr_ptr + __offsetof(struct uexterror,
sz = sizeof(ue) - __offsetof(struct uexterror, error);
__offsetof(struct bufdomain, bd_maxbufspace), sysctl_bufdomain_long, "L",
__offsetof(struct bufdomain, bd_lobufspace), sysctl_bufdomain_long, "L",
__offsetof(struct bufdomain, bd_hibufspace), sysctl_bufdomain_long, "L",
__offsetof(struct bufdomain, bd_bufspacethresh), sysctl_bufdomain_long, "L",
__offsetof(struct bufdomain, bd_lodirtybuffers), sysctl_bufdomain_int, "I",
__offsetof(struct bufdomain, bd_hidirtybuffers), sysctl_bufdomain_int, "I",
__offsetof(struct bufdomain, bd_dirtybufthresh), sysctl_bufdomain_int, "I",
__offsetof(struct bufdomain, bd_lofreebuffers), sysctl_bufdomain_int, "I",
__offsetof(struct bufdomain, bd_hifreebuffers), sysctl_bufdomain_int, "I",
CTASSERT(__offsetof(struct ifreq, ifr_ifru) ==
__offsetof(struct ifreq32, ifr_ifru));
r = malloc(__offsetof(struct ifmp_ring, items[size]), mt, flags | M_ZERO);
__offsetof(struct rndis_packet_msg, rm_dataoffset))
((ofs) + __offsetof(struct rndis_packet_msg, rm_dataoffset))
__offsetof(struct rndis_pktinfo, rm_data[0])
__offsetof(struct rndis_init_comp, rm_aflistsz)
__offsetof(struct rndis_query_req, rm_rid))
((ofs) + __offsetof(struct rndis_query_req, rm_rid))
__offsetof(struct rndis_set_req, rm_rid))
((ofs) + __offsetof(struct rndis_status_msg, rm_status))
/*
 * Offset of nh_pksent within struct nhop_object; presumably the length of
 * the leading byte range that is memcmp()'d when testing nexthop objects
 * for equality, so that nh_pksent and later (mutable) members are excluded
 * — NOTE(review): confirm against struct nhop_object and its compare code.
 */
#define NHOP_END_CMP (__offsetof(struct nhop_object, nh_pksent))
_Static_assert(__offsetof(struct nhop_object, nh_ifp) == 32,
/*
 * Offset of nh_idx within struct nhop_priv; presumably the length of the
 * leading byte range compared when matching private nexthop state, keeping
 * nh_idx and subsequent members out of the comparison — NOTE(review):
 * confirm against struct nhop_priv and its compare code.
 */
#define NH_PRIV_END_CMP (__offsetof(struct nhop_priv, nh_idx))
_Static_assert(__offsetof(struct route, ro_dst) == __offsetof(_ro_new, _dst_new),\
_Static_assert(__offsetof(_s1, _f1) == __offsetof(_s2, _f2), \
__offsetof(struct ieee80211_ie_htcap, hc_cap));
ni->ni_htparam = ie[__offsetof(struct ieee80211_ie_htcap, hc_param)];
__offsetof(struct ieee80211_ie_htcap, hc_mcsset));
frm += __offsetof(struct ieee80211_ie_htcap, hc_extcap) -
__offsetof(struct ieee80211_ie_htcap, hc_mcsset);
__offsetof(struct ieee80211_ie_htcap, hc_txbf);
__offsetof(struct ieee80211_ie_htcap, hc_mcsset));
frm += __offsetof(struct ieee80211_ie_htcap, hc_extcap) -
__offsetof(struct ieee80211_ie_htcap, hc_mcsset);
__offsetof(struct ieee80211_ie_htcap, hc_txbf);
__offsetof(struct ieee80211_ie_htinfo, hi_basicmcsset);
space = __offsetof(struct ieee80211req_chaninfo,
const size_t off = __offsetof(struct ieee80211req_sta_stats, is_stats);
const size_t off = __offsetof(struct ieee80211req_sta_req, info);
frm += __offsetof(struct ieee80211_wme_info, wme_info);
*qosinfo = frm[__offsetof(struct ieee80211_wme_param, param_qosInfo)];
frm += __offsetof(struct ieee80211_wme_param, params_acParams);
frm += __offsetof(struct ieee80211_tdma_param, tdma_slot);
bytes = __offsetof(struct inpcblbgroup, il_inp[size]);
/*
 * Number of leading bytes of struct vif exported to userland via sysctl:
 * everything up to (but not including) the v_mtx member.  NOTE(review):
 * assumes v_mtx and later members are kernel-private and must not be
 * copied out — confirm against struct vif.  Expansion parenthesized so
 * the macro composes safely in arithmetic expressions (CERT PRE02-C).
 */
#define MROUTE_VIF_SYSCTL_LEN (__offsetof(struct vif, v_mtx))
(__offsetof(type, end) - __offsetof(type, start))
__DEQUALIFY(s *, (const volatile char *)__x - __offsetof(s, m));\
((__offsetof(struct dirent, d_name) + (namlen) + 1 + 7) & ~7)
dp->d_reclen - (__offsetof(struct dirent, d_name) + dp->d_namlen));
CTASSERT((__offsetof(struct type, field) & (sizeof(uint32_t) - 1)) == 0); \
((uintptr_t)val - __offsetof(struct type, field)); \
callback, __offsetof(struct type, field), arg); \
callback, __offsetof(struct type, field), arg)) \
/*
 * Byte offset of "field" within the VOP argument structure "s_type";
 * a thin, named wrapper around __offsetof() for the vnode-operation
 * argument-description tables.
 */
#define VOPARG_OFFSETOF(s_type, field) __offsetof(s_type, field)
(roundup2(__offsetof(struct direct, d_name) + (namlen) + 1, DIR_ROUNDUP))
_Static_assert(sizeof(struct uma_slab) == __offsetof(struct uma_slab, us_free),
_Static_assert(__offsetof(struct pcpu, pc_mds_tmp) % 64 == 0, "MDS AVX512");
ATF_REQUIRE(sz >= __offsetof(struct kinfo_file, kf_structsize) +
#ifndef __offsetof
(__offsetof(type, end) - __offsetof(type, start))
s *, (const volatile char *)__x - __offsetof(s, m)); \
/*
 * Size of the fixed header of a whod packet: the bytes preceding the
 * wd_we member.  NOTE(review): assumes wd_we is the trailing array of
 * per-entry records, so header size == its offset — confirm against
 * struct whod (protocols/rwhod.h).  Expansion parenthesized so the
 * macro composes safely in arithmetic expressions (CERT PRE02-C).
 */
#define WHDRSIZE (__offsetof(struct whod, wd_we))
if (mp->Length > __offsetof(ACPI_MADT_LOCAL_SAPIC, Uid))
dc->paddr + __offsetof(struct dcons_buf, magic));
dc->paddr + __offsetof(struct dcons_buf, iptr[port]));
loff + __offsetof(vdev_label_t, vl_be), sizeof(label->vl_be));
loff + __offsetof(vdev_label_t, vl_vdev_phys),
loff + __offsetof(vdev_label_t, vl_uberblock) + roff,