to_dev: references to the identifier across the kernel source tree (ioat dmaengine, amdkfd topology, mlxsw SPAN, libnvdimm, and networking tracepoints)
dev_dbg(to_dev(ioat_chan),
dev_err(to_dev(ioat_chan),
dev_dbg(to_dev(ioat_chan),
dev_dbg(to_dev(ioat_chan),
dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
dma_free_coherent(to_dev(ioat_chan),
dma_free_coherent(to_dev(ioat_chan),
dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
dev_dbg_ratelimited(to_dev(ioat_chan),
dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
dev_err(to_dev(ioat_chan), "Errors handled:\n");
dev_err(to_dev(ioat_chan), "Errors not handled:\n");
dev_warn(to_dev(ioat_chan), "Reset channel...\n");
dev_warn(to_dev(ioat_chan), "Restart channel...\n");
dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
dev_err(to_dev(ioat_chan), "Errors:\n");
dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
dev_err(to_dev(ioat_chan), "Errors:\n");
dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
dev_warn(to_dev(ioat_chan),
struct device *dev = to_dev(ioat_chan);
dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
dma_free_coherent(to_dev(ioat_chan), IOAT_CHUNK_SIZE,
dev_WARN(to_dev(ioat_chan),
struct device *dev = to_dev(ioat_chan);
struct device *dev = to_dev(ioat_chan);
dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
dev_err(to_dev(ioat_chan),
dev_warn(to_dev(ioat_chan),
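The lines above are from the ioat dmaengine driver, where to_dev() is a small accessor that resolves the driver-private channel structure to the struct device expected by dev_dbg()/dev_err() and by dma_alloc_coherent()/dma_free_coherent(). A minimal sketch of that accessor pattern follows; the structure and field names are illustrative assumptions, not the driver's actual layout.

#include <linux/device.h>
#include <linux/pci.h>

/* Illustrative channel layout: only the back-pointers to_dev() needs. */
struct example_dma_device {
	struct pci_dev *pdev;
};

struct example_dma_chan {
	struct example_dma_device *dma;
};

/* Map a channel to the struct device that logging and DMA APIs expect. */
#define to_dev(chan) (&(chan)->dma->pdev->dev)

static void example_report(struct example_dma_chan *chan, u32 chanerr)
{
	/* Same shape as the dev_dbg()/dev_err() calls in the listing above. */
	dev_dbg(to_dev(chan), "%s: chanerr = %#x\n", __func__, chanerr);
}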
struct kfd_topology_device *dev, *to_dev;
to_dev = kfd_topology_device_by_proximity_domain_no_lock(id_to);
if (!to_dev)
to_dev->node_props.io_links_count++;
list_add_tail(&props2->list, &to_dev->io_link_props);
static void kfd_set_iolink_non_coherent(struct kfd_topology_device *to_dev,
if (!to_dev->gpu &&
if (to_dev->gpu) {
KFD_GC_VERSION(to_dev->gpu) == IP_VERSION(9, 4, 0))) {
static void kfd_set_recommended_sdma_engines(struct kfd_topology_device *to_dev,
bool support_rec_eng = !amdgpu_sriov_vf(adev) && to_dev->gpu &&
int dst_socket_id = to_dev->gpu->adev->gmc.xgmi.physical_node_id;
num_xgmi_sdma_engines && to_dev->gpu) ? xgmi_sdma_eng_id_mask :
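In the amdkfd topology lines above, to_dev is not an accessor but an ordinary variable: the destination kfd_topology_device of an IO link, looked up by proximity domain before the link properties are attached to it and before per-link tuning (coherence, recommended SDMA engines) is applied. A minimal sketch of that lookup-then-attach flow, using hypothetical helper and field names in place of the driver's own:

#include <linux/list.h>
#include <linux/errno.h>

/* Illustrative stand-ins for the topology structures in the listing. */
struct example_iolink_props {
	struct list_head list;
};

struct example_topology_device {
	unsigned int io_links_count;
	struct list_head io_link_props;
};

/* Hypothetical lookup by proximity domain; returns NULL when absent. */
struct example_topology_device *example_device_by_domain(u32 id_to);

static int example_add_io_link(u32 id_to, struct example_iolink_props *props)
{
	struct example_topology_device *to_dev;

	to_dev = example_device_by_domain(id_to);
	if (!to_dev)
		return -ENODEV;	/* peer device not (yet) enumerated */

	to_dev->io_links_count++;
	list_add_tail(&props->list, &to_dev->io_link_props);
	return 0;
}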
const struct net_device *to_dev;
agent_parms.to_dev = out_dev;
mall_entry->mirror.to_dev = act->dev;
if (!mall_entry->mirror.to_dev) {
agent_parms.to_dev = mall_entry->mirror.to_dev;
.to_dev = NULL, /* Mirror to CPU. */
.to_dev = mall_entry->mirror.to_dev,
mall_entry->mirror.to_dev = act->dev;
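The short run of lines above is from the mlxsw matchall/mirroring path: to_dev records the net_device that a mirror action points at (act->dev), with NULL reserved for mirroring to the CPU, and is later copied into the SPAN agent parameters. A compact sketch of that binding, with hypothetical structure names; the real driver splits this across the matchall and SPAN code.

#include <linux/netdevice.h>
#include <linux/errno.h>

/* Hypothetical mirror of the mall_entry/agent_parms pairing in the listing. */
struct example_mirror_entry {
	const struct net_device *to_dev;	/* NULL would mean "mirror to CPU" */
};

struct example_span_agent_parms {
	const struct net_device *to_dev;
};

static int example_bind_mirror(struct example_mirror_entry *entry,
			       const struct net_device *act_dev,
			       struct example_span_agent_parms *parms)
{
	entry->to_dev = act_dev;
	if (!entry->to_dev)
		return -EOPNOTSUPP;	/* this path needs an egress netdev */

	parms->to_dev = entry->to_dev;
	return 0;
}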
const struct net_device *to_dev)
if (span->span_entry_ops_arr[i]->can_handle(to_dev))
err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
const struct net_device *to_dev = parms->to_dev;
ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
err = ops->parms_set(mlxsw_sp, to_dev, &sparms);
span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
const struct net_device *to_dev,
const struct net_device *to_dev,
sparmsp->dest_port = netdev_priv(to_dev);
mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
struct ip_tunnel *tun = netdev_priv(to_dev);
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
const struct net_device *to_dev,
struct ip_tunnel_parm_kern tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
if (!(to_dev->flags & IFF_UP) ||
l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
struct ip6_tnl *t = netdev_priv(to_dev);
const struct net_device *to_dev,
struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
if (!(to_dev->flags & IFF_UP) ||
l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
const struct net_device *to_dev,
if (!(to_dev->flags & IFF_UP))
real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
const struct net_device *to_dev,
const struct net_device *to_dev,
const struct net_device *to_dev,
span_entry->to_dev = to_dev;
const struct net_device *to_dev)
if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
const struct net_device *to_dev,
if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
const struct net_device *to_dev,
span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev,
return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
const struct net_device *to_dev;
const struct net_device *to_dev;
bool (*can_handle)(const struct net_device *to_dev);
const struct net_device *to_dev,
const struct net_device *to_dev);
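Most of the remaining mlxsw lines belong to the SPAN (port mirroring) core, where to_dev is the mirror target netdev and an array of span_entry_ops is probed with can_handle(to_dev) to pick the backend (physical port, gretap4/gretap6 tunnel, VLAN device, ...) that knows how to program a mirror session toward that device; the chosen ops then fill in the entry parameters. A minimal sketch of that ops-dispatch pattern, with hypothetical types standing in for the driver's:

#include <linux/netdevice.h>
#include <linux/kernel.h>

/* Hypothetical ops table keyed by "can this backend mirror to that netdev?". */
struct example_span_entry_ops {
	bool (*can_handle)(const struct net_device *to_dev);
	int (*parms_set)(const struct net_device *to_dev, void *sparms);
};

struct example_span {
	const struct example_span_entry_ops **ops_arr;
	size_t ops_arr_size;
};

static const struct example_span_entry_ops *
example_span_entry_ops(struct example_span *span,
		       const struct net_device *to_dev)
{
	size_t i;

	/* Same shape as the can_handle() scan in the listing above. */
	for (i = 0; i < span->ops_arr_size; i++)
		if (span->ops_arr[i]->can_handle(to_dev))
			return span->ops_arr[i];

	return NULL;	/* no backend can mirror to this netdev */
}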
dev_err_ratelimited(to_dev(arena),
dev_warn_ratelimited(to_dev(arena),
dev_err_ratelimited(to_dev(arena),
dev_err_ratelimited(to_dev(arena),
dev_err(to_dev(arena),
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
dev_WARN_ONCE(to_dev(arena), size < 512,
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
dev_WARN_ONCE(to_dev(arena), size < 512,
dev_err_ratelimited(to_dev(arena),
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
dev_err(to_dev(arena), "Found an unknown padding scheme\n");
dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
dev_info(to_dev(arena), "No existing arenas\n");
dev_err(to_dev(arena),
dev_err(to_dev(arena),
long cleared = nvdimm_clear_poison(to_dev(pmem), phys, len);
return to_nd_region(to_dev(pmem)->parent);
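The to_dev(arena) and to_dev(pmem) calls above are from the libnvdimm BTT and pmem drivers. Neither an arena nor the pmem private structure embeds a struct device, so each file defines a tiny local to_dev() helper that walks back to a device suitable for dev_err()/dev_dbg()/dev_WARN_ONCE() reporting, poison clearing, and (via its parent) reaching the enclosing nd_region. A sketch of that local-helper idiom, with an assumed back-pointer; the real field layouts differ per driver.

#include <linux/device.h>

/* Illustrative private structure: only the back-pointer to_dev() walks. */
struct example_parent {
	struct device dev;
};

struct example_arena {
	struct example_parent *parent;	/* assumed parent pointer */
};

/* Resolve the private structure to the device used for all dev_*() calls. */
static inline struct device *to_dev(struct example_arena *arena)
{
	return &arena->parent->dev;
}

/* Mirrors the to_nd_region(to_dev(pmem)->parent) line above: the parent of
 * that device is then used to reach the containing region. */
static inline struct device *example_region_dev(struct example_arena *arena)
{
	return to_dev(arena)->parent;
}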
const struct net_device *to_dev,
TP_ARGS(from_dev, to_dev, sent, drops, err),
__entry->to_ifindex = to_dev->ifindex;
const struct net_device *to_dev, int sent, int drops, int err)
idx_out = to_dev->ifindex;
const struct net_device *to_dev, int sent, int drops, int err)
idx_out = to_dev->ifindex;
*from_dev, const struct net_device *to_dev, int sent, int drops,
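The final lines are tracepoint plumbing on the networking side (devmap-xmit style events): to_dev is the outgoing net_device handed to the trace call, and only its ifindex is stored in the trace record. A self-contained sketch of that "record the ifindex, not the pointer" pattern, using hypothetical names rather than the actual TRACE_EVENT machinery:

#include <linux/netdevice.h>

/* Hypothetical trace record: only scalar fields end up in the ring buffer. */
struct example_devmap_xmit_record {
	int from_ifindex;
	int to_ifindex;
	int sent;
	int drops;
	int err;
};

/* Mirrors the TP_fast_assign() body implied by the listing: the netdevs are
 * reduced to their ifindex values at trace time. */
static void example_fill_record(struct example_devmap_xmit_record *rec,
				const struct net_device *from_dev,
				const struct net_device *to_dev,
				int sent, int drops, int err)
{
	rec->from_ifindex = from_dev->ifindex;
	rec->to_ifindex = to_dev->ifindex;
	rec->sent = sent;
	rec->drops = drops;
	rec->err = err;
}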