/* fold a 64-bit value: add the two 32-bit halves, then the two 16-bit halves */
unsigned long ul;
in_v.ul = x;
tmp_v.ul = (unsigned long) in_v.ui[0] + (unsigned long) in_v.ui[1];
out_v.ul = (unsigned long) tmp_v.us[0] + (unsigned long) tmp_v.us[1];
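/*
 * A minimal user-space sketch of the halving-fold idiom above, assuming a
 * 64-bit little-endian target; fold64() and fold_view are illustrative
 * names, not taken from the original source.
 */
#include <stdint.h>

union fold_view {
	uint64_t ul;	/* whole value */
	uint32_t ui[2];	/* two 32-bit halves */
	uint16_t us[4];	/* four 16-bit quarters */
};

static uint64_t fold64(uint64_t x)
{
	union fold_view in_v, tmp_v, out_v;

	in_v.ul = x;
	/* add the 32-bit halves; the sum fits in 33 bits */
	tmp_v.ul = (uint64_t)in_v.ui[0] + (uint64_t)in_v.ui[1];
	/* add the first two 16-bit quarters of that sum */
	out_v.ul = (uint64_t)tmp_v.us[0] + (uint64_t)tmp_v.us[1];
	return out_v.ul;
}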
unsigned long ul;
u.ul = 0;
*dest = u.ul;
unsigned long ul;
u.ul = val;
info->scratchpad[0].ul = dev_id;
writel(cmd_param.ul, privdata->mmio + AMD_C2P_MSG1);
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
cmd_base.ul = 0;
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
u32 ul;
cmd_base.ul = 0;
writel(cmd_base.ul, privdata->mmio + amd_get_c2p_val(privdata, 0));
u32 ul;
i2c_common->eventval.ul = val;
writel(i2c_cmd_base.ul, reg);
i2c_cmd_base.ul = 0;
u32 ul;
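/*
 * A minimal sketch of the command-union pattern above, assuming a
 * hypothetical register layout: clear the raw u32 view (cmd_base.ul = 0),
 * fill bitfields through the struct view, then write the raw word to the
 * mailbox register. mmio_write32() and the field names below are
 * illustrative stand-ins for writel() and the driver's real layout.
 */
#include <stdint.h>

union cmd_base {
	uint32_t ul;			/* raw register word */
	struct {
		uint32_t cmd        : 4; /* illustrative fields */
		uint32_t bus_id     : 4;
		uint32_t slave_addr : 8;
		uint32_t length     : 16;
	} s;
};

/* stand-in for the kernel's writel(): store a 32-bit word to a register */
static void mmio_write32(volatile uint32_t *reg, uint32_t val)
{
	*reg = val;
}

static void send_cmd(volatile uint32_t *c2p_msg0)
{
	union cmd_base cmd_base;

	cmd_base.ul = 0;		/* clear every field at once */
	cmd_base.s.cmd = 1;		/* fill fields via the struct view */
	cmd_base.s.length = 64;
	mmio_write32(c2p_msg0, cmd_base.ul);
}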
if (!info->scratchpad[0].ul)
info->scratchpad[0].ul = rid;
info->scratchpad[1].ul = pa;
info->scratchpad[0].ul = dev_id;
info->scratchpad[1].ul = pa;
info->scratchpad[0].ul = pci_msi_domain_get_msi_rid(domain->parent, pdev);
info->scratchpad[0].ul = fsl_mc_msi_domain_get_msi_id(msi_domain,
						      mc_bus_dev);
dev_id = info->scratchpad[0].ul;
u32 dev_id = info->scratchpad[0].ul;
its_dev->its_trans_phys_base = info->scratchpad[1].ul;
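/*
 * A minimal sketch of the scratchpad idiom above: allocation-time code
 * stashes a device ID and a physical address in a small union array, and
 * the consumer reads them back later. alloc_info and the values are
 * simplified stand-ins for the kernel's msi_alloc_info_t, not its layout.
 */
#include <stdio.h>

union scratch {
	unsigned long ul;
	void *ptr;
};

struct alloc_info {
	union scratch scratchpad[2];
};

int main(void)
{
	struct alloc_info info = { 0 };
	unsigned long dev_id = 0x2a, pa = 0x80000000UL;

	/* producer: only set the ID if nothing claimed the slot yet */
	if (!info.scratchpad[0].ul)
		info.scratchpad[0].ul = dev_id;
	info.scratchpad[1].ul = pa;

	/* consumer: read both values back out */
	printf("dev_id=%#lx pa=%#lx\n",
	       info.scratchpad[0].ul, info.scratchpad[1].ul);
	return 0;
}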
#define __ul_u32(s) phy->mib.ul_##s += le32_to_cpu(mu_stats->ul.s)
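/*
 * A minimal sketch of the token-pasting accumulator idiom in __ul_u32()
 * above: one macro names both the accumulator field (ul_<s>) and the
 * wire-format field (<s>). The struct layouts and the little-endian
 * le32_to_cpu() stand-in are illustrative assumptions.
 */
#include <stdint.h>

#define le32_to_cpu(x) (x)	/* assume a little-endian host */

struct mib  { uint64_t ul_bytes, ul_frames; };	/* running totals */
struct wire { uint32_t bytes, frames; };	/* device-reported deltas */

#define ACC_UL(s) (mib->ul_##s += le32_to_cpu(st->s))

static void accumulate(struct mib *mib, const struct wire *st)
{
	ACC_UL(bytes);
	ACC_UL(frames);
}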
} ul;
unsigned int sh, ul, uhd, state;
ret = regmap_read(rphy->grf, rport->port_cfg->utmi_ls.offset, &ul);
(((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << sh);
state = ((ul & ul_mask) >> rport->port_cfg->utmi_ls.bitstart) << 1 |
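/*
 * A minimal sketch of the mask-and-shift extraction above: isolate a field
 * in a register word with a mask, shift it down to bit 0, and pack two such
 * bits into a state code. Masks and bit positions are illustrative, not
 * the PHY's real layout.
 */
#include <stdint.h>

static unsigned int extract_field(uint32_t reg, uint32_t mask,
				  unsigned int bitstart)
{
	return (reg & mask) >> bitstart;
}

static unsigned int line_state(uint32_t reg)
{
	/* bit 1: host-disconnect flag, bit 0: line state (illustrative) */
	return extract_field(reg, 0x2, 1) << 1 | extract_field(reg, 0x1, 0);
}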
u32 ul;
cmd_base.ul = 0;
writel(cmd_base.ul, mp2->mmio + AMD_C2P_MSG0);
u16 cl, fl, ul;
ul = le16_to_cpu(ra->client_idx[1]);
(ul != LFS_NO_CLIENT && ul >= cl))
unsigned long ul;
const unsigned long *ul = (const unsigned long *)a;
return (ul[0] | ul[1]) == 0UL;
const unsigned long *ul = (const unsigned long *)a;
unsigned long x = ul[0] ^ ul[1];
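/*
 * A minimal sketch of the two-word fast path above, assuming a 64-bit
 * target: a 128-bit address is viewed as two unsigned longs, so "all zero"
 * is a single OR and "equal" is two XORs ORed together. The memcpy
 * sidesteps the strict-aliasing concerns of the raw pointer cast.
 */
#include <stdbool.h>
#include <string.h>

struct addr128 { unsigned char bytes[16]; };

static bool addr_any(const struct addr128 *a)
{
	unsigned long ul[2];

	memcpy(ul, a->bytes, sizeof(ul));
	return (ul[0] | ul[1]) == 0UL;
}

static bool addr_equal(const struct addr128 *a, const struct addr128 *b)
{
	unsigned long x[2], y[2];

	memcpy(x, a->bytes, sizeof(x));
	memcpy(y, b->bytes, sizeof(y));
	return ((x[0] ^ y[0]) | (x[1] ^ y[1])) == 0UL;
}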
struct io_unlink *ul = io_kiocb_to_cmd(req, struct io_unlink);
putname(ul->filename);
ul = 0;
ul += ui_one;
CHECK(ul, ulong_counter, 1);
ul = 0;
ul -= ui_one;
CHECK(ul, ulong_counter, -1);
CHECK(ul, ulong_counter, ULONG_MAX);
ul = ull = 0;
ul = ull += UINT_MAX;
CHECK(ul, ulong_counter, UINT_MAX);
ul = 3;
ul = this_cpu_sub_return(ulong_counter, ui_one);
CHECK(ul, ulong_counter, 2);
ul = __this_cpu_sub_return(ulong_counter, ui_one);
CHECK(ul, ulong_counter, 1);
unsigned long ul = 0;
ul = 0;
ul += 1UL;
CHECK(ul, ulong_counter, 1);
ul += -1UL;
CHECK(ul, ulong_counter, 0);
ul += -(unsigned long)1;
CHECK(ul, ulong_counter, -1);
ul = 0;
ul -= 1;
CHECK(ul, ulong_counter, -1);
CHECK(ul, ulong_counter, ULONG_MAX);
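/*
 * A user-space sketch of the wraparound these CHECK() lines exercise:
 * subtracting 1 from an unsigned long holding 0 wraps to ULONG_MAX, so
 * comparing against -1 (converted) and against ULONG_MAX are the same test.
 */
#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned long ul = 0;
	unsigned int ui_one = 1;

	ul -= ui_one;			/* 0 - 1 wraps around */
	assert(ul == (unsigned long)-1);
	assert(ul == ULONG_MAX);

	ul += -1UL;			/* adding -1UL subtracts 1 again */
	assert(ul == ULONG_MAX - 1);
	return 0;
}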
struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
rt->dst.rt_uncached_list = ul;
spin_lock_bh(&ul->lock);
list_add_tail(&rt->dst.rt_uncached, &ul->head);
spin_unlock_bh(&ul->lock);
struct uncached_list *ul = rt->dst.rt_uncached_list;
if (ul) {
spin_lock_bh(&ul->lock);
spin_unlock_bh(&ul->lock);
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
if (list_empty(&ul->head))
spin_lock_bh(&ul->lock);
list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
spin_unlock_bh(&ul->lock);
struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
INIT_LIST_HEAD(&ul->head);
spin_lock_init(&ul->lock);
struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
rt->dst.rt_uncached_list = ul;
spin_lock_bh(&ul->lock);
list_add_tail(&rt->dst.rt_uncached, &ul->head);
spin_unlock_bh(&ul->lock);
struct uncached_list *ul = rt->dst.rt_uncached_list;
if (ul) {
spin_lock_bh(&ul->lock);
spin_unlock_bh(&ul->lock);
struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
if (list_empty(&ul->head))
spin_lock_bh(&ul->lock);
list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
spin_unlock_bh(&ul->lock);
struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
INIT_LIST_HEAD(&ul->head);
spin_lock_init(&ul->lock);
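/*
 * A user-space sketch of the per-CPU uncached-list pattern above: each CPU
 * owns a list head and a lock, and every entry remembers which list it
 * joined so removal can take the right lock. pthread mutexes stand in for
 * spin_lock_bh(), and the list helpers mirror the kernel's list.h.
 */
#include <pthread.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);
}

#define NR_CPUS 4

struct uncached_list {
	pthread_mutex_t lock;
	struct list_head head;
};

static struct uncached_list uncached_lists[NR_CPUS];

struct route {
	struct list_head uncached;
	struct uncached_list *uncached_list;	/* list this entry joined */
};

/* add: pin the entry to the chosen CPU's list, remembering the list */
static void route_add_uncached(struct route *rt, int cpu)
{
	struct uncached_list *ul = &uncached_lists[cpu];

	rt->uncached_list = ul;
	pthread_mutex_lock(&ul->lock);
	list_add_tail(&rt->uncached, &ul->head);
	pthread_mutex_unlock(&ul->lock);
}

/* remove: take the lock of the list the entry actually joined */
static void route_del_uncached(struct route *rt)
{
	struct uncached_list *ul = rt->uncached_list;

	if (ul) {
		pthread_mutex_lock(&ul->lock);
		list_del_init(&rt->uncached);
		pthread_mutex_unlock(&ul->lock);
	}
}

int main(void)
{
	struct route r;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		INIT_LIST_HEAD(&uncached_lists[cpu].head);
		pthread_mutex_init(&uncached_lists[cpu].lock, NULL);
	}
	route_add_uncached(&r, 0);
	route_del_uncached(&r);
	return 0;
}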
u16 ul;
ul = ntohs(udph->len);
if (ul == 0)
else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
skb->csum = csum_partial(udph, ul, 0);
if (ul != ipl - ihl)
skb->csum = csum_partial(udph, ul, 0);
ul, iph->protocol,
u16 ul;
ul = ntohs(udph->len);
if (ul == 0)
else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl))
skb->csum = csum_partial(udph, ul, 0);
if (ul != ipl - ihl)
skb->csum = csum_partial(udph, ul, 0);
udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul,
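/*
 * A minimal sketch of the length check the fragments above perform,
 * assuming UDP-Lite semantics: a coverage of 0 means "checksum the whole
 * datagram", otherwise coverage must be at least a UDP header (8 bytes)
 * and must not run past the IP payload. Plain UDP instead demands the
 * length field match the payload exactly (the ul != ipl - ihl test).
 * udplite_coverage() is an illustrative helper, not the kernel's API.
 */
#include <stddef.h>
#include <stdint.h>

/* return the byte count the checksum should cover, or 0 if illegal */
static size_t udplite_coverage(uint16_t ul, size_t ihl, size_t ipl)
{
	size_t datagram = ipl - ihl;	/* full UDP datagram length */

	if (ul == 0)
		return datagram;	/* 0: cover everything */
	if (ul >= 8 && ul <= datagram)
		return ul;		/* partial coverage, in bounds */
	return 0;			/* out of range: reject */
}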
u32 ul;
unsigned long ul;
&i, &l, &ll, &u, &ul, &ull) != 6)
u != 4 || ul != 5 || ull != 6)
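/*
 * A user-space sketch of the round trip the test fragment above performs:
 * parse six integer types with sscanf() and verify every value landed.
 */
#include <stdio.h>

int main(void)
{
	int i; long l; long long ll;
	unsigned int u; unsigned long ul; unsigned long long ull;

	if (sscanf("1 2 3 4 5 6", "%d %ld %lld %u %lu %llu",
		   &i, &l, &ll, &u, &ul, &ull) != 6)
		return 1;
	if (i != 1 || l != 2 || ll != 3 ||
	    u != 4 || ul != 5 || ull != 6)
		return 1;
	return 0;
}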