hash_32: call sites across the Linux kernel source tree
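For reference, here is a minimal sketch of the helper's semantics, following the generic definition in include/linux/hash.h (architectures may override __hash_32, so treat this as the common case rather than the only one):

```c
#include <linux/types.h>

#define GOLDEN_RATIO_32 0x61C88647	/* 2^32 / golden ratio, per include/linux/hash.h */

/* Multiply by the golden-ratio constant; entropy accumulates in the high bits. */
static inline u32 __hash_32(u32 val)
{
	return val * GOLDEN_RATIO_32;
}

/*
 * Reduce a 32-bit value to an index of 'bits' bits by keeping the top bits
 * of the product. Callers pass the log2 of their table size, so the result
 * is always in [0, 1 << bits).
 */
static inline u32 hash_32(u32 val, unsigned int bits)
{
	return __hash_32(val) >> (32 - bits);
}
```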
per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
hash_32(zwplug->zone_no, disk->zone_wplugs_hash_bits);
unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits);
inode = iget5_locked(dax_superblock, hash_32(devt + DAXFS_MAGIC, 31),
head = &eb->buckets[hash_32(handle, eb->lut_size)];
&eb->buckets[hash_32(entry->handle,
const size_t base = (u32)hash_32(pid, MKSSTAT_CAPACITY_LOG2);
u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
u32 n = hash_32(qp->ibqp.qp_num, rdi->qp_dev->qp_table_bits);
qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
u32 hash_idx = hash_32(pid, VK_PID_HT_SHIFT_BIT);
#define VMCI_DOORBELL_HASH(_idx) hash_32(_idx, VMCI_DOORBELL_INDEX_BITS)
return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS);
struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)];
u8 key = hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP);
key = hash_32(key, IONIC_RX_FILTER_HASH_BITS);
key = hash_32(vid, IONIC_RX_FILTER_HASH_BITS);
key = hash_32(*(u32 *)addr, IONIC_RX_FILTER_HASH_BITS);
key = hash_32(0, IONIC_RX_FILTER_HASH_BITS);
return hash_32(vnid, VNI_HASH_BITS);
return hash_32(val, MACVLAN_MC_FILTER_BITS);
return &vs->vni_list[hash_32((__force u32)vni, VNI_HASH_BITS)];
return &vn->sock_list[hash_32(ntohs(port), PORT_HASH_BITS)];
idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));
return hash_32(handle, OF_PHANDLE_CACHE_BITS);
return hash_32(port_id, FT_SESS_HASH_BITS);
return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
return hash_32(i_pos, EXFAT_HASH_BITS);
return hash_32(logstart, FAT_HASH_BITS);
return hash_32(i_pos, FAT_HASH_BITS);
return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
unsigned int hash = hash_32((__force u32)xid, nn->maskbits);
return hash_32(fsid->val[0], FANOTIFY_EVENT_HASH_BITS) ^
hash_32(fsid->val[1], FANOTIFY_EVENT_HASH_BITS);
#define hash_long(val, bits) hash_32(val, bits)
return hash_32((u32)val ^ __hash_32(val >> 32), bits);
(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
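The three fragments above appear to be the generic fallbacks layered on top of hash_32(): on 32-bit configurations hash_long() is simply hash_32(), hash_64() folds the upper word into the lower one with __hash_32() before hashing, and hashtable.h's hash_min() picks between the two from the key's size. A sketch of that 32-bit path, assuming the generic definitions:

```c
/* BITS_PER_LONG == 32: a long is a u32, so hash_long() is hash_32(). */
#define hash_long(val, bits) hash_32(val, bits)

/* Hash 64 bits using only a 32x32-bit multiply: mix the top word in first. */
static inline u32 hash_64(u64 val, unsigned int bits)
{
	return hash_32((u32)val ^ __hash_32(val >> 32), bits);
}

/* hashtable.h picks the right helper from the key's size at compile time. */
#define hash_min(val, bits) \
	(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
```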
pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
u32 bucket = hash_32(ifindex, LLC_SK_DEV_HASH_BITS);
return hash_32(jhash(laddr->mac, sizeof(laddr->mac), 0),
return hash_32(type | (u32)local_addr << 8 | (u32)peer_addr << 16,
return hash_32(net_hash_mix(net) ^ proto, RAW_HTABLE_LOG);
u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);
if (test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &pids))
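The hash_32(current->pid, ilog2(BITS_PER_LONG)) call sites above use the hash not as a table index but to pick a single bit inside one machine word, giving a one-word filter of recently seen PIDs that tolerates false positives. A sketch of that idea (the variable and function names here are illustrative, not taken from the kernel):

```c
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/sched.h>

/* One word acts as the whole filter; each PID maps to one bit position. */
static unsigned long seen_pids;

static void note_current_pid(void)
{
	set_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &seen_pids);
}

static bool current_pid_maybe_seen(void)
{
	/* Different PIDs can hash to the same bit, so a hit is only "maybe". */
	return test_bit(hash_32(current->pid, ilog2(BITS_PER_LONG)), &seen_pids);
}
```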
hash_or[0][k] |= params.h1 = hash_32(params.h0, k);
#define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER)
return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
return hash_32(hash, flow_table->log);
new_flow = hash_32(skb_get_hash(skb), fl->log_buckets);
return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
return hash_32(val ^ net_hash_mix(net), net->ipv4.fib_info_hash_bits);
slot = hash_32(net_hash_mix(net) ^ (__force u32)val, hash_bits);
return hash_32((__force u32)im->multiaddr, MC_HASH_SZ_LOG);
u32 hash = hash_32((__force u32)addr, MC_HASH_SZ_LOG);
u32 hash = hash_32((__force u32)mc_addr, MC_HASH_SZ_LOG);
return hash_32((__force u32)key ^ (__force u32)remote,
hash = hash_32(hash, tcp_metrics_hash_log);
hash = hash_32(hash, tcp_metrics_hash_log);
hash = hash_32(hash, tcp_metrics_hash_log);
hash = hash_32(hash, tcp_metrics_hash_log);
return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
return hash_32(hash, IP6_GRE_HASH_SIZE_SHIFT);
return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
return hash_32(hash, IP6_VTI_HASH_SIZE_SHIFT);
return hash_32(ntohl(addr_fold), IP_VS_DH_TAB_BITS);
return hash_32(ntohl(addr_fold), IP_VS_LBLC_TAB_BITS);
return hash_32(ntohl(addr_fold), IP_VS_LBLCR_TAB_BITS);
return (offset + hash_32(ntohs(port) + ntohl(addr_fold),
return hash_32(ipv6_addr_hash(ip), IP_HASHBITS);
return hash_32(if_id, XFRMI_HASH_BITS);
hash_add(cache->cache, &ci->hash, hash_32(key));
hash_for_each_possible(cache->cache, ci, hash, hash_32(key)) {
return hash_32(addr_hash(addr) ^ (unsigned int)state);
return hash_32(type) ^ hash_str(target);
return hash_32(addr->section ^ addr_hash(addr->address));
return hash_32((unsigned int)(unsigned long)ptr);
hash = hash_32((unsigned int)type ^ hash_ptr(l) ^ hash_ptr(r));
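Unlike the rest of the listing, the hash_32() calls in the block above take a single argument; these call sites appear to come from host-side build tooling that ships a minimal hash helper of its own rather than including include/linux/hash.h, and then lets the hashtable macros (hash_add(), hash_for_each_possible()) reduce the pre-mixed key to a bucket. A hypothetical sketch of that pattern, with made-up struct and function names, written against the in-kernel <linux/hashtable.h> macros (the tool-side copies behave the same way):

```c
#include <linux/hashtable.h>

/* Hypothetical cache entry; the real call sites use their own types. */
struct cache_item {
	unsigned long key;
	int value;
	struct hlist_node hash;
};

static DEFINE_HASHTABLE(item_ht, 8);	/* 2^8 buckets */

static void item_set(struct cache_item *ci, unsigned long key, int value)
{
	ci->key = key;
	ci->value = value;
	/*
	 * The macro reduces the key with hash_min(), so a raw or pre-mixed
	 * key both work; the call sites above pre-mix with hash_32(key).
	 */
	hash_add(item_ht, &ci->hash, ci->key);
}

static int item_get(unsigned long key)
{
	struct cache_item *ci;

	hash_for_each_possible(item_ht, ci, hash, key)
		if (ci->key == key)
			return ci->value;
	return -1;
}
```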
#define hash_long(val, bits) hash_32(val, bits)
return hash_32((u32)val ^ __hash_32(val >> 32), bits);
(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
hash = hash_32(tid, PERF_SAMPLE_ID__HLIST_BITS);
hash = hash_32(guest_tid->tid, PERF_EVLIST__HLIST_BITS);
hash = hash_32(tid, PERF_EVLIST__HLIST_BITS);
hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);
hlist = &c->hashtable[hash_32(key, c->bits)];
hlist = &c->hashtable[hash_32(key, c->bits)];
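The last three lines show the other common arrangement: a privately allocated bucket array of 1 << bits hlist heads, indexed directly by hash_32(key, bits) with no extra masking, since the result is already confined to that range. A minimal kernel-style sketch of the pattern (the struct and function names are illustrative):

```c
#include <linux/errno.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative open-chaining table with 1 << bits buckets. */
struct example_cache {
	struct hlist_head *hashtable;
	unsigned int bits;
};

struct example_entry {
	u32 key;
	struct hlist_node hash;
};

static int example_cache_init(struct example_cache *c, unsigned int bits)
{
	unsigned int i, n = 1U << bits;

	c->hashtable = kmalloc_array(n, sizeof(*c->hashtable), GFP_KERNEL);
	if (!c->hashtable)
		return -ENOMEM;
	for (i = 0; i < n; i++)
		INIT_HLIST_HEAD(&c->hashtable[i]);
	c->bits = bits;
	return 0;
}

static void example_insert(struct example_cache *c, struct example_entry *e)
{
	/* hash_32() already returns an index in [0, 1 << c->bits). */
	hlist_add_head(&e->hash, &c->hashtable[hash_32(e->key, c->bits)]);
}
```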