Symbol: htab
Each occurrence below is listed as a file path, the line number within that file, and the source line itself.
arch/powerpc/kvm/book3s_32_mmu_host.c
110
ulong pteg = htab;
arch/powerpc/kvm/book3s_32_mmu_host.c
123
htab, hash, htabmask, pteg);
arch/powerpc/kvm/book3s_32_mmu_host.c
379
htab = (ulong)__va(sdr1 & 0xffff0000);
arch/powerpc/kvm/book3s_32_mmu_host.c
46
static ulong htab;
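
Note: in the Book3S-32 KVM host MMU, htab is a file-static cache of the host hashed page table's kernel virtual address, derived from the SDR1 register (file line 379 above); a hash masked by htabmask then selects a PTE group (PTEG) within it (file lines 110 and 123). A minimal userspace sketch of that arithmetic; the SDR1 value and the htabmask derivation are assumptions, not taken from this file:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t sdr1 = 0x00fe0005;                          /* example SDR1 value */
        uint32_t htab = sdr1 & 0xffff0000;                   /* HTAB base (physical here) */
        uint32_t htabmask = ((sdr1 & 0x1ff) << 16) | 0xffc0; /* assumed mask layout */
        uint32_t hash = 0x12345678 & htabmask;               /* pre-masked hash */
        uint32_t pteg = htab | hash;                         /* address of a 64-byte PTEG */

        printf("htab %#x | hash %#x | htabmask %#x | pteg %#x\n",
               htab, hash, htabmask, pteg);
        return 0;
    }
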
drivers/s390/char/sclp_rw.c
235
} while (buffer->current_length % buffer->htab);
drivers/s390/char/sclp_rw.c
41
sclp_make_buffer(void *page, unsigned short columns, unsigned short htab)
drivers/s390/char/sclp_rw.c
59
buffer->htab = htab;
drivers/s390/char/sclp_rw.h
75
unsigned short htab;
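
Note: in the s390 SCLP write driver, htab is the horizontal tab stop width in columns of a message buffer: sclp_make_buffer() records it (file lines 41 and 59) and the do/while at file line 235 expands a tab by padding spaces to the next multiple of htab. A standalone sketch of that loop; the helper name is invented for illustration:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical helper mirroring the do/while at sclp_rw.c:235:
     * append spaces until the length hits the next tab stop. */
    static void expand_htab(char *line, size_t *len, unsigned short htab)
    {
        do {
            line[(*len)++] = ' ';
        } while (*len % htab);
    }

    int main(void)
    {
        char line[80] = "abc";
        size_t len = strlen(line);

        expand_htab(line, &len, 8);          /* tab stops every 8 columns */
        printf("length after tab: %zu\n", len); /* prints 8 */
        return 0;
    }
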
kernel/bpf/hashtab.c
1001
htab_put_fd_value(htab, l);
kernel/bpf/hashtab.c
1003
if (htab_is_prealloc(htab)) {
kernel/bpf/hashtab.c
1004
bpf_map_dec_elem_count(&htab->map);
kernel/bpf/hashtab.c
1005
check_and_free_fields(htab, l);
kernel/bpf/hashtab.c
1006
pcpu_freelist_push(&htab->freelist, &l->fnode);
kernel/bpf/hashtab.c
1008
dec_elem_count(htab);
kernel/bpf/hashtab.c
1009
htab_elem_free(htab, l);
kernel/bpf/hashtab.c
1013
static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
kernel/bpf/hashtab.c
1021
copy_map_value(&htab->map, ptr, value);
kernel/bpf/hashtab.c
1022
bpf_obj_free_fields(htab->map.record, ptr);
kernel/bpf/hashtab.c
1024
u32 size = round_up(htab->map.value_size, 8);
kernel/bpf/hashtab.c
1031
copy_map_value(&htab->map, ptr, value);
kernel/bpf/hashtab.c
1032
bpf_obj_free_fields(htab->map.record, ptr);
kernel/bpf/hashtab.c
1039
copy_map_value(&htab->map, ptr, val);
kernel/bpf/hashtab.c
1040
bpf_obj_free_fields(htab->map.record, ptr);
kernel/bpf/hashtab.c
1045
static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
kernel/bpf/hashtab.c
1059
copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value);
kernel/bpf/hashtab.c
1061
zero_map_value(&htab->map, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
1064
pcpu_copy_value(htab, pptr, value, onallcpus, map_flags);
kernel/bpf/hashtab.c
1068
static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
kernel/bpf/hashtab.c
1070
return is_fd_htab(htab) && BITS_PER_LONG == 64;
kernel/bpf/hashtab.c
1073
static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
kernel/bpf/hashtab.c
1078
u32 size = htab->map.value_size;
kernel/bpf/hashtab.c
1079
bool prealloc = htab_is_prealloc(htab);
kernel/bpf/hashtab.c
1088
pl_new = this_cpu_ptr(htab->extra_elems);
kernel/bpf/hashtab.c
1094
l = __pcpu_freelist_pop(&htab->freelist);
kernel/bpf/hashtab.c
1098
bpf_map_inc_elem_count(&htab->map);
kernel/bpf/hashtab.c
1101
if (is_map_full(htab))
kernel/bpf/hashtab.c
1109
inc_elem_count(htab);
kernel/bpf/hashtab.c
1110
l_new = bpf_mem_cache_alloc(&htab->ma);
kernel/bpf/hashtab.c
1123
void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
kernel/bpf/hashtab.c
1126
bpf_mem_cache_free(&htab->ma, l_new);
kernel/bpf/hashtab.c
1134
pcpu_init_value(htab, pptr, value, onallcpus, map_flags);
kernel/bpf/hashtab.c
1138
} else if (fd_htab_map_needs_adjust(htab)) {
kernel/bpf/hashtab.c
1142
copy_map_value(&htab->map, htab_elem_value(l_new, key_size), value);
kernel/bpf/hashtab.c
1148
dec_elem_count(htab);
kernel/bpf/hashtab.c
1152
static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
kernel/bpf/hashtab.c
1170
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1186
hash = htab_map_hash(key, key_size, htab->hashrnd);
kernel/bpf/hashtab.c
1188
b = __select_bucket(htab, hash);
kernel/bpf/hashtab.c
1196
htab->n_buckets);
kernel/bpf/hashtab.c
1197
ret = check_flags(htab, l_old, map_flags);
kernel/bpf/hashtab.c
1219
ret = check_flags(htab, l_old, map_flags);
kernel/bpf/hashtab.c
1237
l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
kernel/bpf/hashtab.c
1255
if (htab_is_prealloc(htab))
kernel/bpf/hashtab.c
1256
check_and_free_fields(htab, l_old);
kernel/bpf/hashtab.c
1259
if (l_old && !htab_is_prealloc(htab))
kernel/bpf/hashtab.c
1260
free_htab_elem(htab, l_old);
kernel/bpf/hashtab.c
1267
static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
kernel/bpf/hashtab.c
1269
check_and_free_fields(htab, elem);
kernel/bpf/hashtab.c
1270
bpf_map_dec_elem_count(&htab->map);
kernel/bpf/hashtab.c
1271
bpf_lru_push_free(&htab->lru, &elem->lru_node);
kernel/bpf/hashtab.c
1277
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1293
hash = htab_map_hash(key, key_size, htab->hashrnd);
kernel/bpf/hashtab.c
1295
b = __select_bucket(htab, hash);
kernel/bpf/hashtab.c
1303
l_new = prealloc_lru_pop(htab, key, hash);
kernel/bpf/hashtab.c
1306
copy_map_value(&htab->map, htab_elem_value(l_new, map->key_size), value);
kernel/bpf/hashtab.c
1314
ret = check_flags(htab, l_old, map_flags);
kernel/bpf/hashtab.c
133
static inline bool htab_is_prealloc(const struct bpf_htab *htab)
kernel/bpf/hashtab.c
1333
htab_lru_push_free(htab, l_new);
kernel/bpf/hashtab.c
1335
htab_lru_push_free(htab, l_old);
kernel/bpf/hashtab.c
135
return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
kernel/bpf/hashtab.c
1353
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1370
hash = htab_map_hash(key, key_size, htab->hashrnd);
kernel/bpf/hashtab.c
1372
b = __select_bucket(htab, hash);
kernel/bpf/hashtab.c
138
static void htab_init_buckets(struct bpf_htab *htab)
kernel/bpf/hashtab.c
1381
ret = check_flags(htab, l_old, map_flags);
kernel/bpf/hashtab.c
1388
pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
kernel/bpf/hashtab.c
1397
l_new = alloc_htab_elem(htab, key, value, key_size,
kernel/bpf/hashtab.c
1416
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
142
for (i = 0; i < htab->n_buckets; i++) {
kernel/bpf/hashtab.c
143
INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
kernel/bpf/hashtab.c
1432
hash = htab_map_hash(key, key_size, htab->hashrnd);
kernel/bpf/hashtab.c
1434
b = __select_bucket(htab, hash);
kernel/bpf/hashtab.c
144
raw_res_spin_lock_init(&htab->buckets[i].raw_lock);
kernel/bpf/hashtab.c
1443
l_new = prealloc_lru_pop(htab, key, hash);
kernel/bpf/hashtab.c
1454
ret = check_flags(htab, l_old, map_flags);
kernel/bpf/hashtab.c
1462
pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
kernel/bpf/hashtab.c
1465
pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
kernel/bpf/hashtab.c
1475
bpf_map_dec_elem_count(&htab->map);
kernel/bpf/hashtab.c
1476
bpf_lru_push_free(&htab->lru, &l_new->lru_node);
kernel/bpf/hashtab.c
1497
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1509
hash = htab_map_hash(key, key_size, htab->hashrnd);
kernel/bpf/hashtab.c
1510
b = __select_bucket(htab, hash);
kernel/bpf/hashtab.c
1526
free_htab_elem(htab, l);
kernel/bpf/hashtab.c
1532
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1544
hash = htab_map_hash(key, key_size, htab->hashrnd);
kernel/bpf/hashtab.c
1545
b = __select_bucket(htab, hash);
kernel/bpf/hashtab.c
1561
htab_lru_push_free(htab, l);
kernel/bpf/hashtab.c
1565
static void delete_all_elements(struct bpf_htab *htab)
kernel/bpf/hashtab.c
1572
for (i = 0; i < htab->n_buckets; i++) {
kernel/bpf/hashtab.c
1573
struct hlist_nulls_head *head = select_bucket(htab, i);
kernel/bpf/hashtab.c
1579
htab_elem_free(htab, l);
kernel/bpf/hashtab.c
1585
static void htab_free_malloced_internal_structs(struct bpf_htab *htab)
kernel/bpf/hashtab.c
1590
for (i = 0; i < htab->n_buckets; i++) {
kernel/bpf/hashtab.c
1591
struct hlist_nulls_head *head = select_bucket(htab, i);
kernel/bpf/hashtab.c
1597
bpf_map_free_internal_structs(&htab->map,
kernel/bpf/hashtab.c
1598
htab_elem_value(l, htab->map.key_size));
kernel/bpf/hashtab.c
1607
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1613
if (htab_is_prealloc(htab))
kernel/bpf/hashtab.c
1614
htab_free_prealloced_internal_structs(htab);
kernel/bpf/hashtab.c
1616
htab_free_malloced_internal_structs(htab);
kernel/bpf/hashtab.c
1622
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
1633
if (!htab_is_prealloc(htab)) {
kernel/bpf/hashtab.c
1634
delete_all_elements(htab);
kernel/bpf/hashtab.c
1636
htab_free_prealloced_fields(htab);
kernel/bpf/hashtab.c
1637
prealloc_destroy(htab);
kernel/bpf/hashtab.c
1641
free_percpu(htab->extra_elems);
kernel/bpf/hashtab.c
1642
bpf_map_area_free(htab->buckets);
kernel/bpf/hashtab.c
1643
bpf_mem_alloc_destroy(&htab->pcpu_ma);
kernel/bpf/hashtab.c
1644
bpf_mem_alloc_destroy(&htab->ma);
kernel/bpf/hashtab.c
1645
if (htab->use_percpu_counter)
kernel/bpf/hashtab.c
1646
percpu_counter_destroy(&htab->pcount);
kernel/bpf/hashtab.c
1647
bpf_map_area_free(htab);
kernel/bpf/hashtab.c
1675
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
168
static bool htab_is_lru(const struct bpf_htab *htab)
kernel/bpf/hashtab.c
1685
hash = htab_map_hash(key, key_size, htab->hashrnd);
kernel/bpf/hashtab.c
1686
b = __select_bucket(htab, hash);
kernel/bpf/hashtab.c
170
return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
kernel/bpf/hashtab.c
1706
copy_map_value_long(&htab->map, value + off, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
1707
check_and_init_map_value(&htab->map, value + off);
kernel/bpf/hashtab.c
171
htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
kernel/bpf/hashtab.c
1727
htab_lru_push_free(htab, l);
kernel/bpf/hashtab.c
1729
free_htab_elem(htab, l);
kernel/bpf/hashtab.c
174
static bool htab_is_percpu(const struct bpf_htab *htab)
kernel/bpf/hashtab.c
176
return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
kernel/bpf/hashtab.c
177
htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
kernel/bpf/hashtab.c
1772
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
180
static inline bool is_fd_htab(const struct bpf_htab *htab)
kernel/bpf/hashtab.c
1812
if (batch >= htab->n_buckets)
kernel/bpf/hashtab.c
1815
key_size = htab->map.key_size;
kernel/bpf/hashtab.c
1816
value_size = htab->map.value_size;
kernel/bpf/hashtab.c
182
return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS;
kernel/bpf/hashtab.c
1843
b = &htab->buckets[batch];
kernel/bpf/hashtab.c
1903
copy_map_value(&htab->map, dst_val, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
1904
check_and_init_map_value(&htab->map, dst_val);
kernel/bpf/hashtab.c
1907
copy_map_value_long(&htab->map, dst_val + off,
kernel/bpf/hashtab.c
1909
check_and_init_map_value(&htab->map, dst_val + off);
kernel/bpf/hashtab.c
1915
if (is_fd_htab(htab)) {
kernel/bpf/hashtab.c
1958
htab_lru_push_free(htab, l);
kernel/bpf/hashtab.c
1960
free_htab_elem(htab, l);
kernel/bpf/hashtab.c
1967
if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
kernel/bpf/hashtab.c
1984
if (batch >= htab->n_buckets) {
kernel/bpf/hashtab.c
206
static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
kernel/bpf/hashtab.c
2077
struct bpf_htab *htab;
kernel/bpf/hashtab.c
208
return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
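
Note: get_htab_elem() above (file line 208) indexes the flat preallocated element array by hand because each record is elem_size bytes, a size fixed only at map creation time (file lines 583-588, listed further down). A userspace sketch of the same indexing:

    #include <stdint.h>

    /* Sketch: variable-sized records in one flat allocation, so element i
     * lives at byte offset i * elem_size; the u64 cast above avoids 32-bit
     * overflow for large maps. */
    struct toy_elem {
        uint32_t hash;
        /* key and value bytes follow, each rounded up to 8 */
    };

    static struct toy_elem *get_elem(char *elems, uint32_t elem_size, uint32_t i)
    {
        return (struct toy_elem *)(elems + (uint64_t)i * elem_size);
    }
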
kernel/bpf/hashtab.c
2087
const struct bpf_htab *htab = info->htab;
kernel/bpf/hashtab.c
2096
if (bucket_id >= htab->n_buckets)
kernel/bpf/hashtab.c
2110
b = &htab->buckets[bucket_id++];
kernel/bpf/hashtab.c
2115
for (i = bucket_id; i < htab->n_buckets; i++) {
kernel/bpf/hashtab.c
2116
b = &htab->buckets[i];
kernel/bpf/hashtab.c
215
static bool htab_has_extra_elems(struct bpf_htab *htab)
kernel/bpf/hashtab.c
217
return !htab_is_percpu(htab) && !htab_is_lru(htab) && !is_fd_htab(htab);
kernel/bpf/hashtab.c
220
static void htab_free_prealloced_internal_structs(struct bpf_htab *htab)
kernel/bpf/hashtab.c
222
u32 num_entries = htab->map.max_entries;
kernel/bpf/hashtab.c
2233
seq_info->htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
225
if (htab_has_extra_elems(htab))
kernel/bpf/hashtab.c
2262
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
2278
is_percpu = htab_is_percpu(htab);
kernel/bpf/hashtab.c
2284
for (i = 0; i < htab->n_buckets; i++) {
kernel/bpf/hashtab.c
2285
b = &htab->buckets[i];
kernel/bpf/hashtab.c
231
elem = get_htab_elem(htab, i);
kernel/bpf/hashtab.c
2314
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
2315
u32 value_size = round_up(htab->map.value_size, 8);
kernel/bpf/hashtab.c
2316
bool prealloc = htab_is_prealloc(htab);
kernel/bpf/hashtab.c
2317
bool percpu = htab_is_percpu(htab);
kernel/bpf/hashtab.c
2318
bool lru = htab_is_lru(htab);
kernel/bpf/hashtab.c
232
bpf_map_free_internal_structs(&htab->map,
kernel/bpf/hashtab.c
2322
sizeof(struct bucket) * htab->n_buckets;
kernel/bpf/hashtab.c
2326
if (htab_has_extra_elems(htab))
kernel/bpf/hashtab.c
2329
usage += htab->elem_size * num_entries;
kernel/bpf/hashtab.c
233
htab_elem_value(elem, htab->map.key_size));
kernel/bpf/hashtab.c
2338
num_entries = htab->use_percpu_counter ?
kernel/bpf/hashtab.c
2339
percpu_counter_sum(&htab->pcount) :
kernel/bpf/hashtab.c
2340
atomic_read(&htab->count);
kernel/bpf/hashtab.c
2341
usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
kernel/bpf/hashtab.c
2368
BATCH_OPS(htab),
kernel/bpf/hashtab.c
238
static void htab_free_prealloced_fields(struct bpf_htab *htab)
kernel/bpf/hashtab.c
240
u32 num_entries = htab->map.max_entries;
kernel/bpf/hashtab.c
243
if (IS_ERR_OR_NULL(htab->map.record))
kernel/bpf/hashtab.c
245
if (htab_has_extra_elems(htab))
kernel/bpf/hashtab.c
250
elem = get_htab_elem(htab, i);
kernel/bpf/hashtab.c
251
if (htab_is_percpu(htab)) {
kernel/bpf/hashtab.c
2510
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
2514
if (htab_is_lru(htab))
kernel/bpf/hashtab.c
252
void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
kernel/bpf/hashtab.c
256
bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
260
bpf_obj_free_fields(htab->map.record,
kernel/bpf/hashtab.c
2606
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
261
htab_elem_value(elem, htab->map.key_size));
kernel/bpf/hashtab.c
2612
for (i = 0; i < htab->n_buckets; i++) {
kernel/bpf/hashtab.c
2613
head = select_bucket(htab, i);
kernel/bpf/hashtab.c
268
static void htab_free_elems(struct bpf_htab *htab)
kernel/bpf/hashtab.c
272
if (!htab_is_percpu(htab))
kernel/bpf/hashtab.c
2735
BATCH_OPS(htab),
kernel/bpf/hashtab.c
275
for (i = 0; i < htab->map.max_entries; i++) {
kernel/bpf/hashtab.c
278
pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
kernel/bpf/hashtab.c
279
htab->map.key_size);
kernel/bpf/hashtab.c
284
bpf_map_area_free(htab->elems);
kernel/bpf/hashtab.c
298
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
kernel/bpf/hashtab.c
301
struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
kernel/bpf/hashtab.c
305
bpf_map_inc_elem_count(&htab->map);
kernel/bpf/hashtab.c
307
memcpy(l->key, key, htab->map.key_size);
kernel/bpf/hashtab.c
314
static int prealloc_init(struct bpf_htab *htab)
kernel/bpf/hashtab.c
316
u32 num_entries = htab->map.max_entries;
kernel/bpf/hashtab.c
319
if (htab_has_extra_elems(htab))
kernel/bpf/hashtab.c
322
htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
kernel/bpf/hashtab.c
323
htab->map.numa_node);
kernel/bpf/hashtab.c
324
if (!htab->elems)
kernel/bpf/hashtab.c
327
if (!htab_is_percpu(htab))
kernel/bpf/hashtab.c
331
u32 size = round_up(htab->map.value_size, 8);
kernel/bpf/hashtab.c
334
pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
kernel/bpf/hashtab.c
338
htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
kernel/bpf/hashtab.c
344
if (htab_is_lru(htab))
kernel/bpf/hashtab.c
345
err = bpf_lru_init(&htab->lru,
kernel/bpf/hashtab.c
346
htab->map.map_flags & BPF_F_NO_COMMON_LRU,
kernel/bpf/hashtab.c
350
htab);
kernel/bpf/hashtab.c
352
err = pcpu_freelist_init(&htab->freelist);
kernel/bpf/hashtab.c
357
if (htab_is_lru(htab))
kernel/bpf/hashtab.c
358
bpf_lru_populate(&htab->lru, htab->elems,
kernel/bpf/hashtab.c
360
htab->elem_size, num_entries);
kernel/bpf/hashtab.c
362
pcpu_freelist_populate(&htab->freelist,
kernel/bpf/hashtab.c
363
htab->elems + offsetof(struct htab_elem, fnode),
kernel/bpf/hashtab.c
364
htab->elem_size, num_entries);
kernel/bpf/hashtab.c
369
htab_free_elems(htab);
kernel/bpf/hashtab.c
373
static void prealloc_destroy(struct bpf_htab *htab)
kernel/bpf/hashtab.c
375
htab_free_elems(htab);
kernel/bpf/hashtab.c
377
if (htab_is_lru(htab))
kernel/bpf/hashtab.c
378
bpf_lru_destroy(&htab->lru);
kernel/bpf/hashtab.c
380
pcpu_freelist_destroy(&htab->freelist);
kernel/bpf/hashtab.c
383
static int alloc_extra_elems(struct bpf_htab *htab)
kernel/bpf/hashtab.c
389
pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
kernel/bpf/hashtab.c
395
l = pcpu_freelist_pop(&htab->freelist);
kernel/bpf/hashtab.c
402
htab->extra_elems = pptr;
kernel/bpf/hashtab.c
499
static int htab_set_dtor(struct bpf_htab *htab, void (*dtor)(void *, void *))
kernel/bpf/hashtab.c
501
u32 key_size = htab->map.key_size;
kernel/bpf/hashtab.c
507
if (IS_ERR_OR_NULL(htab->map.record))
kernel/bpf/hashtab.c
514
hrec->record = btf_record_dup(htab->map.record);
kernel/bpf/hashtab.c
520
ma = htab_is_percpu(htab) ? &htab->pcpu_ma : &htab->ma;
kernel/bpf/hashtab.c
528
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
530
if (htab_is_prealloc(htab))
kernel/bpf/hashtab.c
536
if (htab_is_percpu(htab))
kernel/bpf/hashtab.c
537
return htab_set_dtor(htab, htab_pcpu_mem_dtor);
kernel/bpf/hashtab.c
539
return htab_set_dtor(htab, htab_mem_dtor);
kernel/bpf/hashtab.c
553
struct bpf_htab *htab;
kernel/bpf/hashtab.c
556
htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
kernel/bpf/hashtab.c
557
if (!htab)
kernel/bpf/hashtab.c
560
bpf_map_init_from_attr(&htab->map, attr);
kernel/bpf/hashtab.c
567
htab->map.max_entries = roundup(attr->max_entries,
kernel/bpf/hashtab.c
569
if (htab->map.max_entries < attr->max_entries)
kernel/bpf/hashtab.c
570
htab->map.max_entries = rounddown(attr->max_entries,
kernel/bpf/hashtab.c
578
if (htab->map.max_entries > 1UL << 31)
kernel/bpf/hashtab.c
581
htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);
kernel/bpf/hashtab.c
583
htab->elem_size = sizeof(struct htab_elem) +
kernel/bpf/hashtab.c
584
round_up(htab->map.key_size, 8);
kernel/bpf/hashtab.c
586
htab->elem_size += sizeof(void *);
kernel/bpf/hashtab.c
588
htab->elem_size += round_up(htab->map.value_size, 8);
kernel/bpf/hashtab.c
591
if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
kernel/bpf/hashtab.c
594
err = bpf_map_init_elem_count(&htab->map);
kernel/bpf/hashtab.c
599
htab->buckets = bpf_map_area_alloc(htab->n_buckets *
kernel/bpf/hashtab.c
601
htab->map.numa_node);
kernel/bpf/hashtab.c
602
if (!htab->buckets)
kernel/bpf/hashtab.c
605
if (htab->map.map_flags & BPF_F_ZERO_SEED)
kernel/bpf/hashtab.c
606
htab->hashrnd = 0;
kernel/bpf/hashtab.c
608
htab->hashrnd = get_random_u32();
kernel/bpf/hashtab.c
610
htab_init_buckets(htab);
kernel/bpf/hashtab.c
627
htab->use_percpu_counter = true;
kernel/bpf/hashtab.c
629
if (htab->use_percpu_counter) {
kernel/bpf/hashtab.c
630
err = percpu_counter_init(&htab->pcount, 0, GFP_KERNEL);
kernel/bpf/hashtab.c
636
err = prealloc_init(htab);
kernel/bpf/hashtab.c
640
if (htab_has_extra_elems(htab)) {
kernel/bpf/hashtab.c
641
err = alloc_extra_elems(htab);
kernel/bpf/hashtab.c
646
err = bpf_mem_alloc_init(&htab->ma, htab->elem_size, false);
kernel/bpf/hashtab.c
650
err = bpf_mem_alloc_init(&htab->pcpu_ma,
kernel/bpf/hashtab.c
651
round_up(htab->map.value_size, 8), true);
kernel/bpf/hashtab.c
657
return &htab->map;
kernel/bpf/hashtab.c
660
prealloc_destroy(htab);
kernel/bpf/hashtab.c
662
if (htab->use_percpu_counter)
kernel/bpf/hashtab.c
663
percpu_counter_destroy(&htab->pcount);
kernel/bpf/hashtab.c
664
bpf_map_area_free(htab->buckets);
kernel/bpf/hashtab.c
665
bpf_mem_alloc_destroy(&htab->pcpu_ma);
kernel/bpf/hashtab.c
666
bpf_mem_alloc_destroy(&htab->ma);
kernel/bpf/hashtab.c
668
bpf_map_free_elem_count(&htab->map);
kernel/bpf/hashtab.c
670
bpf_map_area_free(htab);
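
Note: the contiguous run above (file lines 553-670) is the map allocation path and its error unwinding. The sizing rules it applies, reconstructed from those lines; the branch structure and the goto label are inferred, and percpu is a local flag in the real function:

    htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

    htab->elem_size = sizeof(struct htab_elem) +
                      round_up(htab->map.key_size, 8);
    if (percpu)
        htab->elem_size += sizeof(void *);  /* per-CPU data sits behind a pointer */
    else
        htab->elem_size += round_up(htab->map.value_size, 8);

    /* guard against 32-bit overflow before sizing the bucket array */
    if (htab->n_buckets > U32_MAX / sizeof(struct bucket))
        goto free_htab;                     /* label inferred */
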
kernel/bpf/hashtab.c
681
static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
kernel/bpf/hashtab.c
683
return &htab->buckets[hash & (htab->n_buckets - 1)];
kernel/bpf/hashtab.c
686
static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
kernel/bpf/hashtab.c
688
return &__select_bucket(htab, hash)->head;
kernel/bpf/hashtab.c
734
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
743
hash = htab_map_hash(key, key_size, htab->hashrnd);
kernel/bpf/hashtab.c
745
head = select_bucket(htab, hash);
kernel/bpf/hashtab.c
747
l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
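
Note: the run above (file lines 734-747) is the core lookup: hash the key with the per-map seed hashrnd, mask into a bucket, then walk an RCU-protected hlist_nulls chain; n_buckets is passed so the nulls end-marker can reveal an element that moved buckets mid-walk. A plain-C sketch of that shape, with toy_hash standing in for the jhash-based htab_map_hash() and no RCU or nulls handling:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    struct elem {
        struct elem *next;
        uint32_t hash;
        char key[8];
    };

    /* Stand-in for htab_map_hash(); the kernel seeds jhash with hashrnd. */
    static uint32_t toy_hash(const void *key, uint32_t len, uint32_t seed)
    {
        uint32_t h = seed;

        for (uint32_t i = 0; i < len; i++)
            h = h * 31 + ((const uint8_t *)key)[i];
        return h;
    }

    /* Same shape as the lookup above: hash, mask, walk, compare. */
    static struct elem *lookup(struct elem **buckets, uint32_t n_buckets,
                               const void *key, uint32_t key_size, uint32_t seed)
    {
        uint32_t hash = toy_hash(key, key_size, seed);
        struct elem *l;

        for (l = buckets[hash & (n_buckets - 1)]; l; l = l->next)
            if (l->hash == hash && !memcmp(l->key, key, key_size))
                return l;
        return NULL;
    }
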
kernel/bpf/hashtab.c
837
static void check_and_free_fields(struct bpf_htab *htab,
kernel/bpf/hashtab.c
840
if (IS_ERR_OR_NULL(htab->map.record))
kernel/bpf/hashtab.c
843
if (htab_is_percpu(htab)) {
kernel/bpf/hashtab.c
844
void __percpu *pptr = htab_elem_get_ptr(elem, htab->map.key_size);
kernel/bpf/hashtab.c
848
bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
kernel/bpf/hashtab.c
850
void *map_value = htab_elem_value(elem, htab->map.key_size);
kernel/bpf/hashtab.c
852
bpf_obj_free_fields(htab->map.record, map_value);
kernel/bpf/hashtab.c
861
struct bpf_htab *htab = arg;
kernel/bpf/hashtab.c
870
b = __select_bucket(htab, tgt_l->hash);
kernel/bpf/hashtab.c
880
bpf_map_dec_elem_count(&htab->map);
kernel/bpf/hashtab.c
887
check_and_free_fields(htab, l);
kernel/bpf/hashtab.c
894
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
kernel/bpf/hashtab.c
907
hash = htab_map_hash(key, key_size, htab->hashrnd);
kernel/bpf/hashtab.c
909
head = select_bucket(htab, hash);
kernel/bpf/hashtab.c
912
l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);
kernel/bpf/hashtab.c
928
i = hash & (htab->n_buckets - 1);
kernel/bpf/hashtab.c
933
for (; i < htab->n_buckets; i++) {
kernel/bpf/hashtab.c
934
head = select_bucket(htab, i);
kernel/bpf/hashtab.c
950
static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
kernel/bpf/hashtab.c
952
check_and_free_fields(htab, l);
kernel/bpf/hashtab.c
954
if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
kernel/bpf/hashtab.c
955
bpf_mem_cache_free(&htab->pcpu_ma, l->ptr_to_pptr);
kernel/bpf/hashtab.c
956
bpf_mem_cache_free(&htab->ma, l);
kernel/bpf/hashtab.c
959
static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
kernel/bpf/hashtab.c
961
struct bpf_map *map = &htab->map;
kernel/bpf/hashtab.c
970
static bool is_map_full(struct bpf_htab *htab)
kernel/bpf/hashtab.c
972
if (htab->use_percpu_counter)
kernel/bpf/hashtab.c
973
return __percpu_counter_compare(&htab->pcount, htab->map.max_entries,
kernel/bpf/hashtab.c
975
return atomic_read(&htab->count) >= htab->map.max_entries;
kernel/bpf/hashtab.c
978
static void inc_elem_count(struct bpf_htab *htab)
kernel/bpf/hashtab.c
980
bpf_map_inc_elem_count(&htab->map);
kernel/bpf/hashtab.c
982
if (htab->use_percpu_counter)
kernel/bpf/hashtab.c
983
percpu_counter_add_batch(&htab->pcount, 1, PERCPU_COUNTER_BATCH);
kernel/bpf/hashtab.c
985
atomic_inc(&htab->count);
kernel/bpf/hashtab.c
988
static void dec_elem_count(struct bpf_htab *htab)
kernel/bpf/hashtab.c
990
bpf_map_dec_elem_count(&htab->map);
kernel/bpf/hashtab.c
992
if (htab->use_percpu_counter)
kernel/bpf/hashtab.c
993
percpu_counter_add_batch(&htab->pcount, -1, PERCPU_COUNTER_BATCH);
kernel/bpf/hashtab.c
995
atomic_dec(&htab->count);
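
Note: is_map_full(), inc_elem_count() and dec_elem_count() above (file lines 970-995) switch between a batched per-CPU counter and a single atomic depending on use_percpu_counter: the batched form keeps hot-path updates CPU-local at the cost of a bounded error, which is why the fullness check goes through __percpu_counter_compare(). A userspace sketch of the batching idea, with thread-local storage standing in for per-CPU data:

    #include <stdatomic.h>

    #define BATCH 32                   /* stand-in for PERCPU_COUNTER_BATCH */

    static atomic_long total;          /* shared, touched rarely */
    static _Thread_local long local;   /* per-thread, cheap to update */

    static void toy_inc_elem_count(void)
    {
        if (++local >= BATCH) {        /* flush to the shared counter in batches */
            atomic_fetch_add(&total, local);
            local = 0;
        }
    }
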
kernel/bpf/hashtab.c
999
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
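
Note: the signature just above (file line 999) and the scattered lines at the start of the hashtab.c group (file lines 1001-1009) reassemble into the element-free path: preallocated maps recycle the element onto a per-CPU freelist, while BPF_F_NO_PREALLOC maps hand it back to the allocator. A reconstruction, with blank lines and the else inferred:

    static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
    {
        htab_put_fd_value(htab, l);

        if (htab_is_prealloc(htab)) {
            bpf_map_dec_elem_count(&htab->map);
            check_and_free_fields(htab, l);
            pcpu_freelist_push(&htab->freelist, &l->fnode);
        } else {
            dec_elem_count(htab);
            htab_elem_free(htab, l);
        }
    }
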
net/core/sock_map.c
1018
bucket = sock_hash_select_bucket(htab, hash);
net/core/sock_map.c
1030
elem_new = sock_hash_alloc_elem(htab, key, key_size, hash, sk, elem);
net/core/sock_map.c
1044
sock_hash_free_elem(htab, elem);
net/core/sock_map.c
1059
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
1068
head = &sock_hash_select_bucket(htab, hash)->head;
net/core/sock_map.c
1080
i = hash & (htab->buckets_num - 1);
net/core/sock_map.c
1083
for (; i < htab->buckets_num; i++) {
net/core/sock_map.c
1084
head = &sock_hash_select_bucket(htab, i)->head;
net/core/sock_map.c
1098
struct bpf_shtab *htab;
net/core/sock_map.c
1110
htab = bpf_map_area_alloc(sizeof(*htab), NUMA_NO_NODE);
net/core/sock_map.c
1111
if (!htab)
net/core/sock_map.c
1114
bpf_map_init_from_attr(&htab->map, attr);
net/core/sock_map.c
1116
htab->buckets_num = roundup_pow_of_two(htab->map.max_entries);
net/core/sock_map.c
1117
htab->elem_size = sizeof(struct bpf_shtab_elem) +
net/core/sock_map.c
1118
round_up(htab->map.key_size, 8);
net/core/sock_map.c
1119
if (htab->buckets_num == 0 ||
net/core/sock_map.c
1120
htab->buckets_num > U32_MAX / sizeof(struct bpf_shtab_bucket)) {
net/core/sock_map.c
1125
htab->buckets = bpf_map_area_alloc(htab->buckets_num *
net/core/sock_map.c
1127
htab->map.numa_node);
net/core/sock_map.c
1128
if (!htab->buckets) {
net/core/sock_map.c
1133
for (i = 0; i < htab->buckets_num; i++) {
net/core/sock_map.c
1134
INIT_HLIST_HEAD(&htab->buckets[i].head);
net/core/sock_map.c
1135
spin_lock_init(&htab->buckets[i].lock);
net/core/sock_map.c
1138
return &htab->map;
net/core/sock_map.c
1140
bpf_map_area_free(htab);
net/core/sock_map.c
1146
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
1158
for (i = 0; i < htab->buckets_num; i++) {
net/core/sock_map.c
1159
bucket = sock_hash_select_bucket(htab, i);
net/core/sock_map.c
1185
sock_hash_free_elem(htab, elem);
net/core/sock_map.c
1193
bpf_map_area_free(htab->buckets);
net/core/sock_map.c
1194
bpf_map_area_free(htab);
net/core/sock_map.c
1312
struct bpf_shtab *htab;
net/core/sock_map.c
1319
const struct bpf_shtab *htab = info->htab;
net/core/sock_map.c
1335
for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
net/core/sock_map.c
1336
bucket = &htab->buckets[info->bucket_id];
net/core/sock_map.c
1416
info->htab = container_of(aux->map, struct bpf_shtab, map);
net/core/sock_map.c
1429
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
1430
u64 usage = sizeof(*htab);
net/core/sock_map.c
1432
usage += htab->buckets_num * sizeof(struct bpf_shtab_bucket);
net/core/sock_map.c
1433
usage += atomic_read(&htab->count) * (u64)htab->elem_size;
net/core/sock_map.c
872
static struct bpf_shtab_bucket *sock_hash_select_bucket(struct bpf_shtab *htab,
net/core/sock_map.c
875
return &htab->buckets[hash & (htab->buckets_num - 1)];
net/core/sock_map.c
895
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
903
bucket = sock_hash_select_bucket(htab, hash);
net/core/sock_map.c
909
static void sock_hash_free_elem(struct bpf_shtab *htab,
net/core/sock_map.c
912
atomic_dec(&htab->count);
net/core/sock_map.c
919
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
924
bucket = sock_hash_select_bucket(htab, elem->hash);
net/core/sock_map.c
936
sock_hash_free_elem(htab, elem);
net/core/sock_map.c
943
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
net/core/sock_map.c
950
bucket = sock_hash_select_bucket(htab, hash);
net/core/sock_map.c
957
sock_hash_free_elem(htab, elem);
net/core/sock_map.c
964
static struct bpf_shtab_elem *sock_hash_alloc_elem(struct bpf_shtab *htab,
net/core/sock_map.c
971
if (atomic_inc_return(&htab->count) > htab->map.max_entries) {
net/core/sock_map.c
973
atomic_dec(&htab->count);
net/core/sock_map.c
978
new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
net/core/sock_map.c
980
htab->map.numa_node);
net/core/sock_map.c
982
atomic_dec(&htab->count);
net/core/sock_map.c
994
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
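
Note: sock_map.c reuses the name for struct bpf_shtab, the sockhash map. Unlike bpf_htab it sizes by buckets_num and tracks occupancy with a single atomic: the allocation path (file line 971 above) increments first and rolls back on failure. A userspace sketch of that optimistic check:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct toy_htab {
        atomic_int count;
        int max_entries;
        size_t elem_size;
    };

    static void *toy_alloc_elem(struct toy_htab *htab)
    {
        void *new;

        if (atomic_fetch_add(&htab->count, 1) + 1 > htab->max_entries) {
            atomic_fetch_sub(&htab->count, 1);  /* roll back: map full */
            return NULL;
        }
        new = malloc(htab->elem_size);
        if (!new)
            atomic_fetch_sub(&htab->count, 1);  /* roll back: alloc failed */
        return new;
    }
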
net/xfrm/xfrm_policy.c
4239
struct xfrm_policy_hash *htab;
net/xfrm/xfrm_policy.c
4244
htab = &net->xfrm.policy_bydst[dir];
net/xfrm/xfrm_policy.c
4245
rcu_assign_pointer(htab->table, xfrm_hash_alloc(sz));
net/xfrm/xfrm_policy.c
4246
if (!htab->table)
net/xfrm/xfrm_policy.c
4248
htab->hmask = hmask;
net/xfrm/xfrm_policy.c
4249
htab->dbits4 = 32;
net/xfrm/xfrm_policy.c
4250
htab->sbits4 = 32;
net/xfrm/xfrm_policy.c
4251
htab->dbits6 = 128;
net/xfrm/xfrm_policy.c
4252
htab->sbits6 = 128;
net/xfrm/xfrm_policy.c
4269
struct xfrm_policy_hash *htab;
net/xfrm/xfrm_policy.c
4271
htab = &net->xfrm.policy_bydst[dir];
net/xfrm/xfrm_policy.c
4272
xfrm_hash_free(rcu_dereference_protected(htab->table, true), sz);
net/xfrm/xfrm_policy.c
4296
struct xfrm_policy_hash *htab;
net/xfrm/xfrm_policy.c
4298
htab = &net->xfrm.policy_bydst[dir];
net/xfrm/xfrm_policy.c
4299
sz = (htab->hmask + 1) * sizeof(struct hlist_head);
net/xfrm/xfrm_policy.c
4300
WARN_ON(!hlist_empty(rcu_dereference_protected(htab->table, true)));
net/xfrm/xfrm_policy.c
4301
xfrm_hash_free(rcu_dereference_protected(htab->table, true), sz);
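
Note: in xfrm_policy.c, htab is a per-direction struct xfrm_policy_hash: table holds hmask + 1 hlist heads (hence the sz computation at file line 4299), and the dbits/sbits fields record destination and source prefix lengths used by the by-destination hashing. A quick sketch of the hmask convention:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned int hmask = 1024 - 1;             /* a table of 1024 buckets */
        /* struct hlist_head is a single pointer, hence sizeof(void *) here */
        size_t sz = (hmask + 1) * sizeof(void *);
        uint32_t hash = 0xcafef00d;

        printf("table bytes %zu, bucket %u\n", sz, hash & hmask);
        return 0;
    }
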
tools/testing/selftests/bpf/benchs/bench_htab_mem.c
180
map = ctx.skel->maps.htab;
tools/testing/selftests/bpf/bpf_arena_htab.h
17
typedef struct htab __arena htab_t;
tools/testing/selftests/bpf/bpf_arena_htab.h
19
static inline htab_bucket_t *__select_bucket(htab_t *htab, __u32 hash)
tools/testing/selftests/bpf/bpf_arena_htab.h
21
htab_bucket_t *b = htab->buckets;
tools/testing/selftests/bpf/bpf_arena_htab.h
24
return &b[hash & (htab->n_buckets - 1)];
tools/testing/selftests/bpf/bpf_arena_htab.h
27
static inline arena_list_head_t *select_bucket(htab_t *htab, __u32 hash)
tools/testing/selftests/bpf/bpf_arena_htab.h
29
return &__select_bucket(htab, hash)->head;
tools/testing/selftests/bpf/bpf_arena_htab.h
56
__weak int htab_lookup_elem(htab_t *htab __arg_arena, int key)
tools/testing/selftests/bpf/bpf_arena_htab.h
61
cast_kern(htab);
tools/testing/selftests/bpf/bpf_arena_htab.h
62
head = select_bucket(htab, key);
tools/testing/selftests/bpf/bpf_arena_htab.h
69
__weak int htab_update_elem(htab_t *htab __arg_arena, int key, int value)
tools/testing/selftests/bpf/bpf_arena_htab.h
74
cast_kern(htab);
tools/testing/selftests/bpf/bpf_arena_htab.h
75
head = select_bucket(htab, key);
tools/testing/selftests/bpf/bpf_arena_htab.h
93
void htab_init(htab_t *htab)
tools/testing/selftests/bpf/bpf_arena_htab.h
98
htab->buckets = buckets;
tools/testing/selftests/bpf/bpf_arena_htab.h
99
htab->n_buckets = 2 * PAGE_SIZE / sizeof(struct htab_bucket);
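
Note: htab_init() above sizes the arena table as 2 * PAGE_SIZE / sizeof(struct htab_bucket); whatever that works out to must be a power of two for the n_buckets - 1 mask in __select_bucket() (file line 24 above) to act as a modulo. With 4 KiB pages and a hypothetical 8-byte bucket:

    #define PAGE_SIZE 4096                  /* assumption: 4 KiB pages */

    struct htab_bucket { void *head; };     /* hypothetical 8-byte layout */

    /* 2 * 4096 / 8 = 1024 buckets, a power of two, so
     * hash & (n_buckets - 1) selects a bucket correctly. */
    unsigned int n_buckets = 2 * PAGE_SIZE / sizeof(struct htab_bucket);
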
tools/testing/selftests/bpf/prog_tests/arena_htab.c
16
static void test_arena_htab_common(struct htab *htab)
tools/testing/selftests/bpf/prog_tests/arena_htab.c
20
printf("htab %p buckets %p n_buckets %d\n", htab, htab->buckets, htab->n_buckets);
tools/testing/selftests/bpf/prog_tests/arena_htab.c
21
ASSERT_OK_PTR(htab->buckets, "htab->buckets shouldn't be NULL");
tools/testing/selftests/bpf/prog_tests/arena_htab.c
22
for (i = 0; htab->buckets && i < 16; i += 4) {
tools/testing/selftests/bpf/prog_tests/arena_htab.c
27
int val = htab_lookup_elem(htab, i);
tools/testing/selftests/bpf/prog_tests/arena_htab.c
37
struct htab *htab;
tools/testing/selftests/bpf/prog_tests/arena_htab.c
59
htab = skel->bss->htab_for_user;
tools/testing/selftests/bpf/prog_tests/arena_htab.c
60
test_arena_htab_common(htab);
tools/testing/selftests/bpf/prog_tests/arena_htab.c
69
struct htab *htab;
tools/testing/selftests/bpf/prog_tests/arena_htab.c
79
htab = skel->bss->htab_for_user;
tools/testing/selftests/bpf/prog_tests/arena_htab.c
80
test_arena_htab_common(htab);
tools/testing/selftests/bpf/prog_tests/htab_reuse.c
74
ctx.fd = bpf_map__fd(skel->maps.htab);
tools/testing/selftests/bpf/prog_tests/htab_update.c
106
ctx.fd = bpf_map__fd(skel->maps.htab);
tools/testing/selftests/bpf/prog_tests/htab_update.c
36
value_size = bpf_map__value_size(skel->maps.htab);
tools/testing/selftests/bpf/prog_tests/htab_update.c
46
err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, value, BPF_ANY);
tools/testing/selftests/bpf/prog_tests/htab_update.c
58
err = bpf_map_update_elem(bpf_map__fd(skel->maps.htab), &key, value, BPF_ANY);
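
Note: the prog_tests lines above drive the map from user space through the libbpf skeleton: bpf_map__fd() resolves the map's file descriptor and bpf_map_update_elem() is the syscall wrapper. A fragment of that pattern; skel and the surrounding setup come from the generated skeleton in the real tests and are not shown here:

    int fd = bpf_map__fd(skel->maps.htab);               /* fd from the skeleton */
    size_t value_size = bpf_map__value_size(skel->maps.htab);
    void *value = calloc(1, value_size);                 /* zeroed value buffer */
    __u32 key = 0;
    int err = bpf_map_update_elem(fd, &key, value, BPF_ANY);
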
tools/testing/selftests/bpf/progs/arena_htab.c
29
struct htab __arena *htab;
tools/testing/selftests/bpf/progs/arena_htab.c
33
htab = bpf_alloc(sizeof(*htab));
tools/testing/selftests/bpf/progs/arena_htab.c
34
cast_kern(htab);
tools/testing/selftests/bpf/progs/arena_htab.c
35
htab_init(htab);
tools/testing/selftests/bpf/progs/arena_htab.c
41
htab_update_elem(htab, i, i);
tools/testing/selftests/bpf/progs/arena_htab.c
47
htab_update_elem(htab, i, i);
tools/testing/selftests/bpf/progs/arena_htab.c
51
cast_user(htab);
tools/testing/selftests/bpf/progs/arena_htab.c
52
htab_for_user = htab;
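
Note: the progs/arena_htab.c lines above assemble into this flow (glue and loop bounds inferred): allocate the htab in the arena, initialize and fill it through the kernel-side mapping, then publish a user-side pointer for the test harness.

    struct htab __arena *htab;

    htab = bpf_alloc(sizeof(*htab));
    cast_kern(htab);               /* use the kernel mapping of the arena pages */
    htab_init(htab);

    for (i = 0; i < 100; i++)      /* loop bound is illustrative */
        htab_update_elem(htab, i, i);

    cast_user(htab);               /* convert to the user-space mapping */
    htab_for_user = htab;          /* hand the table to the test harness */
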
tools/testing/selftests/bpf/progs/htab_mem_bench.c
21
} htab SEC(".maps");
tools/testing/selftests/bpf/progs/htab_mem_bench.c
31
bpf_map_update_elem(&htab, &ctx->from, zeroed_value, flags);
tools/testing/selftests/bpf/progs/htab_mem_bench.c
49
bpf_map_delete_elem(&htab, &ctx->from);
tools/testing/selftests/bpf/progs/htab_reuse.c
19
} htab SEC(".maps");
tools/testing/selftests/bpf/progs/htab_update.c
20
} htab SEC(".maps");
tools/testing/selftests/bpf/progs/htab_update.c
34
update_err = bpf_map_update_elem(&htab, &key, &value, BPF_ANY);
tools/testing/selftests/net/sample_map_ret0.bpf.c
10
} htab SEC(".maps");
tools/testing/selftests/net/sample_map_ret0.bpf.c
26
value = bpf_map_lookup_elem(&htab, &key);
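
Note: several selftests close a map definition with "} htab SEC(".maps");" and then call the map helpers on it from BPF program context. A minimal sketch of such a program; the map sizes and section name are illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 64);
        __type(key, __u32);
        __type(value, __u64);
    } htab SEC(".maps");

    SEC("socket")
    int use_htab(struct __sk_buff *skb)
    {
        __u32 key = 0;
        __u64 *value = bpf_map_lookup_elem(&htab, &key);

        return value ? 0 : 1;   /* cf. sample_map_ret0.bpf.c above */
    }

    char _license[] SEC("license") = "GPL";
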