Symbol: slab
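In this tree the identifier covers three distinct things: the mm memdesc struct slab (mm/slab.h, mm/slub.c, mm/kasan, mm/kfence, mm/memcontrol.c), struct kmem_cache * fields named slab embedded in subsystem structs (block/bio.c, fs/erofs, fs/nfsd, net, lib/sg_pool.c), and dm-vdo's struct vdo_slab, a unit of physical-space management unrelated to the slab allocator; a few mm/memcontrol.c hits are plain local stat counters. Entries are grouped per file and sorted by line number, as file:line: source.

The kmem_cache-pointer entries follow the standard cache lifecycle. A minimal sketch, assuming a hypothetical my_obj type and my_slab cache (names not from the tree):

	#include <linux/slab.h>

	struct my_obj { int x; };

	static struct kmem_cache *my_slab;	/* illustrative cache pointer */

	static int my_cache_init(void)
	{
		/* create the cache, as block/bio.c:93 and lib/sg_pool.c:150 do */
		my_slab = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
					    SLAB_HWCACHE_ALIGN, NULL);
		return my_slab ? 0 : -ENOMEM;
	}

	static void my_cache_use(void)
	{
		/* allocate and free one object (cf. fs/erofs/zdata.c:263, block/bio.c:169) */
		struct my_obj *obj = kmem_cache_zalloc(my_slab, GFP_KERNEL);

		if (obj)
			kmem_cache_free(my_slab, obj);
	}

	static void my_cache_exit(void)
	{
		kmem_cache_destroy(my_slab);	/* as block/bio.c:105 does */
	}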
block/bio.c:40: struct kmem_cache *slab;
block/bio.c:77: struct kmem_cache *slab;
block/bio.c:93: bslab->slab = kmem_cache_create(bslab->name, size,
block/bio.c:96: if (!bslab->slab)
block/bio.c:105: kmem_cache_destroy(bslab->slab);
block/bio.c:131: return bslab->slab;
block/bio.c:146: WARN_ON_ONCE(bslab->slab != bs->bio_slab);
block/bio.c:155: kmem_cache_destroy(bslab->slab);
block/bio.c:169: kmem_cache_free(biovec_slab(nr_vecs)->slab, bv);
block/bio.c:204: bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask));
block/bio.c:1885: return mempool_init_slab_pool(pool, pool_entries, bp->slab);
block/bio.c:1986: bvs->slab = kmem_cache_create(bvs->name,
drivers/md/dm-vdo/dedupe.c:1282: struct vdo_slab *slab = vdo_get_slab(depot, agent->duplicate.pbn);
drivers/md/dm-vdo/dedupe.c:1283: int result = vdo_acquire_provisional_reference(slab, agent->duplicate.pbn, lock);
drivers/md/dm-vdo/repair.c:810: struct vdo_slab *slab;
drivers/md/dm-vdo/repair.c:830: slab = vdo_get_slab(vdo->depot, pbn);
drivers/md/dm-vdo/repair.c:831: if (slab->allocator != allocator)
drivers/md/dm-vdo/repair.c:834: if (!vdo_attempt_replay_into_slab(slab, pbn, entry.operation, increment,
drivers/md/dm-vdo/slab-depot.c:54: static bool is_slab_open(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:56: return (!vdo_is_state_quiescing(&slab->state) &&
drivers/md/dm-vdo/slab-depot.c:57: !vdo_is_state_quiescent(&slab->state));
drivers/md/dm-vdo/slab-depot.c:68: return ((journal->slab->status != VDO_SLAB_REBUILDING) &&
drivers/md/dm-vdo/slab-depot.c:136: static bool is_slab_journal_blank(const struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:138: return ((slab->journal.tail == 1) &&
drivers/md/dm-vdo/slab-depot.c:139: (slab->journal.tail_header.entry_count == 0));
drivers/md/dm-vdo/slab-depot.c:151: struct list_head *dirty_list = &journal->slab->allocator->dirty_slab_journals;
drivers/md/dm-vdo/slab-depot.c:170: static void check_if_slab_drained(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:173: struct slab_journal *journal = &slab->journal;
drivers/md/dm-vdo/slab-depot.c:176: if (!vdo_is_state_draining(&slab->state) ||
drivers/md/dm-vdo/slab-depot.c:182: (slab->active_count > 0))
drivers/md/dm-vdo/slab-depot.c:186: code = vdo_get_admin_state_code(&slab->state);
drivers/md/dm-vdo/slab-depot.c:187: read_only = vdo_is_read_only(slab->allocator->depot->vdo);
drivers/md/dm-vdo/slab-depot.c:189: vdo_waitq_has_waiters(&slab->dirty_blocks) &&
drivers/md/dm-vdo/slab-depot.c:194: vdo_finish_draining_with_result(&slab->state,
drivers/md/dm-vdo/slab-depot.c:361: static void update_slab_summary_entry(struct vdo_slab *slab, struct vdo_waiter *waiter,
drivers/md/dm-vdo/slab-depot.c:366: u8 index = slab->slab_number / VDO_SLAB_SUMMARY_ENTRIES_PER_BLOCK;
drivers/md/dm-vdo/slab-depot.c:367: struct block_allocator *allocator = slab->allocator;
drivers/md/dm-vdo/slab-depot.c:385: entry = &allocator->summary_entries[slab->slab_number];
drivers/md/dm-vdo/slab-depot.c:405: check_if_slab_drained(journal->slab);
drivers/md/dm-vdo/slab-depot.c:441: journal->slab->allocator->thread_id);
drivers/md/dm-vdo/slab-depot.c:474: if ((journal->slab->status != VDO_SLAB_REBUILT) ||
drivers/md/dm-vdo/slab-depot.c:475: !vdo_is_state_normal(&journal->slab->state) ||
drivers/md/dm-vdo/slab-depot.c:476: vdo_is_read_only(journal->slab->allocator->depot->vdo)) {
drivers/md/dm-vdo/slab-depot.c:510: acquire_vio_from_pool(journal->slab->allocator->vio_pool,
drivers/md/dm-vdo/slab-depot.c:531: if (journal->slab->status == VDO_SLAB_REPLAYING) {
drivers/md/dm-vdo/slab-depot.c:577: vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
drivers/md/dm-vdo/slab-depot.c:578: check_if_slab_drained(journal->slab);
drivers/md/dm-vdo/slab-depot.c:595: zone_count_t zone_number = journal->slab->allocator->zone_number;
drivers/md/dm-vdo/slab-depot.c:626: struct vdo_slab *slab = journal->slab;
drivers/md/dm-vdo/slab-depot.c:629: vdo_is_read_only(journal->slab->allocator->depot->vdo) ||
drivers/md/dm-vdo/slab-depot.c:631: check_if_slab_drained(slab);
drivers/md/dm-vdo/slab-depot.c:635: if (slab->status != VDO_SLAB_REBUILT) {
drivers/md/dm-vdo/slab-depot.c:636: u8 hint = slab->allocator->summary_entries[slab->slab_number].fullness_hint;
drivers/md/dm-vdo/slab-depot.c:638: free_block_count = ((block_count_t) hint) << slab->allocator->depot->hint_shift;
drivers/md/dm-vdo/slab-depot.c:640: free_block_count = slab->free_blocks;
drivers/md/dm-vdo/slab-depot.c:652: update_slab_summary_entry(slab, &journal->slab_summary_waiter,
drivers/md/dm-vdo/slab-depot.c:661: static void reopen_slab_journal(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:663: struct slab_journal *journal = &slab->journal;
drivers/md/dm-vdo/slab-depot.c:709: vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
drivers/md/dm-vdo/slab-depot.c:710: check_if_slab_drained(journal->slab);
drivers/md/dm-vdo/slab-depot.c:734: continue_vio_after_io(vio, complete_write, journal->slab->allocator->thread_id);
drivers/md/dm-vdo/slab-depot.c:773: block_number = journal->slab->journal_origin +
drivers/md/dm-vdo/slab-depot.c:790: operation = vdo_get_admin_state_code(&journal->slab->state);
drivers/md/dm-vdo/slab-depot.c:792: vdo_finish_operation(&journal->slab->state,
drivers/md/dm-vdo/slab-depot.c:793: (vdo_is_read_only(journal->slab->allocator->depot->vdo) ?
drivers/md/dm-vdo/slab-depot.c:815: if (vdo_is_read_only(journal->slab->allocator->depot->vdo) ||
drivers/md/dm-vdo/slab-depot.c:834: acquire_vio_from_pool(journal->slab->allocator->vio_pool,
drivers/md/dm-vdo/slab-depot.c:918: vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
drivers/md/dm-vdo/slab-depot.c:927: vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo,
drivers/md/dm-vdo/slab-depot.c:934: pbn - journal->slab->start, operation, increment);
drivers/md/dm-vdo/slab-depot.c:957: bool vdo_attempt_replay_into_slab(struct vdo_slab *slab, physical_block_number_t pbn,
drivers/md/dm-vdo/slab-depot.c:962: struct slab_journal *journal = &slab->journal;
drivers/md/dm-vdo/slab-depot.c:980: vdo_start_operation_with_waiter(&journal->slab->state,
drivers/md/dm-vdo/slab-depot.c:996: if (journal->slab->status == VDO_SLAB_REBUILT)
drivers/md/dm-vdo/slab-depot.c:997: journal->slab->status = VDO_SLAB_REPLAYING;
drivers/md/dm-vdo/slab-depot.c:1017: struct vdo_slab *slab = container_of(waiter, struct vdo_slab, summary_waiter);
drivers/md/dm-vdo/slab-depot.c:1020: slab->active_count--;
drivers/md/dm-vdo/slab-depot.c:1024: vdo_enter_read_only_mode(slab->allocator->depot->vdo, result);
drivers/md/dm-vdo/slab-depot.c:1027: check_if_slab_drained(slab);
drivers/md/dm-vdo/slab-depot.c:1043: struct vdo_slab *slab = context;
drivers/md/dm-vdo/slab-depot.c:1045: if (vdo_is_read_only(slab->allocator->depot->vdo))
drivers/md/dm-vdo/slab-depot.c:1048: slab->active_count++;
drivers/md/dm-vdo/slab-depot.c:1051: acquire_vio_from_pool(slab->allocator->vio_pool, waiter);
drivers/md/dm-vdo/slab-depot.c:1054: static void save_dirty_reference_blocks(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:1056: vdo_waitq_notify_all_waiters(&slab->dirty_blocks,
drivers/md/dm-vdo/slab-depot.c:1057: launch_reference_block_write, slab);
drivers/md/dm-vdo/slab-depot.c:1058: check_if_slab_drained(slab);
drivers/md/dm-vdo/slab-depot.c:1071: struct vdo_slab *slab = block->slab;
drivers/md/dm-vdo/slab-depot.c:1074: slab->active_count--;
drivers/md/dm-vdo/slab-depot.c:1077: adjust_slab_journal_block_reference(&slab->journal,
drivers/md/dm-vdo/slab-depot.c:1088: check_if_slab_drained(slab);
drivers/md/dm-vdo/slab-depot.c:1094: vdo_waitq_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter);
drivers/md/dm-vdo/slab-depot.c:1095: if (vdo_is_state_draining(&slab->state)) {
drivers/md/dm-vdo/slab-depot.c:1097: save_dirty_reference_blocks(slab);
drivers/md/dm-vdo/slab-depot.c:1107: if ((slab->active_count > 0) || vdo_waitq_has_waiters(&slab->dirty_blocks)) {
drivers/md/dm-vdo/slab-depot.c:1108: check_if_slab_drained(slab);
drivers/md/dm-vdo/slab-depot.c:1112: offset = slab->allocator->summary_entries[slab->slab_number].tail_block_offset;
drivers/md/dm-vdo/slab-depot.c:1113: slab->active_count++;
drivers/md/dm-vdo/slab-depot.c:1114: slab->summary_waiter.callback = finish_summary_update;
drivers/md/dm-vdo/slab-depot.c:1115: update_slab_summary_entry(slab, &slab->summary_waiter, offset,
drivers/md/dm-vdo/slab-depot.c:1116: true, true, slab->free_blocks);
drivers/md/dm-vdo/slab-depot.c:1127: size_t block_index = block - block->slab->reference_blocks;
drivers/md/dm-vdo/slab-depot.c:1129: return &block->slab->counters[block_index * COUNTS_PER_BLOCK];
drivers/md/dm-vdo/slab-depot.c:1144: vdo_pack_journal_point(&block->slab->slab_journal_point, &commit_point);
drivers/md/dm-vdo/slab-depot.c:1157: thread_id_t thread_id = block->slab->allocator->thread_id;
drivers/md/dm-vdo/slab-depot.c:1170: struct vdo_slab *slab = ((struct reference_block *) completion->parent)->slab;
drivers/md/dm-vdo/slab-depot.c:1174: slab->active_count -= vio->io_size / VDO_BLOCK_SIZE;
drivers/md/dm-vdo/slab-depot.c:1175: vdo_enter_read_only_mode(slab->allocator->depot->vdo, result);
drivers/md/dm-vdo/slab-depot.c:1176: check_if_slab_drained(slab);
drivers/md/dm-vdo/slab-depot.c:1195: block_offset = (block - block->slab->reference_blocks);
drivers/md/dm-vdo/slab-depot.c:1196: pbn = (block->slab->ref_counts_origin + block_offset);
drivers/md/dm-vdo/slab-depot.c:1212: WRITE_ONCE(block->slab->allocator->ref_counts_statistics.blocks_written,
drivers/md/dm-vdo/slab-depot.c:1213: block->slab->allocator->ref_counts_statistics.blocks_written + 1);
drivers/md/dm-vdo/slab-depot.c:1223: struct vdo_slab *slab = journal->slab;
drivers/md/dm-vdo/slab-depot.c:1224: block_count_t write_count = vdo_waitq_num_waiters(&slab->dirty_blocks);
drivers/md/dm-vdo/slab-depot.c:1239: vdo_waitq_notify_next_waiter(&slab->dirty_blocks,
drivers/md/dm-vdo/slab-depot.c:1240: launch_reference_block_write, slab);
drivers/md/dm-vdo/slab-depot.c:1274: vdo_waitq_enqueue_waiter(&block->slab->dirty_blocks, &block->waiter);
drivers/md/dm-vdo/slab-depot.c:1282: static struct reference_block * __must_check get_reference_block(struct vdo_slab *slab,
drivers/md/dm-vdo/slab-depot.c:1285: return &slab->reference_blocks[index / COUNTS_PER_BLOCK];
drivers/md/dm-vdo/slab-depot.c:1297: static int __must_check slab_block_number_from_pbn(struct vdo_slab *slab,
drivers/md/dm-vdo/slab-depot.c:1303: if (pbn < slab->start)
drivers/md/dm-vdo/slab-depot.c:1306: slab_block_number = pbn - slab->start;
drivers/md/dm-vdo/slab-depot.c:1307: if (slab_block_number >= slab->allocator->depot->slab_config.data_blocks)
drivers/md/dm-vdo/slab-depot.c:1320: static int __must_check get_reference_counter(struct vdo_slab *slab,
drivers/md/dm-vdo/slab-depot.c:1325: int result = slab_block_number_from_pbn(slab, pbn, &index);
drivers/md/dm-vdo/slab-depot.c:1330: *counter_ptr = &slab->counters[index];
drivers/md/dm-vdo/slab-depot.c:1335: static unsigned int calculate_slab_priority(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:1337: block_count_t free_blocks = slab->free_blocks;
drivers/md/dm-vdo/slab-depot.c:1338: unsigned int unopened_slab_priority = slab->allocator->unopened_slab_priority;
drivers/md/dm-vdo/slab-depot.c:1362: if (is_slab_journal_blank(slab))
drivers/md/dm-vdo/slab-depot.c:1374: static void prioritize_slab(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:1376: VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
drivers/md/dm-vdo/slab-depot.c:1378: slab->priority = calculate_slab_priority(slab);
drivers/md/dm-vdo/slab-depot.c:1379: vdo_priority_table_enqueue(slab->allocator->prioritized_slabs,
drivers/md/dm-vdo/slab-depot.c:1380: slab->priority, &slab->allocq_entry);
drivers/md/dm-vdo/slab-depot.c:1388: static void adjust_free_block_count(struct vdo_slab *slab, bool incremented)
drivers/md/dm-vdo/slab-depot.c:1390: struct block_allocator *allocator = slab->allocator;
drivers/md/dm-vdo/slab-depot.c:1396: if (slab == allocator->open_slab)
drivers/md/dm-vdo/slab-depot.c:1400: if (slab->priority == calculate_slab_priority(slab))
drivers/md/dm-vdo/slab-depot.c:1407: vdo_priority_table_remove(allocator->prioritized_slabs, &slab->allocq_entry);
drivers/md/dm-vdo/slab-depot.c:1408: prioritize_slab(slab);
drivers/md/dm-vdo/slab-depot.c:1423: static int increment_for_data(struct vdo_slab *slab, struct reference_block *block,
drivers/md/dm-vdo/slab-depot.c:1433: slab->free_blocks--;
drivers/md/dm-vdo/slab-depot.c:1435: adjust_free_block_count(slab, false);
drivers/md/dm-vdo/slab-depot.c:1448: slab->slab_number, block_number);
drivers/md/dm-vdo/slab-depot.c:1470: static int decrement_for_data(struct vdo_slab *slab, struct reference_block *block,
drivers/md/dm-vdo/slab-depot.c:1480: block_number, slab->slab_number);
drivers/md/dm-vdo/slab-depot.c:1501: slab->free_blocks++;
drivers/md/dm-vdo/slab-depot.c:1503: adjust_free_block_count(slab, true);
drivers/md/dm-vdo/slab-depot.c:1533: static int increment_for_block_map(struct vdo_slab *slab, struct reference_block *block,
drivers/md/dm-vdo/slab-depot.c:1544: slab->slab_number, block_number);
drivers/md/dm-vdo/slab-depot.c:1549: slab->free_blocks--;
drivers/md/dm-vdo/slab-depot.c:1551: adjust_free_block_count(slab, false);
drivers/md/dm-vdo/slab-depot.c:1559: slab->slab_number, block_number);
drivers/md/dm-vdo/slab-depot.c:1569: *counter_ptr, slab->slab_number,
drivers/md/dm-vdo/slab-depot.c:1593: static int update_reference_count(struct vdo_slab *slab, struct reference_block *block,
drivers/md/dm-vdo/slab-depot.c:1600: vdo_refcount_t *counter_ptr = &slab->counters[block_number];
drivers/md/dm-vdo/slab-depot.c:1605: result = decrement_for_data(slab, block, block_number, old_status,
drivers/md/dm-vdo/slab-depot.c:1613: result = increment_for_data(slab, block, block_number, old_status,
drivers/md/dm-vdo/slab-depot.c:1616: result = increment_for_block_map(slab, block, block_number, old_status,
drivers/md/dm-vdo/slab-depot.c:1625: slab->slab_journal_point = *slab_journal_point;
drivers/md/dm-vdo/slab-depot.c:1630: static int __must_check adjust_reference_count(struct vdo_slab *slab,
drivers/md/dm-vdo/slab-depot.c:1639: if (!is_slab_open(slab))
drivers/md/dm-vdo/slab-depot.c:1642: result = slab_block_number_from_pbn(slab, updater->zpbn.pbn, &block_number);
drivers/md/dm-vdo/slab-depot.c:1646: block = get_reference_block(slab, block_number);
drivers/md/dm-vdo/slab-depot.c:1647: result = update_reference_count(slab, block, block_number, slab_journal_point,
drivers/md/dm-vdo/slab-depot.c:1665: adjust_slab_journal_block_reference(&slab->journal, entry_lock, -1);
drivers/md/dm-vdo/slab-depot.c:1712: zone_count_t zone_number = journal->slab->allocator->zone_number;
drivers/md/dm-vdo/slab-depot.c:1728: if (journal->slab->status != VDO_SLAB_REBUILT) {
drivers/md/dm-vdo/slab-depot.c:1738: result = adjust_reference_count(journal->slab, updater,
drivers/md/dm-vdo/slab-depot.c:1783: (journal->slab->status == VDO_SLAB_REBUILDING)) {
drivers/md/dm-vdo/slab-depot.c:1817: save_dirty_reference_blocks(journal->slab);
drivers/md/dm-vdo/slab-depot.c:1843: save_dirty_reference_blocks(journal->slab);
drivers/md/dm-vdo/slab-depot.c:1855: struct vdo_slab *slab = journal->slab;
drivers/md/dm-vdo/slab-depot.c:1866: for (i = 0; i < slab->reference_block_count; i++) {
drivers/md/dm-vdo/slab-depot.c:1867: slab->reference_blocks[i].slab_journal_lock = 1;
drivers/md/dm-vdo/slab-depot.c:1868: dirty_block(&slab->reference_blocks[i]);
drivers/md/dm-vdo/slab-depot.c:1872: slab->reference_block_count);
drivers/md/dm-vdo/slab-depot.c:1883: if (vdo_is_state_draining(&journal->slab->state) &&
drivers/md/dm-vdo/slab-depot.c:1884: !vdo_is_state_suspending(&journal->slab->state) &&
drivers/md/dm-vdo/slab-depot.c:1894: static void reset_search_cursor(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:1896: struct search_cursor *cursor = &slab->search_cursor;
drivers/md/dm-vdo/slab-depot.c:1900: cursor->end_index = min_t(u32, COUNTS_PER_BLOCK, slab->block_count);
drivers/md/dm-vdo/slab-depot.c:1912: static bool advance_search_cursor(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:1914: struct search_cursor *cursor = &slab->search_cursor;
drivers/md/dm-vdo/slab-depot.c:1921: reset_search_cursor(slab);
drivers/md/dm-vdo/slab-depot.c:1931: cursor->end_index = slab->block_count;
drivers/md/dm-vdo/slab-depot.c:1954: struct vdo_slab *slab = vdo_get_slab(depot, pbn);
drivers/md/dm-vdo/slab-depot.c:1960: result = slab_block_number_from_pbn(slab, pbn, &block_number);
drivers/md/dm-vdo/slab-depot.c:1964: block = get_reference_block(slab, block_number);
drivers/md/dm-vdo/slab-depot.c:1965: result = update_reference_count(slab, block, block_number, NULL,
drivers/md/dm-vdo/slab-depot.c:1985: static int replay_reference_count_change(struct vdo_slab *slab,
drivers/md/dm-vdo/slab-depot.c:1990: struct reference_block *block = get_reference_block(slab, entry.sbn);
drivers/md/dm-vdo/slab-depot.c:2003: result = update_reference_count(slab, block, entry.sbn, entry_point,
drivers/md/dm-vdo/slab-depot.c:2051: static bool find_free_block(const struct vdo_slab *slab, slab_block_number *index_ptr)
drivers/md/dm-vdo/slab-depot.c:2054: slab_block_number next_index = slab->search_cursor.index;
drivers/md/dm-vdo/slab-depot.c:2055: slab_block_number end_index = slab->search_cursor.end_index;
drivers/md/dm-vdo/slab-depot.c:2056: u8 *next_counter = &slab->counters[next_index];
drivers/md/dm-vdo/slab-depot.c:2057: u8 *end_counter = &slab->counters[end_index];
drivers/md/dm-vdo/slab-depot.c:2108: static bool search_current_reference_block(const struct vdo_slab *slab,
drivers/md/dm-vdo/slab-depot.c:2112: return ((slab->search_cursor.block->allocated_count < COUNTS_PER_BLOCK) &&
drivers/md/dm-vdo/slab-depot.c:2113: find_free_block(slab, free_index_ptr));
drivers/md/dm-vdo/slab-depot.c:2127: static bool search_reference_blocks(struct vdo_slab *slab,
drivers/md/dm-vdo/slab-depot.c:2131: if (search_current_reference_block(slab, free_index_ptr))
drivers/md/dm-vdo/slab-depot.c:2135: while (advance_search_cursor(slab)) {
drivers/md/dm-vdo/slab-depot.c:2136: if (search_current_reference_block(slab, free_index_ptr))
drivers/md/dm-vdo/slab-depot.c:2148: static void make_provisional_reference(struct vdo_slab *slab,
drivers/md/dm-vdo/slab-depot.c:2151: struct reference_block *block = get_reference_block(slab, block_number);
drivers/md/dm-vdo/slab-depot.c:2157: slab->counters[block_number] = PROVISIONAL_REFERENCE_COUNT;
drivers/md/dm-vdo/slab-depot.c:2161: slab->free_blocks--;
drivers/md/dm-vdo/slab-depot.c:2168: static void dirty_all_reference_blocks(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:2172: for (i = 0; i < slab->reference_block_count; i++)
drivers/md/dm-vdo/slab-depot.c:2173: dirty_block(&slab->reference_blocks[i]);
drivers/md/dm-vdo/slab-depot.c:2276: struct vdo_slab *slab = block->slab;
drivers/md/dm-vdo/slab-depot.c:2286: if (vdo_before_journal_point(&slab->slab_journal_point,
drivers/md/dm-vdo/slab-depot.c:2288: slab->slab_journal_point = block->commit_points[i];
drivers/md/dm-vdo/slab-depot.c:2293: size_t block_index = block - block->slab->reference_blocks;
drivers/md/dm-vdo/slab-depot.c:2296: i, block_index, block->slab->slab_number);
drivers/md/dm-vdo/slab-depot.c:2312: struct vdo_slab *slab = block->slab;
drivers/md/dm-vdo/slab-depot.c:2321: slab->free_blocks -= block->allocated_count;
drivers/md/dm-vdo/slab-depot.c:2324: slab->active_count -= block_count;
drivers/md/dm-vdo/slab-depot.c:2326: check_if_slab_drained(slab);
drivers/md/dm-vdo/slab-depot.c:2335: block->slab->allocator->thread_id);
drivers/md/dm-vdo/slab-depot.c:2350: u32 block_offset = block - block->slab->reference_blocks;
drivers/md/dm-vdo/slab-depot.c:2351: u32 max_block_count = block->slab->reference_block_count - block_offset;
drivers/md/dm-vdo/slab-depot.c:2355: vdo_submit_metadata_vio_with_size(vio, block->slab->ref_counts_origin + block_offset,
drivers/md/dm-vdo/slab-depot.c:2365: static void load_reference_blocks(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:2368: u64 blocks_per_vio = slab->allocator->refcount_blocks_per_big_vio;
drivers/md/dm-vdo/slab-depot.c:2369: struct vio_pool *pool = slab->allocator->refcount_big_vio_pool;
drivers/md/dm-vdo/slab-depot.c:2372: pool = slab->allocator->vio_pool;
drivers/md/dm-vdo/slab-depot.c:2376: slab->free_blocks = slab->block_count;
drivers/md/dm-vdo/slab-depot.c:2377: slab->active_count = slab->reference_block_count;
drivers/md/dm-vdo/slab-depot.c:2378: for (i = 0; i < slab->reference_block_count; i += blocks_per_vio) {
drivers/md/dm-vdo/slab-depot.c:2379: struct vdo_waiter *waiter = &slab->reference_blocks[i].waiter;
drivers/md/dm-vdo/slab-depot.c:2393: static void drain_slab(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:2397: const struct admin_state_code *state = vdo_get_admin_state_code(&slab->state);
drivers/md/dm-vdo/slab-depot.c:2404: commit_tail(&slab->journal);
drivers/md/dm-vdo/slab-depot.c:2406: if ((state == VDO_ADMIN_STATE_RECOVERING) || (slab->counters == NULL))
drivers/md/dm-vdo/slab-depot.c:2410: load = slab->allocator->summary_entries[slab->slab_number].load_ref_counts;
drivers/md/dm-vdo/slab-depot.c:2413: load_reference_blocks(slab);
drivers/md/dm-vdo/slab-depot.c:2419: dirty_all_reference_blocks(slab);
drivers/md/dm-vdo/slab-depot.c:2427: block_count_t data_blocks = slab->allocator->depot->slab_config.data_blocks;
drivers/md/dm-vdo/slab-depot.c:2429: if (load || (slab->free_blocks != data_blocks) ||
drivers/md/dm-vdo/slab-depot.c:2430: !is_slab_journal_blank(slab)) {
drivers/md/dm-vdo/slab-depot.c:2431: dirty_all_reference_blocks(slab);
drivers/md/dm-vdo/slab-depot.c:2435: save = (slab->status == VDO_SLAB_REBUILT);
drivers/md/dm-vdo/slab-depot.c:2437: vdo_finish_draining_with_result(&slab->state, VDO_SUCCESS);
drivers/md/dm-vdo/slab-depot.c:2442: save_dirty_reference_blocks(slab);
drivers/md/dm-vdo/slab-depot.c:2445: static int allocate_slab_counters(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:2450: result = VDO_ASSERT(slab->reference_blocks == NULL,
drivers/md/dm-vdo/slab-depot.c:2452: slab->slab_number);
drivers/md/dm-vdo/slab-depot.c:2456: result = vdo_allocate(slab->reference_block_count, struct reference_block,
drivers/md/dm-vdo/slab-depot.c:2457: __func__, &slab->reference_blocks);
drivers/md/dm-vdo/slab-depot.c:2465: bytes = (slab->reference_block_count * COUNTS_PER_BLOCK) + (2 * BYTES_PER_WORD);
drivers/md/dm-vdo/slab-depot.c:2467: &slab->counters);
drivers/md/dm-vdo/slab-depot.c:2469: vdo_free(vdo_forget(slab->reference_blocks));
drivers/md/dm-vdo/slab-depot.c:2473: slab->search_cursor.first_block = slab->reference_blocks;
drivers/md/dm-vdo/slab-depot.c:2474: slab->search_cursor.last_block = &slab->reference_blocks[slab->reference_block_count - 1];
drivers/md/dm-vdo/slab-depot.c:2475: reset_search_cursor(slab);
drivers/md/dm-vdo/slab-depot.c:2477: for (index = 0; index < slab->reference_block_count; index++) {
drivers/md/dm-vdo/slab-depot.c:2478: slab->reference_blocks[index] = (struct reference_block) {
drivers/md/dm-vdo/slab-depot.c:2479: .slab = slab,
drivers/md/dm-vdo/slab-depot.c:2486: static int allocate_counters_if_clean(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:2488: if (vdo_is_state_clean_load(&slab->state))
drivers/md/dm-vdo/slab-depot.c:2489: return allocate_slab_counters(slab);
drivers/md/dm-vdo/slab-depot.c:2498: struct vdo_slab *slab = journal->slab;
drivers/md/dm-vdo/slab-depot.c:2506: (header.nonce == slab->allocator->nonce)) {
drivers/md/dm-vdo/slab-depot.c:2513: journal->head = (slab->allocator->summary_entries[slab->slab_number].is_dirty ?
drivers/md/dm-vdo/slab-depot.c:2520: vdo_finish_loading_with_result(&slab->state, allocate_counters_if_clean(slab));
drivers/md/dm-vdo/slab-depot.c:2529: journal->slab->allocator->thread_id);
drivers/md/dm-vdo/slab-depot.c:2540: vdo_finish_loading_with_result(&journal->slab->state, result);
drivers/md/dm-vdo/slab-depot.c:2555: struct vdo_slab *slab = journal->slab;
drivers/md/dm-vdo/slab-depot.c:2559: slab->allocator->summary_entries[slab->slab_number].tail_block_offset;
drivers/md/dm-vdo/slab-depot.c:2570: vio->completion.callback_thread_id = slab->allocator->thread_id;
drivers/md/dm-vdo/slab-depot.c:2571: vdo_submit_metadata_vio(vio, slab->journal_origin + tail_block,
drivers/md/dm-vdo/slab-depot.c:2580: static void load_slab_journal(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:2582: struct slab_journal *journal = &slab->journal;
drivers/md/dm-vdo/slab-depot.c:2585: last_commit_point = slab->allocator->summary_entries[slab->slab_number].tail_block_offset;
drivers/md/dm-vdo/slab-depot.c:2587: !slab->allocator->summary_entries[slab->slab_number].load_ref_counts) {
drivers/md/dm-vdo/slab-depot.c:2596: vdo_finish_loading_with_result(&slab->state,
drivers/md/dm-vdo/slab-depot.c:2597: allocate_counters_if_clean(slab));
drivers/md/dm-vdo/slab-depot.c:2602: acquire_vio_from_pool(slab->allocator->vio_pool, &journal->resource_waiter);
drivers/md/dm-vdo/slab-depot.c:2605: static void register_slab_for_scrubbing(struct vdo_slab *slab, bool high_priority)
drivers/md/dm-vdo/slab-depot.c:2607: struct slab_scrubber *scrubber = &slab->allocator->scrubber;
drivers/md/dm-vdo/slab-depot.c:2609: VDO_ASSERT_LOG_ONLY((slab->status != VDO_SLAB_REBUILT),
drivers/md/dm-vdo/slab-depot.c:2612: if (slab->status != VDO_SLAB_REQUIRES_SCRUBBING)
drivers/md/dm-vdo/slab-depot.c:2615: list_del_init(&slab->allocq_entry);
drivers/md/dm-vdo/slab-depot.c:2616: if (!slab->was_queued_for_scrubbing) {
drivers/md/dm-vdo/slab-depot.c:2618: slab->was_queued_for_scrubbing = true;
drivers/md/dm-vdo/slab-depot.c:2622: slab->status = VDO_SLAB_REQUIRES_HIGH_PRIORITY_SCRUBBING;
drivers/md/dm-vdo/slab-depot.c:2623: list_add_tail(&slab->allocq_entry, &scrubber->high_priority_slabs);
drivers/md/dm-vdo/slab-depot.c:2627: list_add_tail(&slab->allocq_entry, &scrubber->slabs);
drivers/md/dm-vdo/slab-depot.c:2631: static void queue_slab(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:2633: struct block_allocator *allocator = slab->allocator;
drivers/md/dm-vdo/slab-depot.c:2637: VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
drivers/md/dm-vdo/slab-depot.c:2643: free_blocks = slab->free_blocks;
drivers/md/dm-vdo/slab-depot.c:2646: slab->slab_number, (unsigned long long) free_blocks,
drivers/md/dm-vdo/slab-depot.c:2653: if (slab->status != VDO_SLAB_REBUILT) {
drivers/md/dm-vdo/slab-depot.c:2654: register_slab_for_scrubbing(slab, false);
drivers/md/dm-vdo/slab-depot.c:2658: if (!vdo_is_state_resuming(&slab->state)) {
drivers/md/dm-vdo/slab-depot.c:2666: if (!is_slab_journal_blank(slab)) {
drivers/md/dm-vdo/slab-depot.c:2673: reopen_slab_journal(slab);
drivers/md/dm-vdo/slab-depot.c:2675: prioritize_slab(slab);
drivers/md/dm-vdo/slab-depot.c:2681: struct vdo_slab *slab = container_of(state, struct vdo_slab, state);
drivers/md/dm-vdo/slab-depot.c:2687: slab->status = VDO_SLAB_REBUILDING;
drivers/md/dm-vdo/slab-depot.c:2689: drain_slab(slab);
drivers/md/dm-vdo/slab-depot.c:2690: check_if_slab_drained(slab);
drivers/md/dm-vdo/slab-depot.c:2695: load_slab_journal(slab);
drivers/md/dm-vdo/slab-depot.c:2700: queue_slab(slab);
drivers/md/dm-vdo/slab-depot.c:2716: struct vdo_slab *slab;
drivers/md/dm-vdo/slab-depot.c:2718: slab = list_first_entry_or_null(&scrubber->high_priority_slabs,
drivers/md/dm-vdo/slab-depot.c:2720: if (slab != NULL)
drivers/md/dm-vdo/slab-depot.c:2721: return slab;
drivers/md/dm-vdo/slab-depot.c:2819: struct vdo_slab *slab = scrubber->slab;
drivers/md/dm-vdo/slab-depot.c:2821: slab->status = VDO_SLAB_REBUILT;
drivers/md/dm-vdo/slab-depot.c:2822: queue_slab(slab);
drivers/md/dm-vdo/slab-depot.c:2823: reopen_slab_journal(slab);
drivers/md/dm-vdo/slab-depot.c:2863: sequence_number_t block_number, struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:2870: slab_block_number max_sbn = slab->end - slab->start;
drivers/md/dm-vdo/slab-depot.c:2885: result = replay_reference_count_change(slab, &entry_point, entry);
drivers/md/dm-vdo/slab-depot.c:2892: entry.sbn, slab->slab_number);
drivers/md/dm-vdo/slab-depot.c:2912: struct vdo_slab *slab = scrubber->slab;
drivers/md/dm-vdo/slab-depot.c:2913: struct slab_journal *journal = &slab->journal;
drivers/md/dm-vdo/slab-depot.c:2926: struct journal_point ref_counts_point = slab->slab_journal_point;
drivers/md/dm-vdo/slab-depot.c:2938: if ((header.nonce != slab->allocator->nonce) ||
drivers/md/dm-vdo/slab-depot.c:2946: slab->slab_number);
drivers/md/dm-vdo/slab-depot.c:2951: result = apply_block_entries(block, header.entry_count, sequence, slab);
drivers/md/dm-vdo/slab-depot.c:2978: slab->allocator->thread_id, completion->parent);
drivers/md/dm-vdo/slab-depot.c:2979: vdo_start_operation_with_waiter(&slab->state,
drivers/md/dm-vdo/slab-depot.c:2990: scrubber->slab->allocator->thread_id);
drivers/md/dm-vdo/slab-depot.c:3003: struct vdo_slab *slab = scrubber->slab;
drivers/md/dm-vdo/slab-depot.c:3005: if (!slab->allocator->summary_entries[slab->slab_number].is_dirty) {
drivers/md/dm-vdo/slab-depot.c:3010: vdo_submit_metadata_vio(&scrubber->vio, slab->journal_origin,
drivers/md/dm-vdo/slab-depot.c:3022: struct vdo_slab *slab;
drivers/md/dm-vdo/slab-depot.c:3035: slab = get_next_slab(scrubber);
drivers/md/dm-vdo/slab-depot.c:3036: if ((slab == NULL) ||
drivers/md/dm-vdo/slab-depot.c:3045: list_del_init(&slab->allocq_entry);
drivers/md/dm-vdo/slab-depot.c:3046: scrubber->slab = slab;
drivers/md/dm-vdo/slab-depot.c:3048: slab->allocator->thread_id, completion->parent);
drivers/md/dm-vdo/slab-depot.c:3049: vdo_start_operation_with_waiter(&slab->state, VDO_ADMIN_STATE_SCRUBBING,
drivers/md/dm-vdo/slab-depot.c:3086: struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:3089: allocator->last_slab = slab->slab_number;
drivers/md/dm-vdo/slab-depot.c:3132: struct vdo_slab *slab = iterator->next;
drivers/md/dm-vdo/slab-depot.c:3134: if ((slab == NULL) || (slab->slab_number < iterator->end + iterator->stride))
drivers/md/dm-vdo/slab-depot.c:3137: iterator->next = iterator->slabs[slab->slab_number - iterator->stride];
drivers/md/dm-vdo/slab-depot.c:3139: return slab;
drivers/md/dm-vdo/slab-depot.c:3174: struct vdo_slab *slab = next_slab(&iterator);
drivers/md/dm-vdo/slab-depot.c:3176: vdo_waitq_notify_all_waiters(&slab->journal.entry_waiters,
drivers/md/dm-vdo/slab-depot.c:3177: abort_waiter, &slab->journal);
drivers/md/dm-vdo/slab-depot.c:3178: check_if_slab_drained(slab);
drivers/md/dm-vdo/slab-depot.c:3193: int vdo_acquire_provisional_reference(struct vdo_slab *slab, physical_block_number_t pbn,
drivers/md/dm-vdo/slab-depot.c:3202: if (!is_slab_open(slab))
drivers/md/dm-vdo/slab-depot.c:3205: result = slab_block_number_from_pbn(slab, pbn, &block_number);
drivers/md/dm-vdo/slab-depot.c:3209: if (slab->counters[block_number] == EMPTY_REFERENCE_COUNT) {
drivers/md/dm-vdo/slab-depot.c:3210: make_provisional_reference(slab, block_number);
drivers/md/dm-vdo/slab-depot.c:3216: adjust_free_block_count(slab, false);
drivers/md/dm-vdo/slab-depot.c:3221: static int __must_check allocate_slab_block(struct vdo_slab *slab,
drivers/md/dm-vdo/slab-depot.c:3226: if (!is_slab_open(slab))
drivers/md/dm-vdo/slab-depot.c:3229: if (!search_reference_blocks(slab, &free_index))
drivers/md/dm-vdo/slab-depot.c:3232: VDO_ASSERT_LOG_ONLY((slab->counters[free_index] == EMPTY_REFERENCE_COUNT),
drivers/md/dm-vdo/slab-depot.c:3234: make_provisional_reference(slab, free_index);
drivers/md/dm-vdo/slab-depot.c:3235: adjust_free_block_count(slab, false);
drivers/md/dm-vdo/slab-depot.c:3241: slab->search_cursor.index = (free_index + 1);
drivers/md/dm-vdo/slab-depot.c:3243: *block_number_ptr = slab->start + free_index;
drivers/md/dm-vdo/slab-depot.c:3251: static void open_slab(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:3253: reset_search_cursor(slab);
drivers/md/dm-vdo/slab-depot.c:3254: if (is_slab_journal_blank(slab)) {
drivers/md/dm-vdo/slab-depot.c:3255: WRITE_ONCE(slab->allocator->statistics.slabs_opened,
drivers/md/dm-vdo/slab-depot.c:3256: slab->allocator->statistics.slabs_opened + 1);
drivers/md/dm-vdo/slab-depot.c:3257: dirty_all_reference_blocks(slab);
drivers/md/dm-vdo/slab-depot.c:3259: WRITE_ONCE(slab->allocator->statistics.slabs_reopened,
drivers/md/dm-vdo/slab-depot.c:3260: slab->allocator->statistics.slabs_reopened + 1);
drivers/md/dm-vdo/slab-depot.c:3263: slab->allocator->open_slab = slab;
drivers/md/dm-vdo/slab-depot.c:3328: struct vdo_slab *slab = vdo_get_slab(completion->vdo->depot, updater->zpbn.pbn);
drivers/md/dm-vdo/slab-depot.c:3330: if (!is_slab_open(slab)) {
drivers/md/dm-vdo/slab-depot.c:3340: vdo_waitq_enqueue_waiter(&slab->journal.entry_waiters, &updater->waiter);
drivers/md/dm-vdo/slab-depot.c:3341: if ((slab->status != VDO_SLAB_REBUILT) && requires_reaping(&slab->journal))
drivers/md/dm-vdo/slab-depot.c:3342: register_slab_for_scrubbing(slab, true);
drivers/md/dm-vdo/slab-depot.c:3344: add_entries(&slab->journal);
drivers/md/dm-vdo/slab-depot.c:3444: struct vdo_slab *slab = next_slab(&iterator);
drivers/md/dm-vdo/slab-depot.c:3446: list_del_init(&slab->allocq_entry);
drivers/md/dm-vdo/slab-depot.c:3448: vdo_start_operation_with_waiter(&slab->state, operation,
drivers/md/dm-vdo/slab-depot.c:3494: struct vdo_slab *slab;
drivers/md/dm-vdo/slab-depot.c:3505: slab = next_slab(&allocator->slabs_to_erase);
drivers/md/dm-vdo/slab-depot.c:3506: pbn = slab->journal_origin - depot->vdo->geometry.bio_offset;
drivers/md/dm-vdo/slab-depot.c:3611: struct vdo_slab *slab;
drivers/md/dm-vdo/slab-depot.c:3616: slab = depot->slabs[current_slab_status.slab_number];
drivers/md/dm-vdo/slab-depot.c:3619: (!allocator->summary_entries[slab->slab_number].load_ref_counts &&
drivers/md/dm-vdo/slab-depot.c:3621: queue_slab(slab);
drivers/md/dm-vdo/slab-depot.c:3625: slab->status = VDO_SLAB_REQUIRES_SCRUBBING;
drivers/md/dm-vdo/slab-depot.c:3626: journal = &slab->journal;
drivers/md/dm-vdo/slab-depot.c:3630: register_slab_for_scrubbing(slab, high_priority);
drivers/md/dm-vdo/slab-depot.c:3663: struct vdo_slab *slab = next_slab(&iterator);
drivers/md/dm-vdo/slab-depot.c:3664: struct slab_journal *journal = &slab->journal;
drivers/md/dm-vdo/slab-depot.c:3666: if (slab->reference_blocks != NULL) {
drivers/md/dm-vdo/slab-depot.c:3668: vdo_log_info("slab %u: P%u, %llu free", slab->slab_number,
drivers/md/dm-vdo/slab-depot.c:3669: slab->priority,
drivers/md/dm-vdo/slab-depot.c:3670: (unsigned long long) slab->free_blocks);
drivers/md/dm-vdo/slab-depot.c:3672: vdo_log_info("slab %u: status %s", slab->slab_number,
drivers/md/dm-vdo/slab-depot.c:3673: status_to_string(slab->status));
drivers/md/dm-vdo/slab-depot.c:3693: if (slab->counters != NULL) {
drivers/md/dm-vdo/slab-depot.c:3696: slab->free_blocks, slab->block_count,
drivers/md/dm-vdo/slab-depot.c:3697: slab->reference_block_count,
drivers/md/dm-vdo/slab-depot.c:3698: vdo_waitq_num_waiters(&slab->dirty_blocks),
drivers/md/dm-vdo/slab-depot.c:3699: slab->active_count,
drivers/md/dm-vdo/slab-depot.c:3700: (unsigned long long) slab->slab_journal_point.sequence_number,
drivers/md/dm-vdo/slab-depot.c:3701: slab->slab_journal_point.entry_count);
drivers/md/dm-vdo/slab-depot.c:3723: static void free_slab(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:3725: if (slab == NULL)
drivers/md/dm-vdo/slab-depot.c:3728: list_del(&slab->allocq_entry);
drivers/md/dm-vdo/slab-depot.c:3729: vdo_free(vdo_forget(slab->journal.block));
drivers/md/dm-vdo/slab-depot.c:3730: vdo_free(vdo_forget(slab->journal.locks));
drivers/md/dm-vdo/slab-depot.c:3731: vdo_free(vdo_forget(slab->counters));
drivers/md/dm-vdo/slab-depot.c:3732: vdo_free(vdo_forget(slab->reference_blocks));
drivers/md/dm-vdo/slab-depot.c:3733: vdo_free(slab);
drivers/md/dm-vdo/slab-depot.c:3736: static int initialize_slab_journal(struct vdo_slab *slab)
drivers/md/dm-vdo/slab-depot.c:3738: struct slab_journal *journal = &slab->journal;
drivers/md/dm-vdo/slab-depot.c:3739: const struct slab_config *slab_config = &slab->allocator->depot->slab_config;
drivers/md/dm-vdo/slab-depot.c:3752: journal->slab = slab;
drivers/md/dm-vdo/slab-depot.c:3759: journal->events = &slab->allocator->slab_journal_statistics;
drivers/md/dm-vdo/slab-depot.c:3760: journal->recovery_journal = slab->allocator->depot->vdo->recovery_journal;
drivers/md/dm-vdo/slab-depot.c:3777: journal->tail_header.nonce = slab->allocator->nonce;
drivers/md/dm-vdo/slab-depot.c:3800: struct vdo_slab *slab;
drivers/md/dm-vdo/slab-depot.c:3803: result = vdo_allocate(1, struct vdo_slab, __func__, &slab);
drivers/md/dm-vdo/slab-depot.c:3807: *slab = (struct vdo_slab) {
drivers/md/dm-vdo/slab-depot.c:3820: INIT_LIST_HEAD(&slab->allocq_entry);
drivers/md/dm-vdo/slab-depot.c:3822: result = initialize_slab_journal(slab);
drivers/md/dm-vdo/slab-depot.c:3824: free_slab(slab);
drivers/md/dm-vdo/slab-depot.c:3829: vdo_set_admin_state_code(&slab->state, VDO_ADMIN_STATE_NEW);
drivers/md/dm-vdo/slab-depot.c:3830: result = allocate_slab_counters(slab);
drivers/md/dm-vdo/slab-depot.c:3832: free_slab(slab);
drivers/md/dm-vdo/slab-depot.c:3836: vdo_set_admin_state_code(&slab->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
drivers/md/dm-vdo/slab-depot.c:3839: *slab_ptr = slab;
drivers/md/dm-vdo/slab-depot.c:3933: vdo_is_read_only(journal->slab->allocator->depot->vdo))
drivers/md/dm-vdo/slab-depot.c:4222: struct vdo_slab *slab = depot->new_slabs[i];
drivers/md/dm-vdo/slab-depot.c:4224: register_slab_with_allocator(slab->allocator, slab);
drivers/md/dm-vdo/slab-depot.c:4459: struct vdo_slab *slab = vdo_get_slab(depot, pbn);
drivers/md/dm-vdo/slab-depot.c:4463: if ((slab == NULL) || (slab->status != VDO_SLAB_REBUILT))
drivers/md/dm-vdo/slab-depot.c:4466: result = get_reference_counter(slab, pbn, &counter_ptr);
drivers/md/dm-vdo/slab-depot.c:4822: struct vdo_slab *slab = depot->new_slabs[i];
drivers/md/dm-vdo/slab-depot.c:4824: if (slab->allocator == allocator)
drivers/md/dm-vdo/slab-depot.c:4825: register_slab_with_allocator(allocator, slab);
drivers/md/dm-vdo/slab-depot.h:84: struct vdo_slab *slab;
drivers/md/dm-vdo/slab-depot.h:167: struct vdo_slab *slab;
drivers/md/dm-vdo/slab-depot.h:300: struct vdo_slab *slab;
drivers/md/dm-vdo/slab-depot.h:517: bool __must_check vdo_attempt_replay_into_slab(struct vdo_slab *slab,
drivers/md/dm-vdo/slab-depot.h:534: int __must_check vdo_acquire_provisional_reference(struct vdo_slab *slab,
drivers/md/dm-vdo/vdo.c:1701: struct vdo_slab *slab;
drivers/md/dm-vdo/vdo.c:1717: slab = vdo_get_slab(vdo->depot, pbn);
drivers/md/dm-vdo/vdo.c:1718: result = VDO_ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs");
drivers/md/dm-vdo/vdo.c:1722: *zone_ptr = &vdo->physical_zones->zones[slab->allocator->zone_number];
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h:128: chtls_tcp_ops->slab = tcp_prot->rsk_prot->slab;
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h:136: kmem_cache_free(req->rsk_ops->slab, req);
fs/erofs/zdata.c:123: struct kmem_cache *slab;
fs/erofs/zdata.c:223: if (!pcluster_pool[i].slab)
fs/erofs/zdata.c:225: kmem_cache_destroy(pcluster_pool[i].slab);
fs/erofs/zdata.c:226: pcluster_pool[i].slab = NULL;
fs/erofs/zdata.c:241: pcs->slab = kmem_cache_create(pcs->name, size, 0,
fs/erofs/zdata.c:243: if (pcs->slab)
fs/erofs/zdata.c:263: pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
fs/erofs/zdata.c:282: kmem_cache_free(pcs->slab, pcl);
fs/nfsd/nfs4state.c:909: struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
fs/nfsd/nfs4state.c:915: stid = kmem_cache_zalloc(slab, GFP_KERNEL);
fs/nfsd/nfs4state.c:939: kmem_cache_free(slab, stid);
fs/nfsd/nfs4state.c:4959: static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
fs/nfsd/nfs4state.c:4963: sop = kmem_cache_alloc(slab, GFP_KERNEL);
fs/nfsd/nfs4state.c:4969: kmem_cache_free(slab, sop);
fs/nfsd/state.h:800: struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab,
include/linux/kasan.h:14: struct slab;
include/linux/kasan.h:130: void __kasan_poison_slab(struct slab *slab);
include/linux/kasan.h:131: static __always_inline void kasan_poison_slab(struct slab *slab)
include/linux/kasan.h:134: __kasan_poison_slab(slab);
include/linux/kasan.h:416: static inline void kasan_poison_slab(struct slab *slab) {}
include/linux/kfence.h:222: bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
include/linux/kfence.h:246: static inline bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
include/linux/page-flags.h:1051: PAGE_TYPE_OPS(Slab, slab, slab)
include/net/request_sock.h:31: struct kmem_cache *slab;
include/net/sock.h:1377: struct kmem_cache *slab;
lib/sg_pool.c:13: struct kmem_cache *slab;
lib/sg_pool.c:150: sgp->slab = kmem_cache_create(sgp->name, size, 0,
lib/sg_pool.c:152: if (!sgp->slab) {
lib/sg_pool.c:159: sgp->slab);
lib/sg_pool.c:174: kmem_cache_destroy(sgp->slab);
mm/debug.c:52: DEF_PAGETYPE_NAME(slab),
mm/kasan/common.c:45: struct slab *kasan_addr_to_slab(const void *addr)
mm/kasan/common.c:155: void __kasan_poison_slab(struct slab *slab)
mm/kasan/common.c:157: struct page *page = slab_page(slab);
mm/kasan/common.c:461: struct slab *slab;
mm/kasan/common.c:479: slab = virt_to_slab(object);
mm/kasan/common.c:482: if (unlikely(!slab))
mm/kasan/common.c:485: poison_kmalloc_redzone(slab->slab_cache, object, size, flags);
mm/kasan/common.c:522: struct slab *slab;
mm/kasan/common.c:534: slab = page_slab(page);
mm/kasan/common.c:536: if (check_slab_allocation(slab->slab_cache, ptr, ip))
mm/kasan/common.c:539: poison_slab_object(slab->slab_cache, ptr, false);
mm/kasan/common.c:545: struct slab *slab;
mm/kasan/common.c:548: slab = virt_to_slab(ptr);
mm/kasan/common.c:554: if (unlikely(!slab)) {
mm/kasan/common.c:564: unpoison_slab_object(slab->slab_cache, ptr, flags, false);
mm/kasan/common.c:567: if (is_kmalloc_cache(slab->slab_cache))
mm/kasan/common.c:568: poison_kmalloc_redzone(slab->slab_cache, ptr, size, flags);
mm/kasan/generic.c:541: struct slab *slab = kasan_addr_to_slab(addr);
mm/kasan/generic.c:546: if (is_kfence_address(addr) || !slab)
mm/kasan/generic.c:549: cache = slab->slab_cache;
mm/kasan/generic.c:550: object = nearest_obj(cache, slab, addr);
mm/kasan/kasan.h:385: struct slab *kasan_addr_to_slab(const void *addr);
mm/kasan/report.c:492: struct slab *slab;
mm/kasan/report.c:500: slab = kasan_addr_to_slab(addr);
mm/kasan/report.c:501: if (slab) {
mm/kasan/report.c:502: info->cache = slab->slab_cache;
mm/kasan/report.c:503: info->object = nearest_obj(info->cache, slab, addr);
mm/kfence/core.c:424: struct slab *slab;
mm/kfence/core.c:492: slab = virt_to_slab(addr);
mm/kfence/core.c:493: slab->slab_cache = cache;
mm/kfence/core.c:494: slab->objects = 1;
mm/kfence/core.c:630: struct slab *slab = page_slab(page);
mm/kfence/core.c:631: slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts |
mm/kfence/core.c:698: struct slab *slab = page_slab(page);
mm/kfence/core.c:699: slab->obj_exts = 0;
mm/kfence/kfence_test.c:287: struct slab *slab = virt_to_slab(alloc);
mm/kfence/kfence_test.c:297: KUNIT_EXPECT_EQ(test, obj_to_index(s, slab, alloc), 0U);
mm/kfence/kfence_test.c:298: KUNIT_EXPECT_EQ(test, objs_per_slab(s, slab), 1);
mm/kfence/report.c:299: bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
mm/kfence/report.c:319: kpp->kp_slab = slab;
mm/memcontrol.c:2618: struct mem_cgroup *mem_cgroup_from_obj_slab(struct slab *slab, void *p)
mm/memcontrol.c:2629: obj_exts = slab_obj_exts(slab);
mm/memcontrol.c:2634: off = obj_to_index(slab->slab_cache, slab, p);
mm/memcontrol.c:2635: obj_ext = slab_obj_ext(slab, obj_exts, off);
mm/memcontrol.c:2658: struct slab *slab;
mm/memcontrol.c:2663: slab = virt_to_slab(p);
mm/memcontrol.c:2664: if (slab)
mm/memcontrol.c:2665: return mem_cgroup_from_obj_slab(slab, p);
mm/memcontrol.c:3191: struct slab *slab;
mm/memcontrol.c:3231: slab = virt_to_slab(p[i]);
mm/memcontrol.c:3233: if (!slab_obj_exts(slab) &&
mm/memcontrol.c:3234: alloc_slab_obj_exts(slab, s, flags, false)) {
mm/memcontrol.c:3250: slab_pgdat(slab), cache_vmstat_idx(s)))
mm/memcontrol.c:3253: obj_exts = slab_obj_exts(slab);
mm/memcontrol.c:3255: off = obj_to_index(s, slab, p[i]);
mm/memcontrol.c:3256: obj_ext = slab_obj_ext(slab, obj_exts, off);
mm/memcontrol.c:3265: void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
mm/memcontrol.c:3275: off = obj_to_index(s, slab, p[i]);
mm/memcontrol.c:3276: obj_ext = slab_obj_ext(slab, obj_exts, off);
mm/memcontrol.c:3283: slab_pgdat(slab), cache_vmstat_idx(s));
mm/memcontrol.c:4082: int slab = atomic_xchg(&pn->slab_reclaimable, 0);
mm/memcontrol.c:4085: lstats->state[index] += slab;
mm/memcontrol.c:4087: plstats->state_pending[index] += slab;
mm/memcontrol.c:4090: int slab = atomic_xchg(&pn->slab_unreclaimable, 0);
mm/memcontrol.c:4093: lstats->state[index] += slab;
mm/memcontrol.c:4095: plstats->state_pending[index] += slab;
mm/slab.h:95: static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
mm/slab.h:105: static_assert(sizeof(struct slab) <= sizeof(struct page));
mm/slab.h:107: static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(struct freelist_counters)));
mm/slab.h:122: const struct slab *: (const struct folio *)s, \
mm/slab.h:123: struct slab *: (struct folio *)s))
mm/slab.h:132: static inline struct slab *page_slab(const struct page *page)
mm/slab.h:142: return (struct slab *)page;
mm/slab.h:155: static inline void *slab_address(const struct slab *slab)
mm/slab.h:157: return folio_address(slab_folio(slab));
mm/slab.h:160: static inline int slab_nid(const struct slab *slab)
mm/slab.h:162: return memdesc_nid(slab->flags);
mm/slab.h:165: static inline pg_data_t *slab_pgdat(const struct slab *slab)
mm/slab.h:167: return NODE_DATA(slab_nid(slab));
mm/slab.h:170: static inline struct slab *virt_to_slab(const void *addr)
mm/slab.h:175: static inline int slab_order(const struct slab *slab)
mm/slab.h:177: return folio_order(slab_folio(slab));
mm/slab.h:180: static inline size_t slab_size(const struct slab *slab)
mm/slab.h:182: return PAGE_SIZE << slab_order(slab);
mm/slab.h:280: const struct slab *slab, void *x)
mm/slab.h:282: void *object = x - (x - slab_address(slab)) % cache->size;
mm/slab.h:283: void *last_object = slab_address(slab) +
mm/slab.h:284: (slab->objects - 1) * cache->size;
mm/slab.h:300: const struct slab *slab, const void *obj)
mm/slab.h:304: return __obj_to_index(cache, slab_address(slab), obj);
mm/slab.h:308: const struct slab *slab)
mm/slab.h:310: return slab->objects;
mm/slab.h:533: static inline unsigned long slab_obj_exts(struct slab *slab)
mm/slab.h:535: unsigned long obj_exts = READ_ONCE(slab->obj_exts);
mm/slab.h:543: obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab));
mm/slab.h:544: VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
mm/slab.h:562: static inline void slab_set_stride(struct slab *slab, unsigned int stride)
mm/slab.h:564: slab->stride = stride;
mm/slab.h:566: static inline unsigned int slab_get_stride(struct slab *slab)
mm/slab.h:568: return slab->stride;
mm/slab.h:571: static inline void slab_set_stride(struct slab *slab, unsigned int stride)
mm/slab.h:575: static inline unsigned int slab_get_stride(struct slab *slab)
mm/slab.h:591: static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
mm/slab.h:597: VM_WARN_ON_ONCE(obj_exts != slab_obj_exts(slab));
mm/slab.h:600: slab_get_stride(slab) * index);
mm/slab.h:604: int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
mm/slab.h:609: static inline unsigned long slab_obj_exts(struct slab *slab)
mm/slab.h:614: static inline struct slabobj_ext *slab_obj_ext(struct slab *slab,
mm/slab.h:621: static inline void slab_set_stride(struct slab *slab, unsigned int stride) { }
mm/slab.h:622: static inline unsigned int slab_get_stride(struct slab *slab) { return 0; }
mm/slab.h:636: void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
mm/slab.h:707: struct slab *kp_slab;
mm/slab.h:715: void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
mm/slab.h:719: const struct slab *slab, bool to_user);
mm/slab_common.c:616: static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
mm/slab_common.c:618: if (__kfence_obj_info(kpp, object, slab))
mm/slab_common.c:620: __kmem_obj_info(kpp, object, slab);
mm/slab_common.c:642: struct slab *slab;
mm/slab_common.c:649: slab = virt_to_slab(object);
mm/slab_common.c:650: if (!slab)
mm/slab_common.c:653: kmem_obj_info(&kp, object, slab);
mm/slab_common.c:1248: struct slab *slab;
mm/slab_common.c:1253: slab = virt_to_slab((void *)(long)addr);
mm/slab_common.c:1254: return slab ? slab->slab_cache : NULL;
mm/slab_common.c:1597: struct slab *slab;
mm/slab_common.c:1602: slab = virt_to_slab(obj);
mm/slab_common.c:1603: if (unlikely(!slab))
mm/slab_common.c:1606: s = slab->slab_cache;
mm/slab_common.c:1607: if (likely(!IS_ENABLED(CONFIG_NUMA) || slab_nid(slab) == numa_mem_id()))
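The mm/ entries above and below repeatedly map an object address back to its slab and owning cache. A minimal sketch of that idiom (mm-internal, assumes the mm/slab.h helpers; mirrors mm/slab_common.c:1253-1254 above):

	/* Resolve the kmem_cache that owns an allocated object, or NULL. */
	static struct kmem_cache *cache_from_object(const void *obj)
	{
		struct slab *slab = virt_to_slab(obj);	/* NULL for non-slab memory */

		return slab ? slab->slab_cache : NULL;
	}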
mm/slub.c
1107
static void print_slab_info(const struct slab *slab)
mm/slub.c
1110
slab, slab->objects, slab->inuse, slab->freelist,
mm/slub.c
1111
&slab->flags.f);
mm/slub.c
1158
static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
mm/slub.c
1161
u8 *addr = slab_address(slab);
mm/slub.c
1165
print_slab_info(slab);
mm/slub.c
1192
if (obj_exts_in_object(s, slab))
mm/slub.c
1201
static void object_err(struct kmem_cache *s, struct slab *slab,
mm/slub.c
1208
if (!object || !check_valid_pointer(s, slab, object)) {
mm/slub.c
1209
print_slab_info(slab);
mm/slub.c
1212
print_trailer(s, slab, object);
mm/slub.c
1219
static void __slab_err(struct slab *slab)
mm/slub.c
1224
print_slab_info(slab);
mm/slub.c
1230
static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
mm/slub.c
1242
__slab_err(slab);
mm/slub.c
1293
check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
mm/slub.c
1299
u8 *addr = slab_address(slab);
mm/slub.c
1318
object_err(s, slab, object, "Object corrupt");
mm/slub.c
1383
static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
mm/slub.c
1397
if (obj_exts_in_object(s, slab))
mm/slub.c
1403
return check_bytes_and_report(s, slab, p, "Object padding",
mm/slub.c
1409
slab_pad_check(struct kmem_cache *s, struct slab *slab)
mm/slub.c
1421
start = slab_address(slab);
mm/slub.c
1422
length = slab_size(slab);
mm/slub.c
1425
if (obj_exts_in_slab(s, slab) && !obj_exts_in_object(s, slab)) {
mm/slub.c
1427
remainder -= obj_exts_offset_in_slab(s, slab);
mm/slub.c
1428
remainder -= obj_exts_size_in_slab(slab);
mm/slub.c
1448
__slab_err(slab);
mm/slub.c
1453
static int check_object(struct kmem_cache *s, struct slab *slab,
mm/slub.c
1462
if (!check_bytes_and_report(s, slab, object, "Left Redzone",
mm/slub.c
1466
if (!check_bytes_and_report(s, slab, object, "Right Redzone",
mm/slub.c
1474
!check_bytes_and_report(s, slab, object,
mm/slub.c
1482
if (!check_bytes_and_report(s, slab, p, "Alignment padding",
mm/slub.c
1498
!check_bytes_and_report(s, slab, p, "Poison",
mm/slub.c
1503
!check_bytes_and_report(s, slab, p, "End Poison",
mm/slub.c
1510
if (!check_pad_bytes(s, slab, p))
mm/slub.c
1519
!check_valid_pointer(s, slab, get_freepointer(s, p))) {
mm/slub.c
1520
object_err(s, slab, p, "Freepointer corrupt");
mm/slub.c
1538
static int check_slab(struct kmem_cache *s, struct slab *slab)
mm/slub.c
1542
maxobj = order_objects(slab_order(slab), s->size);
mm/slub.c
1543
if (slab->objects > maxobj) {
mm/slub.c
1544
slab_err(s, slab, "objects %u > max %u",
mm/slub.c
1545
slab->objects, maxobj);
mm/slub.c
1548
if (slab->inuse > slab->objects) {
mm/slub.c
1549
slab_err(s, slab, "inuse %u > max %u",
mm/slub.c
1550
slab->inuse, slab->objects);
mm/slub.c
1553
if (slab->frozen) {
mm/slub.c
1554
slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed");
mm/slub.c
1559
slab_pad_check(s, slab);
mm/slub.c
1567
static bool on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
mm/slub.c
1574
fp = slab->freelist;
mm/slub.c
1575
while (fp && nr <= slab->objects) {
mm/slub.c
1578
if (!check_valid_pointer(s, slab, fp)) {
mm/slub.c
1580
object_err(s, slab, object,
mm/slub.c
1585
slab_err(s, slab, "Freepointer corrupt");
mm/slub.c
1586
slab->freelist = NULL;
mm/slub.c
1587
slab->inuse = slab->objects;
mm/slub.c
1597
if (nr > slab->objects) {
mm/slub.c
1598
slab_err(s, slab, "Freelist cycle detected");
mm/slub.c
1599
slab->freelist = NULL;
mm/slub.c
1600
slab->inuse = slab->objects;
mm/slub.c
1605
max_objects = order_objects(slab_order(slab), s->size);
mm/slub.c
1609
if (slab->objects != max_objects) {
mm/slub.c
1610
slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
mm/slub.c
1611
slab->objects, max_objects);
mm/slub.c
1612
slab->objects = max_objects;
mm/slub.c
1615
if (slab->inuse != slab->objects - nr) {
mm/slub.c
1616
slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
mm/slub.c
1617
slab->inuse, slab->objects - nr);
mm/slub.c
1618
slab->inuse = slab->objects - nr;
mm/slub.c
1624
static void trace(struct kmem_cache *s, struct slab *slab, void *object,
mm/slub.c
1631
object, slab->inuse,
mm/slub.c
1632
slab->freelist);
mm/slub.c
1646
struct kmem_cache_node *n, struct slab *slab)
mm/slub.c
1652
list_add(&slab->slab_list, &n->full);
mm/slub.c
1655
static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
mm/slub.c
1661
list_del(&slab->slab_list);
mm/slub.c
1695
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
mm/slub.c
1701
memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
mm/slub.c
1706
struct slab *slab, void *object)
mm/slub.c
1708
if (!check_slab(s, slab))
mm/slub.c
1711
if (!check_valid_pointer(s, slab, object)) {
mm/slub.c
1712
object_err(s, slab, object, "Freelist Pointer check fails");
mm/slub.c
1716
if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
mm/slub.c
1723
struct slab *slab, void *object, int orig_size)
mm/slub.c
1726
if (!alloc_consistency_checks(s, slab, object))
mm/slub.c
1731
trace(s, slab, object, 1);
mm/slub.c
1742
slab->inuse = slab->objects;
mm/slub.c
1743
slab->freelist = NULL;
mm/slub.c
1744
slab->frozen = 1; /* mark consistency-failed slab as frozen */
mm/slub.c
1750
struct slab *slab, void *object, unsigned long addr)
mm/slub.c
1752
if (!check_valid_pointer(s, slab, object)) {
mm/slub.c
1753
slab_err(s, slab, "Invalid object pointer 0x%p", object);
mm/slub.c
1757
if (on_freelist(s, slab, object)) {
mm/slub.c
1758
object_err(s, slab, object, "Object already free");
mm/slub.c
1762
if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
mm/slub.c
1765
if (unlikely(s != slab->slab_cache)) {
mm/slub.c
1766
if (!slab->slab_cache) {
mm/slub.c
1767
slab_err(NULL, slab, "No slab cache for object 0x%p",
mm/slub.c
1770
object_err(s, slab, object,
mm/slub.c
1997
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
mm/slub.c
2000
struct slab *slab, void *object, int orig_size) { return true; }
mm/slub.c
2003
struct slab *slab, void *head, void *tail, int *bulk_cnt,
mm/slub.c
2006
static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
mm/slub.c
2007
static inline int check_object(struct kmem_cache *s, struct slab *slab,
mm/slub.c
2013
struct slab *slab) {}
mm/slub.c
2015
struct slab *slab) {}
mm/slub.c
2046
struct slab *obj_slab;
mm/slub.c
2070
static inline bool mark_failed_objexts_alloc(struct slab *slab)
mm/slub.c
2072
return cmpxchg(&slab->obj_exts, 0, OBJEXTS_ALLOC_FAIL) == 0;
mm/slub.c
2094
static inline bool mark_failed_objexts_alloc(struct slab *slab) { return false; }
mm/slub.c
2100
static inline void init_slab_obj_exts(struct slab *slab)
mm/slub.c
2102
slab->obj_exts = 0;
mm/slub.c
2117
struct slab *slab, gfp_t gfp)
mm/slub.c
2119
size_t sz = sizeof(struct slabobj_ext) * slab->objects;
mm/slub.c
2141
int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
mm/slub.c
2145
unsigned int objects = objs_per_slab(s, slab);
mm/slub.c
2155
sz = obj_exts_alloc_size(s, slab, gfp);
mm/slub.c
2165
slab_nid(slab));
mm/slub.c
2167
vec = kmalloc_node(sz, gfp | __GFP_ZERO, slab_nid(slab));
mm/slub.c
2175
if (!mark_failed_objexts_alloc(slab) &&
mm/slub.c
2176
slab_obj_exts(slab))
mm/slub.c
2190
old_exts = READ_ONCE(slab->obj_exts);
mm/slub.c
2199
slab->obj_exts = new_exts;
mm/slub.c
2212
} else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
mm/slub.c
2222
static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
mm/slub.c
2226
obj_exts = (struct slabobj_ext *)slab_obj_exts(slab);
mm/slub.c
2233
slab->obj_exts = 0;
mm/slub.c
2237
if (obj_exts_in_slab(slab->slab_cache, slab)) {
mm/slub.c
2238
slab->obj_exts = 0;
mm/slub.c
2254
slab->obj_exts = 0;
mm/slub.c
2262
static void alloc_slab_obj_exts_early(struct kmem_cache *s, struct slab *slab)
mm/slub.c
2268
slab_set_stride(slab, sizeof(struct slabobj_ext));
mm/slub.c
2273
if (obj_exts_fit_within_slab_leftover(s, slab)) {
mm/slub.c
2274
addr = slab_address(slab) + obj_exts_offset_in_slab(s, slab);
mm/slub.c
2279
memset(addr, 0, obj_exts_size_in_slab(slab));
mm/slub.c
2285
slab->obj_exts = obj_exts;
mm/slub.c
2289
obj_exts = (unsigned long)slab_address(slab);
mm/slub.c
2294
for_each_object(addr, s, slab_address(slab), slab->objects)
mm/slub.c
2302
slab->obj_exts = obj_exts;
mm/slub.c
2303
slab_set_stride(slab, s->size);
mm/slub.c
2313
static inline void init_slab_obj_exts(struct slab *slab)
mm/slub.c
2317
static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
mm/slub.c
2323
static inline void free_slab_obj_exts(struct slab *slab, bool allow_spin)
mm/slub.c
2328
struct slab *slab)
mm/slub.c
2337
prepare_slab_obj_exts_hook(struct kmem_cache *s, struct slab *slab,
mm/slub.c
2340
if (!slab_obj_exts(slab) &&
mm/slub.c
2341
alloc_slab_obj_exts(slab, s, flags, false)) {
mm/slub.c
2347
return slab_obj_exts(slab);
mm/slub.c
2357
struct slab *slab;
mm/slub.c
2368
slab = virt_to_slab(object);
mm/slub.c
2369
obj_exts = prepare_slab_obj_exts_hook(s, slab, flags, object);
mm/slub.c
2376
unsigned int obj_idx = obj_to_index(s, slab, object);
mm/slub.c
2379
obj_ext = slab_obj_ext(slab, obj_exts, obj_idx);
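
The lookup above pairs obj_to_index() with slab_obj_ext(): the object's index within the slab selects one record in the extension vector. Because the vector may be packed (one record after another) or interleaved into the slab's own objects, the second step walks by a stride rather than assuming sizeof(record). The arithmetic, reduced to a toy form:

    #include <stddef.h>

    /* Index of an object within its slab (cf. obj_to_index()). */
    static inline unsigned int obj_index(const char *slab_base,
                                         const char *obj, size_t obj_size)
    {
            return (unsigned int)((obj - slab_base) / obj_size);
    }

    /* Extension record for that index (cf. slab_obj_ext()); stride is
     * sizeof(record) for a packed vector, or the cache's object size
     * when records live inside the objects themselves. */
    static inline void *obj_ext_at(char *exts_base, unsigned int idx,
                                   size_t stride)
    {
            return exts_base + (size_t)idx * stride;
    }
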
mm/slub.c
2396
__alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
mm/slub.c
2406
obj_exts = slab_obj_exts(slab);
mm/slub.c
2412
unsigned int off = obj_to_index(s, slab, p[i]);
mm/slub.c
2414
alloc_tag_sub(&slab_obj_ext(slab, obj_exts, off)->ref, s->size);
mm/slub.c
2420
alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
mm/slub.c
2424
__alloc_tagging_slab_free_hook(s, slab, p, objects);
mm/slub.c
2435
alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
mm/slub.c
2471
void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
mm/slub.c
2479
obj_exts = slab_obj_exts(slab);
mm/slub.c
2484
__memcg_slab_free_hook(s, slab, p, objects, obj_exts);
mm/slub.c
2495
struct slab *slab;
mm/slub.c
2521
slab = page_slab(page);
mm/slub.c
2522
s = slab->slab_cache;
mm/slub.c
2533
obj_exts = slab_obj_exts(slab);
mm/slub.c
2536
off = obj_to_index(s, slab, p);
mm/slub.c
2537
obj_ext = slab_obj_ext(slab, obj_exts, off);
mm/slub.c
2557
static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
mm/slub.c
2949
struct slab *slab = virt_to_slab(p[i]);
mm/slub.c
2951
memcg_slab_free_hook(s, slab, p + i, 1);
mm/slub.c
2952
alloc_tagging_slab_free_hook(s, slab, p + i, 1);
mm/slub.c
2959
if (slab_test_pfmemalloc(slab))
mm/slub.c
3278
static inline struct slab *alloc_slab_page(gfp_t flags, int node,
mm/slub.c
3283
struct slab *slab;
mm/slub.c
3298
slab = page_slab(page);
mm/slub.c
3300
slab_set_pfmemalloc(slab);
mm/slub.c
3302
return slab;
mm/slub.c
3371
static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab,
mm/slub.c
3379
if (slab->objects < 2 || !s->random_seq)
mm/slub.c
3397
page_limit = slab->objects * s->size;
mm/slub.c
3398
start = fixup_red_left(s, slab_address(slab));
mm/slub.c
3403
slab->freelist = cur;
mm/slub.c
3405
for (idx = 1; idx < slab->objects; idx++) {
mm/slub.c
3422
static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab,
mm/slub.c
3429
static __always_inline void account_slab(struct slab *slab, int order,
mm/slub.c
3434
!slab_obj_exts(slab))
mm/slub.c
3435
alloc_slab_obj_exts(slab, s, gfp, true);
mm/slub.c
3437
mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
mm/slub.c
3441
static __always_inline void unaccount_slab(struct slab *slab, int order,
mm/slub.c
3449
free_slab_obj_exts(slab, allow_spin);
mm/slub.c
3451
mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
mm/slub.c
3455
static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
mm/slub.c
3458
struct slab *slab;
mm/slub.c
3481
slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin);
mm/slub.c
3482
if (unlikely(!slab)) {
mm/slub.c
3489
slab = alloc_slab_page(alloc_gfp, node, oo, allow_spin);
mm/slub.c
3490
if (unlikely(!slab))
mm/slub.c
3495
slab->objects = oo_objects(oo);
mm/slub.c
3496
slab->inuse = 0;
mm/slub.c
3497
slab->frozen = 0;
mm/slub.c
3499
slab->slab_cache = s;
mm/slub.c
3501
kasan_poison_slab(slab);
mm/slub.c
3503
start = slab_address(slab);
mm/slub.c
3505
setup_slab_debug(s, slab, start);
mm/slub.c
3506
init_slab_obj_exts(slab);
mm/slub.c
3511
alloc_slab_obj_exts_early(s, slab);
mm/slub.c
3512
account_slab(slab, oo_order(oo), s, flags);
mm/slub.c
3514
shuffle = shuffle_freelist(s, slab, allow_spin);
mm/slub.c
3519
slab->freelist = start;
mm/slub.c
3520
for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
mm/slub.c
3529
return slab;
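
The tail of allocate_slab() above (mm/slub.c 3519-3520) threads every object of a fresh slab onto a singly linked freelist when shuffling is off: each object is linked to its successor and the last one terminates the chain. A userspace model, assuming at least one object and storing the link at offset 0 (the kernel stores it at s->offset and may scramble it for hardening):

    #include <stddef.h>

    struct toy_slab {
            char *base;            /* slab_address() */
            size_t obj_size;       /* s->size */
            unsigned int objects;  /* >= 1 assumed */
            void *freelist;
    };

    /* Sequential (non-shuffled) freelist setup. */
    static void init_freelist(struct toy_slab *s)
    {
            char *p = s->base;

            for (unsigned int i = 0; i < s->objects - 1; i++) {
                    *(void **)p = p + s->obj_size;  /* link to next object */
                    p += s->obj_size;
            }
            *(void **)p = NULL;                     /* last object ends chain */
            s->freelist = s->base;
    }
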
mm/slub.c
3532
static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
mm/slub.c
3543
static void __free_slab(struct kmem_cache *s, struct slab *slab, bool allow_spin)
mm/slub.c
3545
struct page *page = slab_page(slab);
mm/slub.c
3549
__slab_clear_pfmemalloc(slab);
mm/slub.c
3553
unaccount_slab(slab, order, s, allow_spin);
mm/slub.c
3560
static void free_new_slab_nolock(struct kmem_cache *s, struct slab *slab)
mm/slub.c
3566
__free_slab(s, slab, false);
mm/slub.c
3571
struct slab *slab = container_of(h, struct slab, rcu_head);
mm/slub.c
3573
__free_slab(slab->slab_cache, slab, true);
mm/slub.c
3576
static void free_slab(struct kmem_cache *s, struct slab *slab)
mm/slub.c
3581
slab_pad_check(s, slab);
mm/slub.c
3582
for_each_object(p, s, slab_address(slab), slab->objects)
mm/slub.c
3583
check_object(s, slab, p, SLUB_RED_INACTIVE);
mm/slub.c
3587
call_rcu(&slab->rcu_head, rcu_free_slab);
mm/slub.c
3589
__free_slab(s, slab, true);
mm/slub.c
3592
static void discard_slab(struct kmem_cache *s, struct slab *slab)
mm/slub.c
3594
dec_slabs_node(s, slab_nid(slab), slab->objects);
mm/slub.c
3595
free_slab(s, slab);
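
rcu_free_slab() above recovers the slab from its embedded rcu_head with container_of() and frees it only then, so the pages cannot be reused while RCU readers may still touch objects. The embedded-callback pattern in isolation, with toy types:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct cb_head { void (*func)(struct cb_head *); };

    struct toy_slab {
            int id;
            struct cb_head rcu_head;   /* embedded, like struct slab's */
    };

    /* The callback receives a pointer to the member and recovers the
     * containing structure before releasing it. */
    static void toy_rcu_free(struct cb_head *h)
    {
            struct toy_slab *slab = container_of(h, struct toy_slab, rcu_head);
            /* ...safe to free slab here, after the grace period... */
            (void)slab;
    }
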
mm/slub.c
3598
static inline bool slab_test_node_partial(const struct slab *slab)
mm/slub.c
3600
return test_bit(SL_partial, &slab->flags.f);
mm/slub.c
3603
static inline void slab_set_node_partial(struct slab *slab)
mm/slub.c
3605
set_bit(SL_partial, &slab->flags.f);
mm/slub.c
3608
static inline void slab_clear_node_partial(struct slab *slab)
mm/slub.c
3610
clear_bit(SL_partial, &slab->flags.f);
mm/slub.c
3617
__add_partial(struct kmem_cache_node *n, struct slab *slab, enum add_mode mode)
mm/slub.c
3621
list_add_tail(&slab->slab_list, &n->partial);
mm/slub.c
3623
list_add(&slab->slab_list, &n->partial);
mm/slub.c
3624
slab_set_node_partial(slab);
mm/slub.c
3628
struct slab *slab, enum add_mode mode)
mm/slub.c
3631
__add_partial(n, slab, mode);
mm/slub.c
3635
struct slab *slab)
mm/slub.c
3638
list_del(&slab->slab_list);
mm/slub.c
3639
slab_clear_node_partial(slab);
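
__add_partial() and remove_partial() above flip SL_partial in the same place the slab enters or leaves the node's partial list, so code holding only a slab reference (the free path, for instance) can test membership with one bit read instead of walking the list. A sketch of keeping a flag and a list in lockstep; in the kernel both updates happen under the node's list lock:

    #include <stdbool.h>
    #include <stddef.h>

    struct toy_slab {
            struct toy_slab *next;
            bool on_partial;           /* models test_bit(SL_partial, ...) */
    };

    struct toy_node { struct toy_slab *partial; };

    static void add_partial(struct toy_node *n, struct toy_slab *s)
    {
            s->next = n->partial;
            n->partial = s;
            s->on_partial = true;      /* flag set with the link, not after */
    }

    static void remove_partial(struct toy_node *n, struct toy_slab *s)
    {
            for (struct toy_slab **pp = &n->partial; *pp; pp = &(*pp)->next) {
                    if (*pp == s) {
                            *pp = s->next;
                            break;
                    }
            }
            s->on_partial = false;
    }
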
mm/slub.c
3650
struct kmem_cache_node *n, struct slab *slab, int orig_size)
mm/slub.c
3658
if (!validate_slab_ptr(slab)) {
mm/slub.c
3659
slab_err(s, slab, "Not a valid slab page");
mm/slub.c
3665
object = slab->freelist;
mm/slub.c
3666
slab->freelist = get_freepointer(s, object);
mm/slub.c
3667
slab->inuse++;
mm/slub.c
3669
if (!alloc_debug_processing(s, slab, object, orig_size)) {
mm/slub.c
3670
remove_partial(n, slab);
mm/slub.c
3674
if (slab->inuse == slab->objects) {
mm/slub.c
3675
remove_partial(n, slab);
mm/slub.c
3676
add_full(s, n, slab);
mm/slub.c
3687
static void *alloc_single_from_new_slab(struct kmem_cache *s, struct slab *slab,
mm/slub.c
3691
int nid = slab_nid(slab);
mm/slub.c
3698
free_new_slab_nolock(s, slab);
mm/slub.c
3702
object = slab->freelist;
mm/slub.c
3703
slab->freelist = get_freepointer(s, object);
mm/slub.c
3704
slab->inuse = 1;
mm/slub.c
3706
if (!alloc_debug_processing(s, slab, object, orig_size)) {
mm/slub.c
3721
if (slab->inuse == slab->objects)
mm/slub.c
3722
add_full(s, n, slab);
mm/slub.c
3724
add_partial(n, slab, ADD_TO_HEAD);
mm/slub.c
3726
inc_slabs_node(s, nid, slab->objects);
mm/slub.c
3732
static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
mm/slub.c
3739
struct slab *slab, *slab2;
mm/slub.c
3754
list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
mm/slub.c
3758
if (!pfmemalloc_match(slab, pc->flags))
mm/slub.c
3768
flc.counters = data_race(READ_ONCE(slab->counters));
mm/slub.c
3776
remove_partial(n, slab);
mm/slub.c
3778
list_add(&slab->slab_list, &pc->slabs);
mm/slub.c
3796
struct slab *slab, *slab2;
mm/slub.c
3813
list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
mm/slub.c
3817
if (!pfmemalloc_match(slab, pc->flags))
mm/slub.c
3821
object = alloc_single_from_partial(s, n, slab,
mm/slub.c
3834
old.freelist = slab->freelist;
mm/slub.c
3835
old.counters = slab->counters;
mm/slub.c
3841
} while (!__slab_update_freelist(s, slab, &old, &new, "get_from_partial_node"));
mm/slub.c
3845
remove_partial(n, slab);
mm/slub.c
4103
static int count_free(struct slab *slab)
mm/slub.c
4105
return slab->objects - slab->inuse;
mm/slub.c
4115
struct slab *slab, void *head, void *tail, int *bulk_cnt,
mm/slub.c
4123
if (!check_slab(s, slab))
mm/slub.c
4127
if (slab->inuse < *bulk_cnt) {
mm/slub.c
4128
slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
mm/slub.c
4129
slab->inuse, *bulk_cnt);
mm/slub.c
4139
if (!free_consistency_checks(s, slab, object, addr))
mm/slub.c
4145
trace(s, slab, object, 0);
mm/slub.c
4158
slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
mm/slub.c
4174
int (*get_count)(struct slab *))
mm/slub.c
4178
struct slab *slab;
mm/slub.c
4181
list_for_each_entry(slab, &n->partial, slab_list)
mm/slub.c
4182
x += get_count(slab);
mm/slub.c
4195
struct slab *slab;
mm/slub.c
4199
list_for_each_entry(slab, &n->partial, slab_list)
mm/slub.c
4200
x += slab->objects - slab->inuse;
mm/slub.c
4209
list_for_each_entry(slab, &n->partial, slab_list) {
mm/slub.c
4210
x += slab->objects - slab->inuse;
mm/slub.c
4214
list_for_each_entry_reverse(slab, &n->partial, slab_list) {
mm/slub.c
4215
x += slab->objects - slab->inuse;
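
count_free() and the count_partial() walkers above reduce to the same arithmetic: a slab with slab->objects total slots and slab->inuse allocated ones contributes objects - inuse free slots, summed over the node's partial list. For example, a slab with 32 objects and 27 in use contributes 5. In miniature, with a toy list in place of list_head:

    struct toy_slab {
            struct toy_slab *next;
            unsigned int objects;      /* capacity */
            unsigned int inuse;        /* allocated */
    };

    /* Sum of free slots across a partial list. */
    static unsigned long count_partial_free(const struct toy_slab *list)
    {
            unsigned long x = 0;

            for (const struct toy_slab *s = list; s; s = s->next)
                    x += s->objects - s->inuse;
            return x;
    }
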
mm/slub.c
4266
static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
mm/slub.c
4268
if (unlikely(slab_test_pfmemalloc(slab)))
mm/slub.c
4282
static inline void *get_freelist_nofreeze(struct kmem_cache *s, struct slab *slab)
mm/slub.c
4287
old.freelist = slab->freelist;
mm/slub.c
4288
old.counters = slab->counters;
mm/slub.c
4296
} while (!slab_update_freelist(s, slab, &old, &new, "get_freelist_nofreeze"));
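
get_freelist_nofreeze() above is the standard SLUB update loop: snapshot freelist and counters, build the desired new values, and retry if slab_update_freelist() finds that another CPU changed the pair in between. The kernel swaps freelist and counters as a single unit; this toy version CASes only the list head to show the loop's shape:

    #include <stdatomic.h>
    #include <stddef.h>

    /* Detach the whole freelist atomically, retrying on contention. */
    static void *take_freelist(_Atomic(void *) *freelist)
    {
            void *old = atomic_load(freelist);

            do {
                    if (!old)
                            return NULL;   /* nothing to take */
            } while (!atomic_compare_exchange_weak(freelist, &old, NULL));

            return old;                    /* caller owns the chain now */
    }
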
mm/slub.c
4316
static unsigned int alloc_from_new_slab(struct kmem_cache *s, struct slab *slab,
mm/slub.c
4329
needs_add_partial = (slab->objects > count);
mm/slub.c
4333
n = get_node(s, slab_nid(slab));
mm/slub.c
4337
free_new_slab_nolock(s, slab);
mm/slub.c
4342
object = slab->freelist;
mm/slub.c
4348
slab->inuse++;
mm/slub.c
4351
slab->freelist = object;
mm/slub.c
4356
n = get_node(s, slab_nid(slab));
mm/slub.c
4359
add_partial(n, slab, ADD_TO_HEAD);
mm/slub.c
4363
inc_slabs_node(s, slab_nid(slab), slab->objects);
mm/slub.c
4379
struct slab *slab;
mm/slub.c
4413
slab = new_slab(s, pc.flags, node);
mm/slub.c
4415
if (unlikely(!slab)) {
mm/slub.c
4428
object = alloc_single_from_new_slab(s, slab, orig_size, gfpflags);
mm/slub.c
4433
alloc_from_new_slab(s, slab, &object, 1, allow_spin);
mm/slub.c
5398
struct kmem_cache *s, struct slab *slab,
mm/slub.c
5402
struct kmem_cache_node *n = get_node(s, slab_nid(slab));
mm/slub.c
5403
struct slab *slab_free = NULL;
mm/slub.c
5417
if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
mm/slub.c
5418
void *prior = slab->freelist;
mm/slub.c
5421
slab->inuse -= cnt;
mm/slub.c
5423
slab->freelist = head;
mm/slub.c
5430
if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
mm/slub.c
5431
slab_free = slab;
mm/slub.c
5435
remove_full(s, n, slab);
mm/slub.c
5437
add_partial(n, slab, ADD_TO_TAIL);
mm/slub.c
5441
remove_partial(n, slab);
mm/slub.c
5470
static void __slab_free(struct kmem_cache *s, struct slab *slab,
mm/slub.c
5482
free_to_partial_list(s, slab, head, tail, cnt, addr);
mm/slub.c
5492
old.freelist = slab->freelist;
mm/slub.c
5493
old.counters = slab->counters;
mm/slub.c
5510
n = get_node(s, slab_nid(slab));
mm/slub.c
5521
on_node_partial = slab_test_node_partial(slab);
mm/slub.c
5524
} while (!slab_update_freelist(s, slab, &old, &new, "__slab_free"));
mm/slub.c
5555
add_partial(n, slab, ADD_TO_TAIL);
mm/slub.c
5567
remove_partial(n, slab);
mm/slub.c
5573
discard_slab(s, slab);
mm/slub.c
5981
struct slab *slab = virt_to_slab(p[i]);
mm/slub.c
5983
memcg_slab_free_hook(s, slab, p + i, 1);
mm/slub.c
5984
alloc_tagging_slab_free_hook(s, slab, p + i, 1);
mm/slub.c
5991
if (unlikely((IS_ENABLED(CONFIG_NUMA) && slab_nid(slab) != node)
mm/slub.c
5992
|| slab_test_pfmemalloc(slab))) {
mm/slub.c
604
static inline bool slab_test_pfmemalloc(const struct slab *slab)
mm/slub.c
606
return test_bit(SL_pfmemalloc, &slab->flags.f);
mm/slub.c
609
static inline void slab_set_pfmemalloc(struct slab *slab)
mm/slub.c
611
set_bit(SL_pfmemalloc, &slab->flags.f);
mm/slub.c
6116
struct slab *slab;
mm/slub.c
6119
slab = virt_to_slab(x);
mm/slub.c
6120
s = slab->slab_cache;
mm/slub.c
6132
__slab_free(s, slab, x, x, 1, _THIS_IP_);
mm/slub.c
614
static inline void __slab_clear_pfmemalloc(struct slab *slab)
mm/slub.c
6159
void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
mm/slub.c
616
__clear_bit(SL_pfmemalloc, &slab->flags.f);
mm/slub.c
6162
memcg_slab_free_hook(s, slab, &object, 1);
mm/slub.c
6163
alloc_tagging_slab_free_hook(s, slab, &object, 1);
mm/slub.c
6168
if (likely(!IS_ENABLED(CONFIG_NUMA) || slab_nid(slab) == numa_mem_id())
mm/slub.c
6169
&& likely(!slab_test_pfmemalloc(slab))) {
mm/slub.c
6174
__slab_free(s, slab, object, object, 1, addr);
mm/slub.c
6183
struct slab *slab = virt_to_slab(object);
mm/slub.c
6185
alloc_tagging_slab_free_hook(s, slab, &object, 1);
mm/slub.c
6188
__slab_free(s, slab, object, object, 1, _RET_IP_);
mm/slub.c
6193
void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
mm/slub.c
6196
memcg_slab_free_hook(s, slab, p, cnt);
mm/slub.c
6197
alloc_tagging_slab_free_hook(s, slab, p, cnt);
mm/slub.c
6203
__slab_free(s, slab, head, tail, cnt, addr);
mm/slub.c
6214
struct slab *slab = virt_to_slab(object);
mm/slub.c
622
static __always_inline void slab_lock(struct slab *slab)
mm/slub.c
6223
if (WARN_ON(!slab))
mm/slub.c
6225
s = slab->slab_cache;
mm/slub.c
6231
__slab_free(s, slab, object, object, 1, _THIS_IP_);
mm/slub.c
624
bit_spin_lock(SL_locked, &slab->flags.f);
mm/slub.c
6248
struct slab *slab;
mm/slub.c
6250
slab = virt_to_slab(obj);
mm/slub.c
6251
if (WARN_ONCE(!slab,
mm/slub.c
6256
cachep = slab->slab_cache;
mm/slub.c
627
static __always_inline void slab_unlock(struct slab *slab)
mm/slub.c
6277
struct slab *slab;
mm/slub.c
6279
slab = virt_to_slab(x);
mm/slub.c
6288
if (unlikely(!slab || (slab->slab_cache != s))) {
mm/slub.c
629
bit_spin_unlock(SL_locked, &slab->flags.f);
mm/slub.c
6295
slab_free(s, slab, x, _RET_IP_);
mm/slub.c
6299
static inline size_t slab_ksize(struct slab *slab)
mm/slub.c
6301
struct kmem_cache *s = slab->slab_cache;
mm/slub.c
6320
else if (obj_exts_in_object(s, slab))
mm/slub.c
633
__update_freelist_fast(struct slab *slab, struct freelist_counters *old,
mm/slub.c
6331
struct slab *slab;
mm/slub.c
6341
slab = page_slab(page);
mm/slub.c
6343
if (WARN_ON(!slab))
mm/slub.c
6347
skip_orig_size_check(slab->slab_cache, object);
mm/slub.c
6350
return slab_ksize(slab);
mm/slub.c
637
return try_cmpxchg_freelist(&slab->freelist_counters,
mm/slub.c
6419
struct slab *slab;
mm/slub.c
6430
slab = page_slab(page);
mm/slub.c
6431
if (!slab) {
mm/slub.c
6441
s = slab->slab_cache;
mm/slub.c
6442
slab_addr = slab_address(slab);
mm/slub.c
6453
slab_free(s, slab, obj, _RET_IP_);
mm/slub.c
646
__update_freelist_slow(struct slab *slab, struct freelist_counters *old,
mm/slub.c
6465
struct slab *slab;
mm/slub.c
6475
slab = page_slab(page);
mm/slub.c
6476
if (!slab) {
mm/slub.c
6482
s = slab->slab_cache;
mm/slub.c
6483
slab_free(s, slab, x, _RET_IP_);
mm/slub.c
6498
struct slab *slab;
mm/slub.c
6505
slab = virt_to_slab(object);
mm/slub.c
6506
if (unlikely(!slab)) {
mm/slub.c
651
slab_lock(slab);
mm/slub.c
6511
s = slab->slab_cache;
mm/slub.c
6513
memcg_slab_free_hook(s, slab, &x, 1);
mm/slub.c
6514
alloc_tagging_slab_free_hook(s, slab, &x, 1);
mm/slub.c
652
if (slab->freelist == old->freelist &&
mm/slub.c
653
slab->counters == old->counters) {
mm/slub.c
654
slab->freelist = new->freelist;
mm/slub.c
6543
if (likely(!IS_ENABLED(CONFIG_NUMA) || slab_nid(slab) == numa_mem_id())) {
mm/slub.c
656
WRITE_ONCE(slab->counters, new->counters);
mm/slub.c
6586
struct slab *slab = page_slab(page);
mm/slub.c
6588
if (!slab) {
mm/slub.c
659
slab_unlock(slab);
mm/slub.c
6594
s = slab->slab_cache;
mm/slub.c
671
static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
mm/slub.c
680
ret = __update_freelist_fast(slab, old, new);
mm/slub.c
682
ret = __update_freelist_slow(slab, old, new);
mm/slub.c
6880
struct slab *slab;
mm/slub.c
6906
struct slab *slab;
mm/slub.c
6911
slab = page_slab(page);
mm/slub.c
6914
if (!slab) {
mm/slub.c
6916
df->slab = NULL;
mm/slub.c
6920
df->slab = slab;
mm/slub.c
6921
df->s = slab->slab_cache;
mm/slub.c
6923
df->slab = slab;
mm/slub.c
6941
if (df->slab == virt_to_slab(object)) {
mm/slub.c
697
static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
mm/slub.c
6973
if (!df.slab)
mm/slub.c
6979
__slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
mm/slub.c
7003
if (!df.slab)
mm/slub.c
7006
slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
mm/slub.c
7018
struct slab *slab, *slab2;
mm/slub.c
703
ret = __update_freelist_fast(slab, old, new);
mm/slub.c
7030
list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
mm/slub.c
7032
list_del(&slab->slab_list);
mm/slub.c
7034
object = get_freelist_nofreeze(s, slab);
mm/slub.c
7059
__slab_free(s, slab, head, tail, cnt, _RET_IP_);
mm/slub.c
7069
list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
mm/slub.c
7071
if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial))
mm/slub.c
7074
list_del(&slab->slab_list);
mm/slub.c
7075
add_partial(n, slab, ADD_TO_HEAD);
mm/slub.c
708
ret = __update_freelist_slow(slab, old, new);
mm/slub.c
7081
list_for_each_entry_safe(slab, slab2, &pc.slabs, slab_list) {
mm/slub.c
7083
list_del(&slab->slab_list);
mm/slub.c
7084
discard_slab(s, slab);
mm/slub.c
7157
struct slab *slab;
mm/slub.c
7175
slab = new_slab(s, gfp, local_node);
mm/slub.c
7176
if (!slab)
mm/slub.c
7185
refilled += alloc_from_new_slab(s, slab, p + refilled, max - refilled,
mm/slub.c
7516
struct slab *slab;
mm/slub.c
7521
slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
mm/slub.c
7523
BUG_ON(!slab);
mm/slub.c
7524
if (slab_nid(slab) != node) {
mm/slub.c
7529
n = slab->freelist;
mm/slub.c
7535
slab->freelist = get_freepointer(kmem_cache_node, n);
mm/slub.c
7536
slab->inuse = 1;
mm/slub.c
7539
inc_slabs_node(kmem_cache_node, node, slab->objects);
mm/slub.c
7545
__add_partial(n, slab, ADD_TO_HEAD);
mm/slub.c
7817
static void list_slab_objects(struct kmem_cache *s, struct slab *slab)
mm/slub.c
7820
void *addr = slab_address(slab);
mm/slub.c
7827
__fill_map(object_map, s, slab);
mm/slub.c
7829
for_each_object(p, s, addr, slab->objects) {
mm/slub.c
7840
__slab_err(slab);
mm/slub.c
7852
struct slab *slab, *h;
mm/slub.c
7856
list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
mm/slub.c
7857
if (!slab->inuse) {
mm/slub.c
7858
remove_partial(n, slab);
mm/slub.c
7859
list_add(&slab->slab_list, &discard);
mm/slub.c
7861
list_slab_objects(s, slab);
mm/slub.c
7866
list_for_each_entry_safe(slab, h, &discard, slab_list)
mm/slub.c
7867
discard_slab(s, slab);
mm/slub.c
787
static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
mm/slub.c
789
return sizeof(struct slabobj_ext) * slab->objects;
mm/slub.c
7907
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
mm/slub.c
7914
struct kmem_cache *s = slab->slab_cache;
mm/slub.c
7918
kpp->kp_slab = slab;
mm/slub.c
7920
base = slab_address(slab);
mm/slub.c
7927
objnr = obj_to_index(s, slab, objp);
mm/slub.c
793
struct slab *slab)
mm/slub.c
7931
if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
mm/slub.c
797
objext_offset = s->size * slab->objects;
mm/slub.c
803
struct slab *slab)
mm/slub.c
8045
const struct slab *slab, bool to_user)
mm/slub.c
805
unsigned long objext_offset = obj_exts_offset_in_slab(s, slab);
mm/slub.c
8054
s = slab->slab_cache;
mm/slub.c
8057
if (ptr < slab_address(slab))
mm/slub.c
806
unsigned long objext_size = obj_exts_size_in_slab(slab);
mm/slub.c
8065
offset = (ptr - slab_address(slab)) % s->size;
mm/slub.c
808
return objext_offset + objext_size <= slab_size(slab);
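
The helpers above (mm/slub.c 787-865) decide whether the extension vector can live in the slab's own unused tail: the objects occupy s->size * slab->objects bytes, and the vector fits if it squeezes between that offset and slab_size(). For instance, 16 objects of 248 bytes in a 4096-byte slab leave 128 bytes, exactly enough for 16 eight-byte records. The predicate, reduced to its arithmetic:

    #include <stdbool.h>
    #include <stddef.h>

    /* Mirrors obj_exts_fit_within_slab_leftover(): one record per object,
     * placed right after the last object. */
    static bool exts_fit(size_t obj_size, unsigned int objects,
                         size_t slab_size, size_t record_size)
    {
            size_t offset = obj_size * objects;      /* end of the objects */

            return offset + record_size * objects <= slab_size;
    }
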
mm/slub.c
8101
struct slab *slab;
mm/slub.c
8102
struct slab *t;
mm/slub.c
811
static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
mm/slub.c
8124
list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
mm/slub.c
8125
int free = slab->objects - slab->inuse;
mm/slub.c
8133
if (free == slab->objects) {
mm/slub.c
8134
list_move(&slab->slab_list, &discard);
mm/slub.c
8135
slab_clear_node_partial(slab);
mm/slub.c
8137
dec_slabs_node(s, node, slab->objects);
mm/slub.c
8139
list_move(&slab->slab_list, promote + free - 1);
mm/slub.c
8152
list_for_each_entry_safe(slab, t, &discard, slab_list)
mm/slub.c
8153
free_slab(s, slab);
mm/slub.c
817
obj_exts = slab_obj_exts(slab);
mm/slub.c
821
start = (unsigned long)slab_address(slab);
mm/slub.c
822
end = start + slab_size(slab);
mm/slub.c
8280
struct slab *p;
mm/slub.c
831
static inline unsigned int obj_exts_size_in_slab(struct slab *slab)
mm/slub.c
837
struct slab *slab)
mm/slub.c
843
struct slab *slab)
mm/slub.c
848
static inline bool obj_exts_in_slab(struct kmem_cache *s, struct slab *slab)
mm/slub.c
8529
static int count_inuse(struct slab *slab)
mm/slub.c
8531
return slab->inuse;
mm/slub.c
8534
static int count_total(struct slab *slab)
mm/slub.c
8536
return slab->objects;
mm/slub.c
8541
static void validate_slab(struct kmem_cache *s, struct slab *slab,
mm/slub.c
8545
void *addr = slab_address(slab);
mm/slub.c
8547
if (!validate_slab_ptr(slab)) {
mm/slub.c
8548
slab_err(s, slab, "Not a valid slab page");
mm/slub.c
8552
if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
mm/slub.c
8556
__fill_map(obj_map, s, slab);
mm/slub.c
8557
for_each_object(p, s, addr, slab->objects) {
mm/slub.c
856
static bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
mm/slub.c
8561
if (!check_object(s, slab, p, val))
mm/slub.c
8570
struct slab *slab;
mm/slub.c
8575
list_for_each_entry(slab, &n->partial, slab_list) {
mm/slub.c
8576
validate_slab(s, slab, obj_map);
mm/slub.c
8588
list_for_each_entry(slab, &n->full, slab_list) {
mm/slub.c
8589
validate_slab(s, slab, obj_map);
mm/slub.c
864
return obj_exts_in_slab(s, slab) &&
mm/slub.c
865
(slab_get_stride(slab) == s->size);
mm/slub.c
8773
struct slab *slab, enum track_item alloc,
mm/slub.c
8776
void *addr = slab_address(slab);
mm/slub.c
8780
__fill_map(obj_map, s, slab);
mm/slub.c
8782
for_each_object(p, s, addr, slab->objects)
mm/slub.c
883
static inline bool obj_exts_in_object(struct kmem_cache *s, struct slab *slab)
mm/slub.c
900
static inline bool validate_slab_ptr(struct slab *slab)
mm/slub.c
902
return PageSlab(slab_page(slab));
mm/slub.c
909
struct slab *slab)
mm/slub.c
911
void *addr = slab_address(slab);
mm/slub.c
914
bitmap_zero(obj_map, slab->objects);
mm/slub.c
916
for (p = slab->freelist; p; p = get_freepointer(s, p))
mm/slub.c
9741
struct slab *slab;
mm/slub.c
9747
list_for_each_entry(slab, &n->partial, slab_list)
mm/slub.c
9748
process_slab(t, s, slab, alloc, obj_map);
mm/slub.c
9749
list_for_each_entry(slab, &n->full, slab_list)
mm/slub.c
9750
process_slab(t, s, slab, alloc, obj_map);
mm/slub.c
989
struct slab *slab, void *object)
mm/slub.c
996
base = slab_address(slab);
mm/slub.c
999
if (object < base || object >= base + slab->objects * s->size ||
mm/usercopy.c
168
struct slab *slab;
mm/usercopy.c
194
slab = page_slab(page);
mm/usercopy.c
195
if (slab) {
mm/usercopy.c
197
__check_heap_object(ptr, n, slab, to_user);
net/core/sock.c
2235
struct kmem_cache *slab;
net/core/sock.c
2237
slab = prot->slab;
net/core/sock.c
2238
if (slab != NULL) {
net/core/sock.c
2239
sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO);
net/core/sock.c
2260
if (slab != NULL)
net/core/sock.c
2261
kmem_cache_free(slab, sk);
net/core/sock.c
2269
struct kmem_cache *slab;
net/core/sock.c
2273
slab = prot->slab;
net/core/sock.c
2281
if (slab != NULL)
net/core/sock.c
2282
kmem_cache_free(slab, sk);
net/core/sock.c
4154
kmem_cache_destroy(rsk_prot->slab);
net/core/sock.c
4155
rsk_prot->slab = NULL;
net/core/sock.c
4170
rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
net/core/sock.c
4175
if (!rsk_prot->slab) {
net/core/sock.c
4203
prot->slab = kmem_cache_create(prot->name, prot->obj_size,
net/core/sock.c
4207
if (prot->slab == NULL) {
net/core/sock.c
4237
kmem_cache_destroy(prot->slab);
net/core/sock.c
4238
prot->slab = NULL;
net/core/sock.c
4252
kmem_cache_destroy(prot->slab);
net/core/sock.c
4253
prot->slab = NULL;
net/core/sock.c
4328
proto->slab == NULL ? "no" : "yes",
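
The net/core/sock.c lines above trace the full lifecycle of a protocol's cache: proto_register() creates prot->slab sized to prot->obj_size, sk_prot_alloc()/sk_prot_free() route through it when present (falling back to the generic allocator otherwise), and unregistering destroys it. A hedged sketch of the registration half, with a hypothetical protocol name, headers omitted, and error handling trimmed:

    static struct proto toy_prot = {
            .name     = "TOY",
            .obj_size = sizeof(struct sock),  /* usually a larger derived sock */
    };

    static int __init toy_init(void)
    {
            /* What proto_register() does with prot->slab, in one call. */
            toy_prot.slab = kmem_cache_create(toy_prot.name, toy_prot.obj_size,
                                              0, SLAB_HWCACHE_ALIGN, NULL);
            return toy_prot.slab ? 0 : -ENOMEM;
    }

    static void __exit toy_exit(void)
    {
            kmem_cache_destroy(toy_prot.slab);
            toy_prot.slab = NULL;
    }
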
net/ipv4/af_inet.c
331
WARN_ON(!answer_prot->slab);
net/ipv4/inet_connection_sock.c
858
req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN);
net/ipv4/inet_connection_sock.c
864
kmem_cache_free(ops->slab, req);
net/ipv4/inet_connection_sock.c
914
kmem_cache_free(req->rsk_ops->slab, req);
net/ipv4/inet_connection_sock.c
924
nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
net/ipv6/af_inet6.c
182
WARN_ON(!answer_prot->slab);
net/mptcp/protocol.c
4665
mptcp_v6_prot.slab = NULL;
net/mptcp/subflow.c
2130
subflow_ops->slab = kmem_cache_create(subflow_ops->slab_name,
net/mptcp/subflow.c
2135
if (!subflow_ops->slab)
tools/mm/slabinfo.c
1146
a->slab = s;
tools/mm/slabinfo.c
1165
if (!show_single_ref && a->slab->refs == 1)
tools/mm/slabinfo.c
1170
if (strcmp(a->slab->name, active) == 0) {
tools/mm/slabinfo.c
1175
printf("\n%-12s <- %s", a->slab->name, a->name);
tools/mm/slabinfo.c
1176
active = a->slab->name;
tools/mm/slabinfo.c
1179
printf("%-15s -> %s\n", a->name, a->slab->name);
tools/mm/slabinfo.c
1209
static int slab_mismatch(char *slab)
tools/mm/slabinfo.c
1211
return regexec(&pattern, slab, 0, NULL, 0);
tools/mm/slabinfo.c
1218
struct slabinfo *slab = slabinfo;
tools/mm/slabinfo.c
1250
if (slab - slabinfo == MAX_SLABS)
tools/mm/slabinfo.c
1253
fatal("Unable to access slab %s\n", slab->name);
tools/mm/slabinfo.c
1254
slab->name = strdup(de->d_name);
tools/mm/slabinfo.c
1255
slab->alias = 0;
tools/mm/slabinfo.c
1256
slab->refs = 0;
tools/mm/slabinfo.c
1257
slab->aliases = get_obj("aliases");
tools/mm/slabinfo.c
1258
slab->align = get_obj("align");
tools/mm/slabinfo.c
1259
slab->cache_dma = get_obj("cache_dma");
tools/mm/slabinfo.c
1260
slab->cpu_slabs = get_obj("cpu_slabs");
tools/mm/slabinfo.c
1261
slab->destroy_by_rcu = get_obj("destroy_by_rcu");
tools/mm/slabinfo.c
1262
slab->hwcache_align = get_obj("hwcache_align");
tools/mm/slabinfo.c
1263
slab->object_size = get_obj("object_size");
tools/mm/slabinfo.c
1264
slab->objects = get_obj("objects");
tools/mm/slabinfo.c
1265
slab->objects_partial = get_obj("objects_partial");
tools/mm/slabinfo.c
1266
slab->objects_total = get_obj("objects_total");
tools/mm/slabinfo.c
1267
slab->objs_per_slab = get_obj("objs_per_slab");
tools/mm/slabinfo.c
1268
slab->order = get_obj("order");
tools/mm/slabinfo.c
1269
slab->partial = get_obj("partial");
tools/mm/slabinfo.c
1270
slab->partial = get_obj_and_str("partial", &t);
tools/mm/slabinfo.c
1271
decode_numa_list(slab->numa_partial, t);
tools/mm/slabinfo.c
1273
slab->poison = get_obj("poison");
tools/mm/slabinfo.c
1274
slab->reclaim_account = get_obj("reclaim_account");
tools/mm/slabinfo.c
1275
slab->red_zone = get_obj("red_zone");
tools/mm/slabinfo.c
1276
slab->sanity_checks = get_obj("sanity_checks");
tools/mm/slabinfo.c
1277
slab->slab_size = get_obj("slab_size");
tools/mm/slabinfo.c
1278
slab->slabs = get_obj_and_str("slabs", &t);
tools/mm/slabinfo.c
1279
decode_numa_list(slab->numa, t);
tools/mm/slabinfo.c
1281
slab->store_user = get_obj("store_user");
tools/mm/slabinfo.c
1282
slab->trace = get_obj("trace");
tools/mm/slabinfo.c
1283
slab->alloc_fastpath = get_obj("alloc_fastpath");
tools/mm/slabinfo.c
1284
slab->alloc_slowpath = get_obj("alloc_slowpath");
tools/mm/slabinfo.c
1285
slab->free_fastpath = get_obj("free_fastpath");
tools/mm/slabinfo.c
1286
slab->free_slowpath = get_obj("free_slowpath");
tools/mm/slabinfo.c
1287
slab->free_frozen = get_obj("free_frozen");
tools/mm/slabinfo.c
1288
slab->free_add_partial = get_obj("free_add_partial");
tools/mm/slabinfo.c
1289
slab->free_remove_partial = get_obj("free_remove_partial");
tools/mm/slabinfo.c
1290
slab->alloc_from_partial = get_obj("alloc_from_partial");
tools/mm/slabinfo.c
1291
slab->alloc_slab = get_obj("alloc_slab");
tools/mm/slabinfo.c
1292
slab->alloc_refill = get_obj("alloc_refill");
tools/mm/slabinfo.c
1293
slab->free_slab = get_obj("free_slab");
tools/mm/slabinfo.c
1294
slab->cpuslab_flush = get_obj("cpuslab_flush");
tools/mm/slabinfo.c
1295
slab->deactivate_full = get_obj("deactivate_full");
tools/mm/slabinfo.c
1296
slab->deactivate_empty = get_obj("deactivate_empty");
tools/mm/slabinfo.c
1297
slab->deactivate_to_head = get_obj("deactivate_to_head");
tools/mm/slabinfo.c
1298
slab->deactivate_to_tail = get_obj("deactivate_to_tail");
tools/mm/slabinfo.c
1299
slab->deactivate_remote_frees = get_obj("deactivate_remote_frees");
tools/mm/slabinfo.c
1300
slab->order_fallback = get_obj("order_fallback");
tools/mm/slabinfo.c
1301
slab->cmpxchg_double_cpu_fail = get_obj("cmpxchg_double_cpu_fail");
tools/mm/slabinfo.c
1302
slab->cmpxchg_double_fail = get_obj("cmpxchg_double_fail");
tools/mm/slabinfo.c
1303
slab->cpu_partial_alloc = get_obj("cpu_partial_alloc");
tools/mm/slabinfo.c
1304
slab->cpu_partial_free = get_obj("cpu_partial_free");
tools/mm/slabinfo.c
1305
slab->alloc_node_mismatch = get_obj("alloc_node_mismatch");
tools/mm/slabinfo.c
1306
slab->deactivate_bypass = get_obj("deactivate_bypass");
tools/mm/slabinfo.c
1309
slab->name);
tools/mm/slabinfo.c
1310
if (slab->name[0] == ':')
tools/mm/slabinfo.c
1312
slab++;
tools/mm/slabinfo.c
1319
slabs = slab - slabinfo;
tools/mm/slabinfo.c
1326
struct slabinfo *slab;
tools/mm/slabinfo.c
1329
for (slab = slabinfo; (slab < slabinfo + slabs) &&
tools/mm/slabinfo.c
1330
lines != 0; slab++) {
tools/mm/slabinfo.c
1332
if (slab->alias)
tools/mm/slabinfo.c
1339
slab_numa(slab, 0);
tools/mm/slabinfo.c
1341
show_tracking(slab);
tools/mm/slabinfo.c
1343
slab_validate(slab);
tools/mm/slabinfo.c
1345
slab_shrink(slab);
tools/mm/slabinfo.c
1347
slab_debug(slab);
tools/mm/slabinfo.c
1349
ops(slab);
tools/mm/slabinfo.c
1351
slabcache(slab);
tools/mm/slabinfo.c
1353
report(slab);
tools/mm/slabinfo.c
360
if (a->slab == find &&
tools/mm/slabinfo.c
54
struct slabinfo *slab;
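
tools/mm/slabinfo.c above fills one struct slabinfo field per sysfs attribute; get_obj() simply parses a number out of /sys/kernel/slab/<cache>/<attr>. A standalone equivalent of one such read, with the path layout as in the tool and error handling simplified:

    #include <stdio.h>

    /* Read one numeric sysfs attribute of a slab cache. */
    static unsigned long read_attr(const char *cache, const char *attr)
    {
            char path[256];
            unsigned long val = 0;
            FILE *f;

            snprintf(path, sizeof(path), "/sys/kernel/slab/%s/%s",
                     cache, attr);
            f = fopen(path, "r");
            if (!f)
                    return 0;
            if (fscanf(f, "%lu", &val) != 1)
                    val = 0;
            fclose(f);
            return val;
    }

    /* e.g. read_attr("kmalloc-64", "objects") */
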
tools/sched_ext/scx_sdt.bpf.c
104
void __arena *slab;
tools/sched_ext/scx_sdt.bpf.c
112
slab = bpf_arena_alloc_pages(&arena, NULL,
tools/sched_ext/scx_sdt.bpf.c
114
if (!slab)
tools/sched_ext/scx_sdt.bpf.c
117
pool->slab = slab;
tools/sched_ext/scx_sdt.bpf.c
121
ptr = (void __arena *)((__u64) pool->slab + elem_size * pool->idx);
tools/sched_ext/scx_sdt.h
23
void __arena *slab;
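
The scx_sdt lines close the listing with a different kind of "slab": a BPF-arena region carved into fixed-size elements by index, as pool->slab + elem_size * pool->idx above shows. The same bump-pointer scheme in plain C, with toy names:

    #include <stddef.h>

    struct toy_pool {
            char *slab;          /* backing region from one big allocation */
            size_t elem_size;
            size_t idx, cap;     /* next free element, total elements */
    };

    /* Hand out element idx and bump it; NULL once the slab is exhausted
     * (the BPF version then allocates a fresh arena region instead). */
    static void *pool_alloc(struct toy_pool *p)
    {
            if (p->idx == p->cap)
                    return NULL;
            return p->slab + p->elem_size * p->idx++;
    }
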