arch/arm64/kernel/hibernate.c:257: struct zone *zone;
arch/arm64/kernel/hibernate.c:265: for_each_populated_zone(zone) {
arch/arm64/kernel/hibernate.c:266: max_zone_pfn = zone_end_pfn(zone);
arch/arm64/kernel/hibernate.c:267: for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
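
The hibernate.c hits above form the kernel's standard walk over every page frame of every populated memory zone. A minimal sketch of that pattern, assuming kernel context; mark_pfn() is a hypothetical stand-in for hibernate's per-page work:

#include <linux/mm.h>
#include <linux/mmzone.h>

/* Walk all PFNs of all populated zones; zone spans may contain
 * holes, hence the pfn_valid() check before touching a frame. */
static void walk_all_pfns(void (*mark_pfn)(unsigned long pfn))
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			if (!pfn_valid(pfn))
				continue;
			mark_pfn(pfn);
		}
	}
}
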
arch/s390/include/asm/ap.h:296: unsigned int zone : 8; /* zone info */
block/blk-zoned.c:354: static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
block/blk-zoned.c:359: if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
block/blk-zoned.c:490: static bool disk_zone_is_last(struct gendisk *disk, struct blk_zone *zone)
block/blk-zoned.c:492: return zone->start + zone->len >= get_capacity(disk);
block/blk-zoned.c:774: static unsigned int blk_zone_wp_offset(struct blk_zone *zone)
block/blk-zoned.c:776: switch (zone->cond) {
block/blk-zoned.c:781: return zone->wp - zone->start;
block/blk-zoned.c:798: struct blk_zone *zone)
block/blk-zoned.c:801: unsigned int wp_offset = blk_zone_wp_offset(zone);
block/blk-zoned.c:803: zwplug = disk_get_zone_wplug(disk, zone->start);
block/blk-zoned.c:831: int disk_report_zone(struct gendisk *disk, struct blk_zone *zone,
block/blk-zoned.c:840: switch (zone->cond) {
block/blk-zoned.c:844: zone->cond = BLK_ZONE_COND_ACTIVE;
block/blk-zoned.c:852: disk_zone_wplug_sync_wp_offset(disk, zone);
block/blk-zoned.c:855: return args->cb(zone, idx, args->data);
block/blk-zoned.c:861: static int blkdev_report_zone_cb(struct blk_zone *zone, unsigned int idx,
block/blk-zoned.c:864: memcpy(data, zone, sizeof(struct blk_zone));
block/blk-zoned.c:869: sector_t sector, struct blk_zone *zone)
block/blk-zoned.c:873: .data = zone,
block/blk-zoned.c:915: struct blk_zone *zone)
block/blk-zoned.c:929: memset(zone, 0, sizeof(*zone));
block/blk-zoned.c:933: return blkdev_report_zone_fallback(bdev, sector, zone);
block/blk-zoned.c:939: return blkdev_report_zone_fallback(bdev, sector, zone);
block/blk-zoned.c:941: zone->cond = zones_cond[disk_zone_no(disk, sector)];
block/blk-zoned.c:944: zone->start = sector;
block/blk-zoned.c:945: zone->len = zone_sectors;
block/blk-zoned.c:951: if (zone->cond == BLK_ZONE_COND_NOT_WP) {
block/blk-zoned.c:952: zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
block/blk-zoned.c:953: zone->capacity = zone_sectors;
block/blk-zoned.c:954: zone->wp = ULLONG_MAX;
block/blk-zoned.c:963: zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
block/blk-zoned.c:964: if (disk_zone_is_last(disk, zone))
block/blk-zoned.c:965: zone->capacity = disk->last_zone_capacity;
block/blk-zoned.c:967: zone->capacity = disk->zone_capacity;
block/blk-zoned.c:969: if (zone->cond == BLK_ZONE_COND_READONLY ||
block/blk-zoned.c:970: zone->cond == BLK_ZONE_COND_OFFLINE) {
block/blk-zoned.c:971: zone->wp = ULLONG_MAX;
block/blk-zoned.c:983: if (zone->cond == BLK_ZONE_COND_FULL)
block/blk-zoned.c:984: zone->wp = ULLONG_MAX;
block/blk-zoned.c:986: zone->wp = sector;
block/blk-zoned.c:994: return blkdev_report_zone_fallback(bdev, sector, zone);
block/blk-zoned.c:996: zone->cond = zwplug->cond;
block/blk-zoned.c:997: zone->wp = sector + zwplug->wp_offset;
block/blk-zoned.c:1028: struct blk_zone zone;
block/blk-zoned.c:1051: ret = blkdev_get_zone_info(bdev, sector, &zone);
block/blk-zoned.c:1055: ret = cb(&zone, idx, data);
block/blk-zoned.c:2124: static int blk_revalidate_zone_cond(struct blk_zone *zone, unsigned int idx,
block/blk-zoned.c:2127: enum blk_zone_cond cond = zone->cond;
block/blk-zoned.c:2132: if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
block/blk-zoned.c:2142: if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
block/blk-zoned.c:2157: args->disk->disk_name, cond, zone->type);
block/blk-zoned.c:2162: static int blk_revalidate_conv_zone(struct blk_zone *zone, unsigned int idx,
block/blk-zoned.c:2167: if (zone->capacity != zone->len) {
block/blk-zoned.c:2173: if (disk_zone_is_last(disk, zone))
block/blk-zoned.c:2174: args->last_zone_capacity = zone->capacity;
block/blk-zoned.c:2181: static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx,
block/blk-zoned.c:2194: args->zone_capacity = zone->capacity;
block/blk-zoned.c:2195: if (disk_zone_is_last(disk, zone)) {
block/blk-zoned.c:2196: args->last_zone_capacity = zone->capacity;
block/blk-zoned.c:2197: } else if (zone->capacity != args->zone_capacity) {
block/blk-zoned.c:2212: wp_offset = disk_zone_wplug_sync_wp_offset(disk, zone);
block/blk-zoned.c:2213: if (!wp_offset || wp_offset >= zone->capacity)
block/blk-zoned.c:2216: zwplug = disk_get_or_alloc_zone_wplug(disk, zone->wp, GFP_NOIO);
block/blk-zoned.c:2227: static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
block/blk-zoned.c:2236: if (zone->start != args->sector) {
block/blk-zoned.c:2238: disk->disk_name, args->sector, zone->start);
block/blk-zoned.c:2242: if (zone->start >= get_capacity(disk) || !zone->len) {
block/blk-zoned.c:2244: disk->disk_name, zone->start, zone->len);
block/blk-zoned.c:2252: if (!disk_zone_is_last(disk, zone)) {
block/blk-zoned.c:2253: if (zone->len != zone_sectors) {
block/blk-zoned.c:2258: } else if (zone->len > zone_sectors) {
block/blk-zoned.c:2264: if (!zone->capacity || zone->capacity > zone->len) {
block/blk-zoned.c:2271: ret = blk_revalidate_zone_cond(zone, idx, args);
block/blk-zoned.c:2276: switch (zone->type) {
block/blk-zoned.c:2278: ret = blk_revalidate_conv_zone(zone, idx, args);
block/blk-zoned.c:2281: ret = blk_revalidate_seq_zone(zone, idx, args);
block/blk-zoned.c:2286: disk->disk_name, (int)zone->type, zone->start);
block/blk-zoned.c:2291: args->sector += zone->len;
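
Most blk-zoned.c hits above pass through report_zones_cb callbacks, which receive one struct blk_zone per reported zone plus an opaque data pointer. A minimal sketch of such a callback, assuming kernel context, modeled on blkdev_report_zone_cb above:

#include <linux/blkdev.h>
#include <linux/string.h>

/* Copy the single reported zone into the caller's buffer; intended
 * as the cb argument of blkdev_report_zones(bdev, sector, 1, cb,
 * &zone), which reports at most one zone here. */
static int copy_one_zone_cb(struct blk_zone *zone, unsigned int idx,
			    void *data)
{
	memcpy(data, zone, sizeof(struct blk_zone));
	return 0;
}
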
drivers/acpi/acpi_mrrm.c:43: struct zone *zone = NODE_DATA(nid)->node_zones + z;
drivers/acpi/acpi_mrrm.c:45: if (!populated_zone(zone))
drivers/acpi/acpi_mrrm.c:47: if (zone_intersects(zone, PHYS_PFN(e->base), PHYS_PFN(e->length)))
drivers/acpi/acpi_mrrm.c:48: return zone_to_nid(zone);
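
The acpi_mrrm.c hits above map a physical address range to a NUMA node by scanning each node's populated zones. A sketch of that lookup, assuming kernel context, with base and length in bytes:

#include <linux/mmzone.h>
#include <linux/nodemask.h>
#include <linux/numa.h>
#include <linux/pfn.h>

static int range_to_nid(u64 base, u64 length)
{
	int nid, z;

	for_each_online_node(nid) {
		for (z = 0; z < MAX_NR_ZONES; z++) {
			struct zone *zone = NODE_DATA(nid)->node_zones + z;

			if (!populated_zone(zone))
				continue;
			/* zone_intersects() takes a start PFN and a
			 * page count, hence the PHYS_PFN conversions. */
			if (zone_intersects(zone, PHYS_PFN(base),
					    PHYS_PFN(length)))
				return zone_to_nid(zone);
		}
	}
	return NUMA_NO_NODE;
}
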
drivers/base/memory.c:229: struct zone *zone;
drivers/base/memory.c:235: zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
drivers/base/memory.c:250: ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
drivers/base/memory.c:256: nr_pages - nr_vmemmap_pages, zone, mem->group);
drivers/base/memory.c:271: mem->zone = zone;
drivers/base/memory.c:287: if (!mem->zone)
drivers/base/memory.c:303: nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
drivers/base/memory.c:315: mem->zone = NULL;
drivers/base/memory.c:455: enum mmop online_type, struct zone *default_zone)
drivers/base/memory.c:457: struct zone *zone;
drivers/base/memory.c:459: zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
drivers/base/memory.c:460: if (zone == default_zone)
drivers/base/memory.c:463: return sysfs_emit_at(buf, len, " %s", zone->name);
drivers/base/memory.c:473: struct zone *default_zone;
drivers/base/memory.c:487: mem->zone ? mem->zone->name : "none");
drivers/base/memory.c:715: static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
drivers/base/memory.c:720: struct zone *zone, *matching_zone = NULL;
drivers/base/memory.c:733: zone = pgdat->node_zones + i;
drivers/base/memory.c:734: if (!populated_zone(zone))
drivers/base/memory.c:736: if (!zone_intersects(zone, start_pfn, nr_pages))
drivers/base/memory.c:739: matching_zone = zone;
drivers/base/memory.c:777: mem->zone = early_node_zone_for_memory_block(mem, nid);
drivers/base/memory.c:779: mem->zone = NULL;
drivers/base/memory.c:821: mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
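
The early_node_zone_for_memory_block() hits above implement "find the unique populated zone covering this PFN range". A sketch of that logic, assuming kernel context:

#include <linux/mmzone.h>

/* Return the one populated zone intersecting the range, or NULL when
 * zero or multiple zones match (the block then has no single zone). */
static struct zone *single_matching_zone(pg_data_t *pgdat,
					 unsigned long start_pfn,
					 unsigned long nr_pages)
{
	struct zone *zone, *matching_zone = NULL;
	int i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		zone = pgdat->node_zones + i;
		if (!populated_zone(zone))
			continue;
		if (!zone_intersects(zone, start_pfn, nr_pages))
			continue;
		if (matching_zone)
			return NULL;	/* spans more than one zone */
		matching_zone = zone;
	}
	return matching_zone;
}
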
drivers/block/null_blk/zoned.c:25: struct nullb_zone *zone)
drivers/block/null_blk/zoned.c:28: spin_lock_init(&zone->spinlock);
drivers/block/null_blk/zoned.c:30: mutex_init(&zone->mutex);
drivers/block/null_blk/zoned.c:34: struct nullb_zone *zone)
drivers/block/null_blk/zoned.c:37: spin_lock_irq(&zone->spinlock);
drivers/block/null_blk/zoned.c:39: mutex_lock(&zone->mutex);
drivers/block/null_blk/zoned.c:43: struct nullb_zone *zone)
drivers/block/null_blk/zoned.c:46: spin_unlock_irq(&zone->spinlock);
drivers/block/null_blk/zoned.c:48: mutex_unlock(&zone->mutex);
drivers/block/null_blk/zoned.c:55: struct nullb_zone *zone;
drivers/block/null_blk/zoned.c:131: zone = &dev->zones[i];
drivers/block/null_blk/zoned.c:133: null_init_zone_lock(dev, zone);
drivers/block/null_blk/zoned.c:134: zone->start = sector;
drivers/block/null_blk/zoned.c:135: zone->len = dev->zone_size_sects;
drivers/block/null_blk/zoned.c:136: zone->capacity = zone->len;
drivers/block/null_blk/zoned.c:137: zone->wp = zone->start + zone->len;
drivers/block/null_blk/zoned.c:138: zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
drivers/block/null_blk/zoned.c:139: zone->cond = BLK_ZONE_COND_NOT_WP;
drivers/block/null_blk/zoned.c:145: zone = &dev->zones[i];
drivers/block/null_blk/zoned.c:147: null_init_zone_lock(dev, zone);
drivers/block/null_blk/zoned.c:148: zone->start = sector;
drivers/block/null_blk/zoned.c:149: if (zone->start + dev->zone_size_sects > dev_capacity_sects)
drivers/block/null_blk/zoned.c:150: zone->len = dev_capacity_sects - zone->start;
drivers/block/null_blk/zoned.c:152: zone->len = dev->zone_size_sects;
drivers/block/null_blk/zoned.c:153: zone->capacity =
drivers/block/null_blk/zoned.c:154: min_t(sector_t, zone->len, zone_capacity_sects);
drivers/block/null_blk/zoned.c:155: zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
drivers/block/null_blk/zoned.c:157: zone->cond = BLK_ZONE_COND_FULL;
drivers/block/null_blk/zoned.c:158: zone->wp = zone->start + zone->capacity;
drivers/block/null_blk/zoned.c:160: zone->cond = BLK_ZONE_COND_EMPTY;
drivers/block/null_blk/zoned.c:161: zone->wp = zone->start;
drivers/block/null_blk/zoned.c:199: struct nullb_zone *zone;
drivers/block/null_blk/zoned.c:211: zone = &dev->zones[first_zone];
drivers/block/null_blk/zoned.c:212: for (i = 0; i < nr_zones; i++, zone++) {
drivers/block/null_blk/zoned.c:219: null_lock_zone(dev, zone);
drivers/block/null_blk/zoned.c:220: blkz.start = zone->start;
drivers/block/null_blk/zoned.c:221: blkz.len = zone->len;
drivers/block/null_blk/zoned.c:222: blkz.wp = zone->wp;
drivers/block/null_blk/zoned.c:223: blkz.type = zone->type;
drivers/block/null_blk/zoned.c:224: blkz.cond = zone->cond;
drivers/block/null_blk/zoned.c:225: blkz.capacity = zone->capacity;
drivers/block/null_blk/zoned.c:226: null_unlock_zone(dev, zone);
drivers/block/null_blk/zoned.c:244: struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
drivers/block/null_blk/zoned.c:248: if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
drivers/block/null_blk/zoned.c:249: sector + nr_sectors <= zone->wp)
drivers/block/null_blk/zoned.c:252: if (sector > zone->wp)
drivers/block/null_blk/zoned.c:255: return (zone->wp - sector) << SECTOR_SHIFT;
drivers/block/null_blk/zoned.c:260: struct nullb_zone *zone;
drivers/block/null_blk/zoned.c:268: zone = &dev->zones[zno];
drivers/block/null_blk/zoned.c:273: if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
drivers/block/null_blk/zoned.c:275: if (zone->wp == zone->start) {
drivers/block/null_blk/zoned.c:276: zone->cond = BLK_ZONE_COND_EMPTY;
drivers/block/null_blk/zoned.c:278: zone->cond = BLK_ZONE_COND_CLOSED;
drivers/block/null_blk/zoned.c:331: struct nullb_zone *zone)
drivers/block/null_blk/zoned.c:335: switch (zone->cond) {
drivers/block/null_blk/zoned.c:355: struct nullb_zone *zone = &dev->zones[zno];
drivers/block/null_blk/zoned.c:359: trace_nullb_zone_op(cmd, zno, zone->cond);
drivers/block/null_blk/zoned.c:361: if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
drivers/block/null_blk/zoned.c:367: null_lock_zone(dev, zone);
drivers/block/null_blk/zoned.c:379: zone->wp == NULL_ZONE_INVALID_WP) {
drivers/block/null_blk/zoned.c:383: sector = zone->wp;
drivers/block/null_blk/zoned.c:387: if (sector != zone->wp ||
drivers/block/null_blk/zoned.c:388: zone->wp + nr_sectors > zone->start + zone->capacity) {
drivers/block/null_blk/zoned.c:393: if (zone->cond == BLK_ZONE_COND_CLOSED ||
drivers/block/null_blk/zoned.c:394: zone->cond == BLK_ZONE_COND_EMPTY) {
drivers/block/null_blk/zoned.c:398: ret = null_check_zone_resources(dev, zone);
drivers/block/null_blk/zoned.c:403: if (zone->cond == BLK_ZONE_COND_CLOSED) {
drivers/block/null_blk/zoned.c:406: } else if (zone->cond == BLK_ZONE_COND_EMPTY) {
drivers/block/null_blk/zoned.c:413: zone->cond = BLK_ZONE_COND_IMP_OPEN;
drivers/block/null_blk/zoned.c:431: zone->wp += nr_sectors;
drivers/block/null_blk/zoned.c:432: if (zone->wp == zone->start + zone->capacity) {
drivers/block/null_blk/zoned.c:435: if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
drivers/block/null_blk/zoned.c:437: else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
drivers/block/null_blk/zoned.c:441: zone->cond = BLK_ZONE_COND_FULL;
drivers/block/null_blk/zoned.c:447: null_unlock_zone(dev, zone);
drivers/block/null_blk/zoned.c:453: struct nullb_zone *zone)
drivers/block/null_blk/zoned.c:457: if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
drivers/block/null_blk/zoned.c:460: switch (zone->cond) {
drivers/block/null_blk/zoned.c:476: switch (zone->cond) {
drivers/block/null_blk/zoned.c:478: ret = null_check_zone_resources(dev, zone);
drivers/block/null_blk/zoned.c:488: ret = null_check_zone_resources(dev, zone);
drivers/block/null_blk/zoned.c:504: zone->cond = BLK_ZONE_COND_EXP_OPEN;
drivers/block/null_blk/zoned.c:510: struct nullb_zone *zone)
drivers/block/null_blk/zoned.c:512: if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
drivers/block/null_blk/zoned.c:515: switch (zone->cond) {
drivers/block/null_blk/zoned.c:531: switch (zone->cond) {
drivers/block/null_blk/zoned.c:542: if (zone->wp > zone->start)
drivers/block/null_blk/zoned.c:548: if (zone->wp == zone->start)
drivers/block/null_blk/zoned.c:549: zone->cond = BLK_ZONE_COND_EMPTY;
drivers/block/null_blk/zoned.c:551: zone->cond = BLK_ZONE_COND_CLOSED;
drivers/block/null_blk/zoned.c:557: struct nullb_zone *zone)
drivers/block/null_blk/zoned.c:561: if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
drivers/block/null_blk/zoned.c:567: switch (zone->cond) {
drivers/block/null_blk/zoned.c:573: ret = null_check_zone_resources(dev, zone);
drivers/block/null_blk/zoned.c:586: ret = null_check_zone_resources(dev, zone);
drivers/block/null_blk/zoned.c:601: zone->cond = BLK_ZONE_COND_FULL;
drivers/block/null_blk/zoned.c:602: zone->wp = zone->start + zone->len;
drivers/block/null_blk/zoned.c:608: struct nullb_zone *zone)
drivers/block/null_blk/zoned.c:610: if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
drivers/block/null_blk/zoned.c:616: switch (zone->cond) {
drivers/block/null_blk/zoned.c:637: zone->cond = BLK_ZONE_COND_EMPTY;
drivers/block/null_blk/zoned.c:638: zone->wp = zone->start;
drivers/block/null_blk/zoned.c:641: return null_handle_discard(dev, zone->start, zone->len);
drivers/block/null_blk/zoned.c:651: struct nullb_zone *zone;
drivers/block/null_blk/zoned.c:657: zone = &dev->zones[i];
drivers/block/null_blk/zoned.c:658: null_lock_zone(dev, zone);
drivers/block/null_blk/zoned.c:659: if (zone->cond != BLK_ZONE_COND_EMPTY &&
drivers/block/null_blk/zoned.c:660: zone->cond != BLK_ZONE_COND_READONLY &&
drivers/block/null_blk/zoned.c:661: zone->cond != BLK_ZONE_COND_OFFLINE) {
drivers/block/null_blk/zoned.c:662: null_reset_zone(dev, zone);
drivers/block/null_blk/zoned.c:663: trace_nullb_zone_op(cmd, i, zone->cond);
drivers/block/null_blk/zoned.c:665: null_unlock_zone(dev, zone);
drivers/block/null_blk/zoned.c:671: zone = &dev->zones[zone_no];
drivers/block/null_blk/zoned.c:673: null_lock_zone(dev, zone);
drivers/block/null_blk/zoned.c:675: if (zone->cond == BLK_ZONE_COND_READONLY ||
drivers/block/null_blk/zoned.c:676: zone->cond == BLK_ZONE_COND_OFFLINE) {
drivers/block/null_blk/zoned.c:683: ret = null_reset_zone(dev, zone);
drivers/block/null_blk/zoned.c:686: ret = null_open_zone(dev, zone);
drivers/block/null_blk/zoned.c:689: ret = null_close_zone(dev, zone);
drivers/block/null_blk/zoned.c:692: ret = null_finish_zone(dev, zone);
drivers/block/null_blk/zoned.c:700: trace_nullb_zone_op(cmd, zone_no, zone->cond);
drivers/block/null_blk/zoned.c:703: null_unlock_zone(dev, zone);
drivers/block/null_blk/zoned.c:712: struct nullb_zone *zone;
drivers/block/null_blk/zoned.c:728: zone = &dev->zones[null_zone_no(dev, sector)];
drivers/block/null_blk/zoned.c:729: if (zone->cond == BLK_ZONE_COND_OFFLINE)
drivers/block/null_blk/zoned.c:732: null_lock_zone(dev, zone);
drivers/block/null_blk/zoned.c:734: null_unlock_zone(dev, zone);
drivers/block/null_blk/zoned.c:743: struct nullb_zone *zone, enum blk_zone_cond cond)
drivers/block/null_blk/zoned.c:749: null_lock_zone(dev, zone);
drivers/block/null_blk/zoned.c:758: if (zone->cond == cond) {
drivers/block/null_blk/zoned.c:759: zone->cond = BLK_ZONE_COND_EMPTY;
drivers/block/null_blk/zoned.c:760: zone->wp = zone->start;
drivers/block/null_blk/zoned.c:762: null_handle_discard(dev, zone->start, zone->len);
drivers/block/null_blk/zoned.c:764: if (zone->cond != BLK_ZONE_COND_READONLY &&
drivers/block/null_blk/zoned.c:765: zone->cond != BLK_ZONE_COND_OFFLINE)
drivers/block/null_blk/zoned.c:766: null_finish_zone(dev, zone);
drivers/block/null_blk/zoned.c:767: zone->cond = cond;
drivers/block/null_blk/zoned.c:768: zone->wp = NULL_ZONE_INVALID_WP;
drivers/block/null_blk/zoned.c:771: null_unlock_zone(dev, zone);
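
The null_blk hits above initialize an array of emulated zones: a leading run of conventional zones, then sequential zones, with the last zone possibly shortened to fit the device capacity. A sketch of that pattern; struct emu_zone and the parameters are hypothetical reductions of the nullb structures:

#include <linux/blkzoned.h>
#include <linux/minmax.h>
#include <linux/types.h>

struct emu_zone {
	sector_t start, len, capacity, wp;
	unsigned int type, cond;
};

static void init_emu_zones(struct emu_zone *zones, unsigned int nr_conv,
			   unsigned int nr_zones, sector_t zone_sects,
			   sector_t zone_cap_sects, sector_t dev_sects)
{
	sector_t sector = 0;
	unsigned int i;

	for (i = 0; i < nr_zones; i++) {
		struct emu_zone *zone = &zones[i];

		zone->start = sector;
		/* the last zone may be a runt shortened to capacity */
		if (zone->start + zone_sects > dev_sects)
			zone->len = dev_sects - zone->start;
		else
			zone->len = zone_sects;

		if (i < nr_conv) {
			/* conventional: no write pointer semantics */
			zone->capacity = zone->len;
			zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
			zone->cond = BLK_ZONE_COND_NOT_WP;
			zone->wp = zone->start + zone->len;
		} else {
			zone->capacity = min_t(sector_t, zone->len,
					       zone_cap_sects);
			zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
			zone->cond = BLK_ZONE_COND_EMPTY;
			zone->wp = zone->start;
		}
		sector += zone->len;
	}
}
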
drivers/block/ublk_drv.c:629: struct blk_zone *zone = buffer + i;
drivers/block/ublk_drv.c:632: if (!zone->len)
drivers/block/ublk_drv.c:635: ret = disk_report_zone(disk, zone, i, args);
drivers/block/virtio_blk.c:590: struct blk_zone zone = { };
drivers/block/virtio_blk.c:592: zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
drivers/block/virtio_blk.c:593: if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
drivers/block/virtio_blk.c:594: zone.len = vblk->zone_sectors;
drivers/block/virtio_blk.c:596: zone.len = get_capacity(vblk->disk) - zone.start;
drivers/block/virtio_blk.c:597: zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
drivers/block/virtio_blk.c:598: zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);
drivers/block/virtio_blk.c:602: zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
drivers/block/virtio_blk.c:605: zone.type = BLK_ZONE_TYPE_SEQWRITE_PREF;
drivers/block/virtio_blk.c:608: zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
drivers/block/virtio_blk.c:612: zone.start, entry->z_type);
drivers/block/virtio_blk.c:618: zone.cond = BLK_ZONE_COND_EMPTY;
drivers/block/virtio_blk.c:621: zone.cond = BLK_ZONE_COND_CLOSED;
drivers/block/virtio_blk.c:624: zone.cond = BLK_ZONE_COND_FULL;
drivers/block/virtio_blk.c:625: zone.wp = zone.start + zone.len;
drivers/block/virtio_blk.c:628: zone.cond = BLK_ZONE_COND_EXP_OPEN;
drivers/block/virtio_blk.c:631: zone.cond = BLK_ZONE_COND_IMP_OPEN;
drivers/block/virtio_blk.c:634: zone.cond = BLK_ZONE_COND_NOT_WP;
drivers/block/virtio_blk.c:637: zone.cond = BLK_ZONE_COND_READONLY;
drivers/block/virtio_blk.c:638: zone.wp = ULONG_MAX;
drivers/block/virtio_blk.c:641: zone.cond = BLK_ZONE_COND_OFFLINE;
drivers/block/virtio_blk.c:642: zone.wp = ULONG_MAX;
drivers/block/virtio_blk.c:646: zone.start, entry->z_state);
drivers/block/virtio_blk.c:654: return disk_report_zone(vblk->disk, &zone, idx, args);
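
The virtio_blk hits above translate a device-reported zone state into a blk_zone condition, patching the write pointer for states where it carries no information. A sketch of that switch; the dev_zone_state values are hypothetical stand-ins for virtio_blk's own VIRTIO_BLK_ZS_* codes:

#include <linux/blkzoned.h>
#include <linux/errno.h>
#include <linux/limits.h>

enum dev_zone_state { DEV_ZS_EMPTY, DEV_ZS_CLOSED, DEV_ZS_FULL,
		      DEV_ZS_EOPEN, DEV_ZS_IOPEN, DEV_ZS_NOT_WP,
		      DEV_ZS_RDONLY, DEV_ZS_OFFLINE };

static int fill_zone_cond(struct blk_zone *zone, enum dev_zone_state st)
{
	switch (st) {
	case DEV_ZS_EMPTY:
		zone->cond = BLK_ZONE_COND_EMPTY;
		break;
	case DEV_ZS_CLOSED:
		zone->cond = BLK_ZONE_COND_CLOSED;
		break;
	case DEV_ZS_FULL:
		zone->cond = BLK_ZONE_COND_FULL;
		/* a full zone's wp is pinned to the zone end */
		zone->wp = zone->start + zone->len;
		break;
	case DEV_ZS_EOPEN:
		zone->cond = BLK_ZONE_COND_EXP_OPEN;
		break;
	case DEV_ZS_IOPEN:
		zone->cond = BLK_ZONE_COND_IMP_OPEN;
		break;
	case DEV_ZS_NOT_WP:
		zone->cond = BLK_ZONE_COND_NOT_WP;
		break;
	case DEV_ZS_RDONLY:
		zone->cond = BLK_ZONE_COND_READONLY;
		zone->wp = ULONG_MAX;	/* wp is meaningless here */
		break;
	case DEV_ZS_OFFLINE:
		zone->cond = BLK_ZONE_COND_OFFLINE;
		zone->wp = ULONG_MAX;
		break;
	default:
		return -EINVAL;	/* unknown device state */
	}
	return 0;
}
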
drivers/block/zloop.c:182: struct zloop_zone *zone)
drivers/block/zloop.c:186: list_move_tail(&zone->open_zone_entry,
drivers/block/zloop.c:193: struct zloop_zone *zone)
drivers/block/zloop.c:195: if (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
drivers/block/zloop.c:196: zone->cond == BLK_ZONE_COND_EXP_OPEN) {
drivers/block/zloop.c:198: list_del_init(&zone->open_zone_entry);
drivers/block/zloop.c:216: struct zloop_zone *zone;
drivers/block/zloop.c:223: list_for_each_entry(zone, &zlo->open_zones_lru_list, open_zone_entry) {
drivers/block/zloop.c:224: if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
drivers/block/zloop.c:225: zone->cond = BLK_ZONE_COND_CLOSED;
drivers/block/zloop.c:226: list_del_init(&zone->open_zone_entry);
drivers/block/zloop.c:236: struct zloop_zone *zone,
drivers/block/zloop.c:248: zone->cond = BLK_ZONE_COND_EXP_OPEN;
drivers/block/zloop.c:256: zone->cond = BLK_ZONE_COND_IMP_OPEN;
drivers/block/zloop.c:260: list_add_tail(&zone->open_zone_entry,
drivers/block/zloop.c:274: struct zloop_zone *zone, bool explicit)
drivers/block/zloop.c:276: switch (zone->cond) {
drivers/block/zloop.c:280: zone->cond = BLK_ZONE_COND_EXP_OPEN;
drivers/block/zloop.c:281: zloop_lru_rotate_open_zone(zlo, zone);
drivers/block/zloop.c:285: return zloop_open_closed_or_empty_zone(zlo, zone, explicit);
drivers/block/zloop.c:293: struct zloop_zone *zone = &zlo->zones[zone_no];
drivers/block/zloop.c:299: lockdep_assert_held(&zone->lock);
drivers/block/zloop.c:301: ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
drivers/block/zloop.c:305: set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
drivers/block/zloop.c:322: spin_lock_irqsave(&zone->wp_lock, flags);
drivers/block/zloop.c:324: zloop_lru_remove_open_zone(zlo, zone);
drivers/block/zloop.c:325: zone->cond = BLK_ZONE_COND_EMPTY;
drivers/block/zloop.c:326: zone->wp = zone->start;
drivers/block/zloop.c:328: zloop_lru_remove_open_zone(zlo, zone);
drivers/block/zloop.c:329: zone->cond = BLK_ZONE_COND_FULL;
drivers/block/zloop.c:330: zone->wp = ULLONG_MAX;
drivers/block/zloop.c:332: if (zone->cond != BLK_ZONE_COND_IMP_OPEN &&
drivers/block/zloop.c:333: zone->cond != BLK_ZONE_COND_EXP_OPEN)
drivers/block/zloop.c:334: zone->cond = BLK_ZONE_COND_CLOSED;
drivers/block/zloop.c:335: zone->wp = zone->start + file_sectors;
drivers/block/zloop.c:337: spin_unlock_irqrestore(&zone->wp_lock, flags);
drivers/block/zloop.c:344: struct zloop_zone *zone = &zlo->zones[zone_no];
drivers/block/zloop.c:347: if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
drivers/block/zloop.c:350: mutex_lock(&zone->lock);
drivers/block/zloop.c:352: if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
drivers/block/zloop.c:358: if (!zloop_do_open_zone(zlo, zone, true))
drivers/block/zloop.c:362: mutex_unlock(&zone->lock);
drivers/block/zloop.c:369: struct zloop_zone *zone = &zlo->zones[zone_no];
drivers/block/zloop.c:373: if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
drivers/block/zloop.c:376: mutex_lock(&zone->lock);
drivers/block/zloop.c:378: if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
drivers/block/zloop.c:384: switch (zone->cond) {
drivers/block/zloop.c:389: spin_lock_irqsave(&zone->wp_lock, flags);
drivers/block/zloop.c:390: zloop_lru_remove_open_zone(zlo, zone);
drivers/block/zloop.c:391: if (zone->wp == zone->start)
drivers/block/zloop.c:392: zone->cond = BLK_ZONE_COND_EMPTY;
drivers/block/zloop.c:394: zone->cond = BLK_ZONE_COND_CLOSED;
drivers/block/zloop.c:395: spin_unlock_irqrestore(&zone->wp_lock, flags);
drivers/block/zloop.c:405: mutex_unlock(&zone->lock);
drivers/block/zloop.c:412: struct zloop_zone *zone = &zlo->zones[zone_no];
drivers/block/zloop.c:416: if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
drivers/block/zloop.c:419: mutex_lock(&zone->lock);
drivers/block/zloop.c:421: if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) &&
drivers/block/zloop.c:422: zone->cond == BLK_ZONE_COND_EMPTY)
drivers/block/zloop.c:425: if (vfs_truncate(&zone->file->f_path, 0)) {
drivers/block/zloop.c:426: set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
drivers/block/zloop.c:431: spin_lock_irqsave(&zone->wp_lock, flags);
drivers/block/zloop.c:432: zloop_lru_remove_open_zone(zlo, zone);
drivers/block/zloop.c:433: zone->cond = BLK_ZONE_COND_EMPTY;
drivers/block/zloop.c:434: zone->wp = zone->start;
drivers/block/zloop.c:435: clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
drivers/block/zloop.c:436: spin_unlock_irqrestore(&zone->wp_lock, flags);
drivers/block/zloop.c:439: mutex_unlock(&zone->lock);
drivers/block/zloop.c:460: struct zloop_zone *zone = &zlo->zones[zone_no];
drivers/block/zloop.c:464: if (test_bit(ZLOOP_ZONE_CONV, &zone->flags))
drivers/block/zloop.c:467: mutex_lock(&zone->lock);
drivers/block/zloop.c:469: if (!test_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags) &&
drivers/block/zloop.c:470: zone->cond == BLK_ZONE_COND_FULL)
drivers/block/zloop.c:473: if (vfs_truncate(&zone->file->f_path, zlo->zone_size << SECTOR_SHIFT)) {
drivers/block/zloop.c:474: set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
drivers/block/zloop.c:479: spin_lock_irqsave(&zone->wp_lock, flags);
drivers/block/zloop.c:480: zloop_lru_remove_open_zone(zlo, zone);
drivers/block/zloop.c:481: zone->cond = BLK_ZONE_COND_FULL;
drivers/block/zloop.c:482: zone->wp = ULLONG_MAX;
drivers/block/zloop.c:483: clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
drivers/block/zloop.c:484: spin_unlock_irqrestore(&zone->wp_lock, flags);
drivers/block/zloop.c:487: mutex_unlock(&zone->lock);
drivers/block/zloop.c:518: struct zloop_zone *zone = &zlo->zones[rq_zone_no(rq)];
drivers/block/zloop.c:553: cmd->iocb.ki_pos = (cmd->sector - zone->start) << SECTOR_SHIFT;
drivers/block/zloop.c:554: cmd->iocb.ki_filp = zone->file;
drivers/block/zloop.c:561: return zone->file->f_op->write_iter(&cmd->iocb, &iter);
drivers/block/zloop.c:562: return zone->file->f_op->read_iter(&cmd->iocb, &iter);
drivers/block/zloop.c:572: struct zloop_zone *zone = &zlo->zones[zone_no];
drivers/block/zloop.c:573: sector_t zone_end = zone->start + zlo->zone_capacity;
drivers/block/zloop.c:577: spin_lock_irqsave(&zone->wp_lock, flags);
drivers/block/zloop.c:590: if (zone->cond == BLK_ZONE_COND_FULL ||
drivers/block/zloop.c:591: zone->wp + nr_sectors > zone_end) {
drivers/block/zloop.c:595: cmd->sector = zone->wp;
drivers/block/zloop.c:598: if (cmd->sector != zone->wp) {
drivers/block/zloop.c:600: zone_no, cmd->sector, zone->wp);
drivers/block/zloop.c:607: if (!zloop_do_open_zone(zlo, zone, false)) {
drivers/block/zloop.c:618: zone->wp += nr_sectors;
drivers/block/zloop.c:619: if (zone->wp == zone_end) {
drivers/block/zloop.c:620: zloop_lru_remove_open_zone(zlo, zone);
drivers/block/zloop.c:621: zone->cond = BLK_ZONE_COND_FULL;
drivers/block/zloop.c:622: zone->wp = ULLONG_MAX;
drivers/block/zloop.c:626: spin_unlock_irqrestore(&zone->wp_lock, flags);
drivers/block/zloop.c:638: struct zloop_zone *zone;
drivers/block/zloop.c:653: zone = &zlo->zones[zone_no];
drivers/block/zloop.c:660: zone->start + zlo->zone_size))
drivers/block/zloop.c:663: if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
drivers/block/zloop.c:664: mutex_lock(&zone->lock);
drivers/block/zloop.c:666: mutex_unlock(&zone->lock);
drivers/block/zloop.c:671: if (!test_bit(ZLOOP_ZONE_CONV, &zone->flags) && is_write) {
drivers/block/zloop.c:672: mutex_lock(&zone->lock);
drivers/block/zloop.c:676: mutex_unlock(&zone->lock);
drivers/block/zloop.c:686: static inline bool zloop_zone_is_active(struct zloop_zone *zone)
drivers/block/zloop.c:688: switch (zone->cond) {
drivers/block/zloop.c:704: struct zloop_zone *zone = &zlo->zones[i];
drivers/block/zloop.c:705: struct file *file = zone->file;
drivers/block/zloop.c:707: if (!zloop_zone_is_active(zone))
drivers/block/zloop.c:710: "user.zloop.wp", &zone->wp, sizeof(zone->wp), 0);
drivers/block/zloop.c:804: struct zloop_zone *zone = &zlo->zones[zone_no];
drivers/block/zloop.c:835: if (cmd->ret < 0 && !test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
drivers/block/zloop.c:841: set_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags);
drivers/block/zloop.c:861: struct zloop_zone *zone = &zlo->zones[zone_no];
drivers/block/zloop.c:862: sector_t zone_end = zone->start + zlo->zone_capacity;
drivers/block/zloop.c:866: spin_lock_irqsave(&zone->wp_lock, flags);
drivers/block/zloop.c:868: if (zone->cond == BLK_ZONE_COND_FULL ||
drivers/block/zloop.c:869: zone->wp + nr_sectors > zone_end) {
drivers/block/zloop.c:870: spin_unlock_irqrestore(&zone->wp_lock, flags);
drivers/block/zloop.c:874: rq->__sector = zone->wp;
drivers/block/zloop.c:875: zone->wp += blk_rq_sectors(rq);
drivers/block/zloop.c:876: if (zone->wp >= zone_end) {
drivers/block/zloop.c:877: zloop_lru_remove_open_zone(zlo, zone);
drivers/block/zloop.c:878: zone->cond = BLK_ZONE_COND_FULL;
drivers/block/zloop.c:879: zone->wp = ULLONG_MAX;
drivers/block/zloop.c:882: spin_unlock_irqrestore(&zone->wp_lock, flags);
drivers/block/zloop.c:951: struct zloop_zone *zone = &zlo->zones[zone_no];
drivers/block/zloop.c:953: mutex_lock(&zone->lock);
drivers/block/zloop.c:955: if (test_and_clear_bit(ZLOOP_ZONE_SEQ_ERROR, &zone->flags)) {
drivers/block/zloop.c:958: mutex_unlock(&zone->lock);
drivers/block/zloop.c:963: blkz.start = zone->start;
drivers/block/zloop.c:965: spin_lock_irqsave(&zone->wp_lock, flags);
drivers/block/zloop.c:966: blkz.wp = zone->wp;
drivers/block/zloop.c:967: spin_unlock_irqrestore(&zone->wp_lock, flags);
drivers/block/zloop.c:968: blkz.cond = zone->cond;
drivers/block/zloop.c:969: if (test_bit(ZLOOP_ZONE_CONV, &zone->flags)) {
drivers/block/zloop.c:977: mutex_unlock(&zone->lock);
drivers/block/zloop.c:995: struct zloop_zone *zone = &zlo->zones[i];
drivers/block/zloop.c:997: mapping_set_gfp_mask(zone->file->f_mapping,
drivers/block/zloop.c:998: zone->old_gfp_mask);
drivers/block/zloop.c:999: fput(zone->file);
drivers/block/zloop.c:1035: struct zloop_zone *zone)
drivers/block/zloop.c:1037: struct block_device *sb_bdev = zone->file->f_mapping->host->i_sb->s_bdev;
drivers/block/zloop.c:1046: if (file_inode(zone->file)->i_sb->s_blocksize <= SZ_4K)
drivers/block/zloop.c:1047: zlo->block_size = file_inode(zone->file)->i_sb->s_blocksize;
drivers/block/zloop.c:1048: else if (!vfs_getattr(&zone->file->f_path, &st, STATX_DIOALIGN, 0) &&
drivers/block/zloop.c:1068: struct zloop_zone *zone = &zlo->zones[zone_no];
drivers/block/zloop.c:1074: mutex_init(&zone->lock);
drivers/block/zloop.c:1075: INIT_LIST_HEAD(&zone->open_zone_entry);
drivers/block/zloop.c:1076: spin_lock_init(&zone->wp_lock);
drivers/block/zloop.c:1077: zone->start = (sector_t)zone_no << zlo->zone_shift;
drivers/block/zloop.c:1087: set_bit(ZLOOP_ZONE_CONV, &zone->flags);
drivers/block/zloop.c:1088: zone->cond = BLK_ZONE_COND_NOT_WP;
drivers/block/zloop.c:1089: zone->wp = U64_MAX;
drivers/block/zloop.c:1091: zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/cnv-%06u",
drivers/block/zloop.c:1093: if (IS_ERR(zone->file)) {
drivers/block/zloop.c:1096: PTR_ERR(zone->file));
drivers/block/zloop.c:1097: return PTR_ERR(zone->file);
drivers/block/zloop.c:1101: ret = zloop_get_block_size(zlo, zone);
drivers/block/zloop.c:1106: ret = vfs_getattr(&zone->file->f_path, &stat, STATX_SIZE, 0);
drivers/block/zloop.c:1119: ret = vfs_truncate(&zone->file->f_path,
drivers/block/zloop.c:1131: zone->file = zloop_filp_open_fmt(oflags, 0600, "%s/%u/seq-%06u",
drivers/block/zloop.c:1133: if (IS_ERR(zone->file)) {
drivers/block/zloop.c:1136: PTR_ERR(zone->file));
drivers/block/zloop.c:1137: return PTR_ERR(zone->file);
drivers/block/zloop.c:1141: ret = zloop_get_block_size(zlo, zone);
drivers/block/zloop.c:1146: zloop_get_block_size(zlo, zone);
drivers/block/zloop.c:1148: mutex_lock(&zone->lock);
drivers/block/zloop.c:1150: mutex_unlock(&zone->lock);
drivers/block/zloop.c:1343: struct zloop_zone *zone = &zlo->zones[j];
drivers/block/zloop.c:1345: if (!IS_ERR_OR_NULL(zone->file))
drivers/block/zloop.c:1346: fput(zone->file);
drivers/block/zloop.c:1388: struct zloop_zone *zone = &zlo->zones[i];
drivers/block/zloop.c:1389: struct file *file = zone->file;
drivers/block/zloop.c:1392: if (!zloop_zone_is_active(zone))
drivers/block/zloop.c:1404: if (old_wp < zone->wp)
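
The zloop write path above advances a zone's write pointer under zone->wp_lock and flips the zone to FULL at capacity. A sketch of that update; struct wp_zone is a hypothetical reduction of struct zloop_zone, and the -EIO return is illustrative:

#include <linux/blkzoned.h>
#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/spinlock.h>

struct wp_zone {
	spinlock_t wp_lock;
	sector_t start, wp;
	unsigned int cond;
};

static int advance_wp(struct wp_zone *zone, sector_t nr_sectors,
		      sector_t zone_end)
{
	unsigned long flags;

	spin_lock_irqsave(&zone->wp_lock, flags);
	/* reject writes to a full zone or past its usable capacity */
	if (zone->cond == BLK_ZONE_COND_FULL ||
	    zone->wp + nr_sectors > zone_end) {
		spin_unlock_irqrestore(&zone->wp_lock, flags);
		return -EIO;
	}
	zone->wp += nr_sectors;
	if (zone->wp >= zone_end) {
		zone->cond = BLK_ZONE_COND_FULL;
		zone->wp = ULLONG_MAX;	/* wp is invalid once full */
	}
	spin_unlock_irqrestore(&zone->wp_lock, flags);
	return 0;
}
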
drivers/gpu/drm/bridge/sil-sii8620.c:1177: u8 zone;
drivers/gpu/drm/bridge/sil-sii8620.c:1203: REG_MHL3_TX_ZONE_CTL, clk_spec[i].zone);
drivers/hwmon/dme1737.c:1553: static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_channels_temp, S_IRUGO, \
drivers/hwmon/dme1737.c:1555: static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point1_temp_hyst, S_IRUGO, \
drivers/hwmon/dme1737.c:1557: static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point1_temp, S_IRUGO, \
drivers/hwmon/dme1737.c:1559: static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point2_temp, S_IRUGO, \
drivers/hwmon/dme1737.c:1561: static SENSOR_DEVICE_ATTR_2(zone##ix##_auto_point3_temp, S_IRUGO, \
drivers/hwmon/lm85.c:227: static int ZONE_TO_REG(int zone)
drivers/hwmon/lm85.c:232: if (zone == lm85_zone_map[i])
drivers/hwmon/lm85.c:318: struct lm85_zone zone[3];
drivers/hwmon/lm85.c:511: data->zone[i].range = val >> 4;
drivers/hwmon/lm85.c:514: data->zone[i].limit =
drivers/hwmon/lm85.c:516: data->zone[i].critical =
drivers/hwmon/lm85.c:522: data->zone[i].limit -= 64;
drivers/hwmon/lm85.c:523: data->zone[i].critical -= 64;
drivers/hwmon/lm85.c:534: data->zone[0].hyst = i >> 4;
drivers/hwmon/lm85.c:535: data->zone[1].hyst = i & 0x0f;
drivers/hwmon/lm85.c:538: data->zone[2].hyst = i >> 4;
drivers/hwmon/lm85.c:820: (data->zone[nr].range << 4)
drivers/hwmon/lm85.c:1135: return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].limit) -
drivers/hwmon/lm85.c:1136: HYST_FROM_REG(data->zone[nr].hyst));
drivers/hwmon/lm85.c:1155: min = TEMP_FROM_REG(data->zone[nr].limit);
drivers/hwmon/lm85.c:1156: data->zone[nr].hyst = HYST_TO_REG(min - val);
drivers/hwmon/lm85.c:1159: (data->zone[0].hyst << 4)
drivers/hwmon/lm85.c:1160: | data->zone[1].hyst);
drivers/hwmon/lm85.c:1163: (data->zone[2].hyst << 4));
drivers/hwmon/lm85.c:1175: return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].limit));
drivers/hwmon/lm85.c:1193: data->zone[nr].limit = TEMP_TO_REG(val);
drivers/hwmon/lm85.c:1195: data->zone[nr].limit);
drivers/hwmon/lm85.c:1198: data->zone[nr].range = RANGE_TO_REG(
drivers/hwmon/lm85.c:1199: TEMP_FROM_REG(data->zone[nr].max_desired) -
drivers/hwmon/lm85.c:1200: TEMP_FROM_REG(data->zone[nr].limit));
drivers/hwmon/lm85.c:1202: ((data->zone[nr].range & 0x0f) << 4)
drivers/hwmon/lm85.c:1215: return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].limit) +
drivers/hwmon/lm85.c:1216: RANGE_FROM_REG(data->zone[nr].range));
drivers/hwmon/lm85.c:1235: min = TEMP_FROM_REG(data->zone[nr].limit);
drivers/hwmon/lm85.c:1236: data->zone[nr].max_desired = TEMP_TO_REG(val);
drivers/hwmon/lm85.c:1237: data->zone[nr].range = RANGE_TO_REG(
drivers/hwmon/lm85.c:1240: ((data->zone[nr].range & 0x0f) << 4)
drivers/hwmon/lm85.c:1252: return sprintf(buf, "%d\n", TEMP_FROM_REG(data->zone[nr].critical));
drivers/hwmon/lm85.c:1270: data->zone[nr].critical = TEMP_TO_REG(val);
drivers/hwmon/lm85.c:1272: data->zone[nr].critical);
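
The lm85 hits above convert between register encodings and millidegree temperatures, e.g. auto_point1_temp_hyst = zone limit minus hysteresis. A simplified sketch of that arithmetic; the real TEMP_FROM_REG/HYST_FROM_REG macros in lm85.c also handle offsets, clamping, and nibble packing omitted here:

#include <linux/types.h>

/* temperature register: signed degrees C; report millidegrees */
static int temp_from_reg(s8 reg)
{
	return reg * 1000;
}

/* hysteresis register: one nibble of degrees C (simplified) */
static int hyst_from_reg(u8 reg)
{
	return (reg & 0x0f) * 1000;
}

/* auto_point1_temp_hyst: the fan-off point below the zone limit */
static int zone_hyst_point(s8 limit_reg, u8 hyst_reg)
{
	return temp_from_reg(limit_reg) - hyst_from_reg(hyst_reg);
}
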
drivers/hwmon/scpi-hwmon.c:67: struct scpi_thermal_zone *zone = thermal_zone_device_priv(tz);
drivers/hwmon/scpi-hwmon.c:68: struct scpi_sensors *scpi_sensors = zone->scpi_sensors;
drivers/hwmon/scpi-hwmon.c:70: struct sensor_data *sensor = &scpi_sensors->data[zone->sensor_id];
drivers/hwmon/scpi-hwmon.c:267: struct scpi_thermal_zone *zone;
drivers/hwmon/scpi-hwmon.c:272: zone = devm_kzalloc(dev, sizeof(*zone), GFP_KERNEL);
drivers/hwmon/scpi-hwmon.c:273: if (!zone)
drivers/hwmon/scpi-hwmon.c:276: zone->sensor_id = i;
drivers/hwmon/scpi-hwmon.c:277: zone->scpi_sensors = scpi_sensors;
drivers/hwmon/scpi-hwmon.c:280: zone,
drivers/hwmon/scpi-hwmon.c:289: devm_kfree(dev, zone);
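
The scpi-hwmon hits above stash a small per-zone struct in the thermal zone's private data and recover it in the .get_temp callback. A sketch of that pattern, assuming a recent kernel that provides thermal_zone_device_priv(); struct my_zone is a hypothetical reduction of scpi_thermal_zone:

#include <linux/thermal.h>

struct my_zone {
	int sensor_id;		/* which sensor this zone wraps */
	int last_temp_mC;	/* cached reading, hypothetical */
};

static int my_get_temp(struct thermal_zone_device *tz, int *temp)
{
	struct my_zone *zone = thermal_zone_device_priv(tz);

	/* a real driver would read sensor zone->sensor_id here */
	*temp = zone->last_temp_mC;
	return 0;
}
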
drivers/iio/light/lm3533-als.c:57: atomic_t zone;
drivers/iio/light/lm3533-als.c:86: static int _lm3533_als_get_zone(struct iio_dev *indio_dev, u8 *zone)
drivers/iio/light/lm3533-als.c:99: *zone = min_t(u8, val, LM3533_ALS_ZONE_MAX);
drivers/iio/light/lm3533-als.c:104: static int lm3533_als_get_zone(struct iio_dev *indio_dev, u8 *zone)
drivers/iio/light/lm3533-als.c:110: *zone = atomic_read(&als->zone);
drivers/iio/light/lm3533-als.c:112: ret = _lm3533_als_get_zone(indio_dev, zone);
drivers/iio/light/lm3533-als.c:124: static inline u8 lm3533_als_get_target_reg(unsigned channel, unsigned zone)
drivers/iio/light/lm3533-als.c:126: return LM3533_REG_ALS_TARGET_BASE + 5 * channel + zone;
drivers/iio/light/lm3533-als.c:130: unsigned zone, u8 *val)
drivers/iio/light/lm3533-als.c:139: if (zone > LM3533_ALS_ZONE_MAX)
drivers/iio/light/lm3533-als.c:142: reg = lm3533_als_get_target_reg(channel, zone);
drivers/iio/light/lm3533-als.c:151: unsigned zone, u8 val)
drivers/iio/light/lm3533-als.c:160: if (zone > LM3533_ALS_ZONE_MAX)
drivers/iio/light/lm3533-als.c:163: reg = lm3533_als_get_target_reg(channel, zone);
drivers/iio/light/lm3533-als.c:174: u8 zone;
drivers/iio/light/lm3533-als.c:178: ret = lm3533_als_get_zone(indio_dev, &zone);
drivers/iio/light/lm3533-als.c:182: ret = lm3533_als_get_target(indio_dev, channel, zone, &target);
drivers/iio/light/lm3533-als.c:251: u8 zone;
drivers/iio/light/lm3533-als.c:255: ret = _lm3533_als_get_zone(indio_dev, &zone);
drivers/iio/light/lm3533-als.c:259: atomic_set(&als->zone, zone);
drivers/iio/light/lm3533-als.c:431: u8 zone;
drivers/iio/light/lm3533-als.c:443: ret = lm3533_als_get_zone(indio_dev, &zone);
drivers/iio/light/lm3533-als.c:447: atomic_set(&als->zone, zone);
drivers/iio/light/lm3533-als.c:470: u8 zone;
drivers/iio/light/lm3533-als.c:473: ret = lm3533_als_get_zone(indio_dev, &zone);
drivers/iio/light/lm3533-als.c:477: return sysfs_emit(buf, "%u\n", zone);
drivers/iio/light/lm3533-als.c:670: static ILLUMINANCE_ATTR_RO(zone);
drivers/iio/light/lm3533-als.c:862: atomic_set(&als->zone, 0);
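
The lm3533-als hits above compute a per-zone register address: each channel owns five consecutive target registers, so the register is base + 5 * channel + zone, with the zone index bounds-checked first. A sketch mirroring that addressing; the base address value here is hypothetical:

#include <linux/errno.h>

#define ALS_ZONE_MAX		4	/* zones 0..4, as above */
#define REG_ALS_TARGET_BASE	0x60	/* hypothetical base address */

/* Return the target register for (channel, zone), or -EINVAL. */
static int als_target_reg(unsigned int channel, unsigned int zone)
{
	if (zone > ALS_ZONE_MAX)
		return -EINVAL;
	return REG_ALS_TARGET_BASE + 5 * channel + zone;
}
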
drivers/md/dm-vdo/action-manager.c:177: zone_count_t zone;
drivers/md/dm-vdo/action-manager.c:183: zone = manager->acting_zone++;
drivers/md/dm-vdo/action-manager.c:195: manager->current_action->zone_action(manager->context, zone, completion);
drivers/md/dm-vdo/block-map.c:60: struct block_map_zone *zone;
drivers/md/dm-vdo/block-map.c:98: struct block_map_zone *zone;
drivers/md/dm-vdo/block-map.c:200: info->vio->completion.callback_thread_id = cache->zone->thread_id;
drivers/md/dm-vdo/block-map.c:250: VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id),
drivers/md/dm-vdo/block-map.c:252: function_name, cache->zone->thread_id, thread_id);
drivers/md/dm-vdo/block-map.c:258: VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state),
drivers/md/dm-vdo/block-map.c:636: static void check_for_drain_complete(struct block_map_zone *zone)
drivers/md/dm-vdo/block-map.c:638: if (vdo_is_state_draining(&zone->state) &&
drivers/md/dm-vdo/block-map.c:639: (zone->active_lookups == 0) &&
drivers/md/dm-vdo/block-map.c:640: !vdo_waitq_has_waiters(&zone->flush_waiters) &&
drivers/md/dm-vdo/block-map.c:641: !is_vio_pool_busy(zone->vio_pool) &&
drivers/md/dm-vdo/block-map.c:642: (zone->page_cache.outstanding_reads == 0) &&
drivers/md/dm-vdo/block-map.c:643: (zone->page_cache.outstanding_writes == 0)) {
drivers/md/dm-vdo/block-map.c:644: vdo_finish_draining_with_result(&zone->state,
drivers/md/dm-vdo/block-map.c:645: (vdo_is_read_only(zone->block_map->vdo) ?
drivers/md/dm-vdo/block-map.c:650: static void enter_zone_read_only_mode(struct block_map_zone *zone, int result)
drivers/md/dm-vdo/block-map.c:652: vdo_enter_read_only_mode(zone->block_map->vdo, result);
drivers/md/dm-vdo/block-map.c:658: vdo_waitq_init(&zone->flush_waiters);
drivers/md/dm-vdo/block-map.c:659: check_for_drain_complete(zone);
drivers/md/dm-vdo/block-map.c:671: enter_zone_read_only_mode(completion->info->cache->zone, result);
drivers/md/dm-vdo/block-map.c:687: vdo_enter_read_only_mode(cache->zone->block_map->vdo, result);
drivers/md/dm-vdo/block-map.c:698: check_for_drain_complete(cache->zone);
drivers/md/dm-vdo/block-map.c:709: nonce_t nonce = info->cache->zone->block_map->nonce;
drivers/md/dm-vdo/block-map.c:740: check_for_drain_complete(cache->zone);
drivers/md/dm-vdo/block-map.c:770: continue_vio_after_io(vio, page_is_loaded, info->cache->zone->thread_id);
drivers/md/dm-vdo/block-map.c:823: continue_vio_after_io(vio, write_pages, info->cache->zone->thread_id);
drivers/md/dm-vdo/block-map.c:1042: check_for_drain_complete(cache->zone);
drivers/md/dm-vdo/block-map.c:1052: continue_vio_after_io(vio, page_is_written_out, info->cache->zone->thread_id);
drivers/md/dm-vdo/block-map.c:1077: vdo_release_recovery_journal_block_reference(cache->zone->block_map->journal,
drivers/md/dm-vdo/block-map.c:1080: cache->zone->zone_number);
drivers/md/dm-vdo/block-map.c:1098: check_for_drain_complete(cache->zone);
drivers/md/dm-vdo/block-map.c:1223: struct block_map_zone *zone, physical_block_number_t pbn,
drivers/md/dm-vdo/block-map.c:1227: struct vdo_page_cache *cache = &zone->page_cache;
drivers/md/dm-vdo/block-map.c:1243: cache->zone->thread_id, parent);
drivers/md/dm-vdo/block-map.c:1394: static inline struct tree_page *get_tree_page(const struct block_map_zone *zone,
drivers/md/dm-vdo/block-map.c:1397: return get_tree_page_by_index(zone->block_map->forest, lock->root_index,
drivers/md/dm-vdo/block-map.c:1456: static bool __must_check is_not_older(struct block_map_zone *zone, u8 a, u8 b)
drivers/md/dm-vdo/block-map.c:1460: result = VDO_ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) &&
drivers/md/dm-vdo/block-map.c:1461: in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)),
drivers/md/dm-vdo/block-map.c:1463: a, b, zone->oldest_generation, zone->generation);
drivers/md/dm-vdo/block-map.c:1465: enter_zone_read_only_mode(zone, result);
drivers/md/dm-vdo/block-map.c:1469: return in_cyclic_range(b, a, zone->generation, 1 << 8);
drivers/md/dm-vdo/block-map.c:1472: static void release_generation(struct block_map_zone *zone, u8 generation)
drivers/md/dm-vdo/block-map.c:1476: result = VDO_ASSERT((zone->dirty_page_counts[generation] > 0),
drivers/md/dm-vdo/block-map.c:1479: enter_zone_read_only_mode(zone, result);
drivers/md/dm-vdo/block-map.c:1483: zone->dirty_page_counts[generation]--;
drivers/md/dm-vdo/block-map.c:1484: while ((zone->dirty_page_counts[zone->oldest_generation] == 0) &&
drivers/md/dm-vdo/block-map.c:1485: (zone->oldest_generation != zone->generation))
drivers/md/dm-vdo/block-map.c:1486: zone->oldest_generation++;
drivers/md/dm-vdo/block-map.c:1489: static void set_generation(struct block_map_zone *zone, struct tree_page *page,
drivers/md/dm-vdo/block-map.c:1501: new_count = ++zone->dirty_page_counts[new_generation];
drivers/md/dm-vdo/block-map.c:1505: enter_zone_read_only_mode(zone, result);
drivers/md/dm-vdo/block-map.c:1510: release_generation(zone, old_generation);
drivers/md/dm-vdo/block-map.c:1521: static void acquire_vio(struct vdo_waiter *waiter, struct block_map_zone *zone)
drivers/md/dm-vdo/block-map.c:1524: acquire_vio_from_pool(zone->vio_pool, waiter);
drivers/md/dm-vdo/block-map.c:1528: static bool attempt_increment(struct block_map_zone *zone)
drivers/md/dm-vdo/block-map.c:1530: u8 generation = zone->generation + 1;
drivers/md/dm-vdo/block-map.c:1532: if (zone->oldest_generation == generation)
drivers/md/dm-vdo/block-map.c:1535: zone->generation = generation;
drivers/md/dm-vdo/block-map.c:1540: static void enqueue_page(struct tree_page *page, struct block_map_zone *zone)
drivers/md/dm-vdo/block-map.c:1542: if ((zone->flusher == NULL) && attempt_increment(zone)) {
drivers/md/dm-vdo/block-map.c:1543: zone->flusher = page;
drivers/md/dm-vdo/block-map.c:1544: acquire_vio(&page->waiter, zone);
drivers/md/dm-vdo/block-map.c:1548: vdo_waitq_enqueue_waiter(&zone->flush_waiters, &page->waiter);
drivers/md/dm-vdo/block-map.c:1557: acquire_vio(waiter, write_context->zone);
drivers/md/dm-vdo/block-map.c:1561: enqueue_page(page, write_context->zone);
drivers/md/dm-vdo/block-map.c:1564: static void return_to_pool(struct block_map_zone *zone, struct pooled_vio *vio)
drivers/md/dm-vdo/block-map.c:1567: check_for_drain_complete(zone);
drivers/md/dm-vdo/block-map.c:1577: struct block_map_zone *zone = pooled->context;
drivers/md/dm-vdo/block-map.c:1579: vdo_release_recovery_journal_block_reference(zone->block_map->journal,
drivers/md/dm-vdo/block-map.c:1582: zone->zone_number);
drivers/md/dm-vdo/block-map.c:1585: release_generation(zone, page->writing_generation);
drivers/md/dm-vdo/block-map.c:1588: if (zone->flusher == page) {
drivers/md/dm-vdo/block-map.c:1590: .zone = zone,
drivers/md/dm-vdo/block-map.c:1594: vdo_waitq_notify_all_waiters(&zone->flush_waiters,
drivers/md/dm-vdo/block-map.c:1596: if (dirty && attempt_increment(zone)) {
drivers/md/dm-vdo/block-map.c:1601: zone->flusher = NULL;
drivers/md/dm-vdo/block-map.c:1605: enqueue_page(page, zone);
drivers/md/dm-vdo/block-map.c:1606: } else if ((zone->flusher == NULL) && vdo_waitq_has_waiters(&zone->flush_waiters) &&
drivers/md/dm-vdo/block-map.c:1607: attempt_increment(zone)) {
drivers/md/dm-vdo/block-map.c:1608: zone->flusher = container_of(vdo_waitq_dequeue_waiter(&zone->flush_waiters),
drivers/md/dm-vdo/block-map.c:1610: write_page(zone->flusher, pooled);
drivers/md/dm-vdo/block-map.c:1614: return_to_pool(zone, pooled);
drivers/md/dm-vdo/block-map.c:1622: struct block_map_zone *zone = pooled->context;
drivers/md/dm-vdo/block-map.c:1625: enter_zone_read_only_mode(zone, result);
drivers/md/dm-vdo/block-map.c:1626: return_to_pool(zone, pooled);
drivers/md/dm-vdo/block-map.c:1635: struct block_map_zone *zone = pooled->context;
drivers/md/dm-vdo/block-map.c:1646: if (zone->flusher == tree_page)
drivers/md/dm-vdo/block-map.c:1657: struct block_map_zone *zone = vio->context;
drivers/md/dm-vdo/block-map.c:1663: zone->thread_id);
drivers/md/dm-vdo/block-map.c:1669: struct block_map_zone *zone = vio->context;
drivers/md/dm-vdo/block-map.c:1672: if ((zone->flusher != tree_page) &&
drivers/md/dm-vdo/block-map.c:1673: is_not_older(zone, tree_page->generation, zone->generation)) {
drivers/md/dm-vdo/block-map.c:1678: enqueue_page(tree_page, zone);
drivers/md/dm-vdo/block-map.c:1679: return_to_pool(zone, vio);
drivers/md/dm-vdo/block-map.c:1685: completion->callback_thread_id = zone->thread_id;
drivers/md/dm-vdo/block-map.c:1714: struct block_map_zone *zone;
drivers/md/dm-vdo/block-map.c:1722: zone = data_vio->logical.zone->block_map_zone;
drivers/md/dm-vdo/block-map.c:1723: lock_holder = vdo_int_map_remove(zone->loading_pages, lock->key);
drivers/md/dm-vdo/block-map.c:1734: --data_vio->logical.zone->block_map_zone->active_lookups;
drivers/md/dm-vdo/block-map.c:1759: enter_zone_read_only_mode(data_vio->logical.zone->block_map_zone, result);
drivers/md/dm-vdo/block-map.c:1792: static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio);
drivers/md/dm-vdo/block-map.c:1793: static void allocate_block_map_page(struct block_map_zone *zone,
drivers/md/dm-vdo/block-map.c:1816: allocate_block_map_page(data_vio->logical.zone->block_map_zone,
drivers/md/dm-vdo/block-map.c:1828: load_block_map_page(data_vio->logical.zone->block_map_zone, data_vio);
drivers/md/dm-vdo/block-map.c:1848: struct block_map_zone *zone = pooled->context;
drivers/md/dm-vdo/block-map.c:1853: tree_page = get_tree_page(zone, tree_lock);
drivers/md/dm-vdo/block-map.c:1855: nonce = zone->block_map->nonce;
drivers/md/dm-vdo/block-map.c:1885: data_vio->logical.zone->thread_id);
drivers/md/dm-vdo/block-map.c:1904: static int attempt_page_lock(struct block_map_zone *zone, struct data_vio *data_vio)
drivers/md/dm-vdo/block-map.c:1921: result = vdo_int_map_put(zone->loading_pages, lock->key,
drivers/md/dm-vdo/block-map.c:1938: static void load_block_map_page(struct block_map_zone *zone, struct data_vio *data_vio)
drivers/md/dm-vdo/block-map.c:1942: result = attempt_page_lock(zone, data_vio);
drivers/md/dm-vdo/block-map.c:1950: acquire_vio_from_pool(zone->vio_pool, &data_vio->waiter);
drivers/md/dm-vdo/block-map.c:1959: data_vio->logical.zone->thread_id))
drivers/md/dm-vdo/block-map.c:1979: allocate_block_map_page(data_vio->logical.zone->block_map_zone, data_vio);
drivers/md/dm-vdo/block-map.c:2014: static void write_expired_elements(struct block_map_zone *zone)
drivers/md/dm-vdo/block-map.c:2019: u8 generation = zone->generation;
drivers/md/dm-vdo/block-map.c:2021: expired = &zone->dirty_lists->expired[VDO_TREE_PAGE];
drivers/md/dm-vdo/block-map.c:2030: enter_zone_read_only_mode(zone, result);
drivers/md/dm-vdo/block-map.c:2034: set_generation(zone, page, generation);
drivers/md/dm-vdo/block-map.c:2036: enqueue_page(page, zone);
drivers/md/dm-vdo/block-map.c:2039: expired = &zone->dirty_lists->expired[VDO_CACHE_PAGE];
drivers/md/dm-vdo/block-map.c:2045: save_pages(&zone->page_cache);
drivers/md/dm-vdo/block-map.c:2057: static void add_to_dirty_lists(struct block_map_zone *zone,
drivers/md/dm-vdo/block-map.c:2063: struct dirty_lists *dirty_lists = zone->dirty_lists;
drivers/md/dm-vdo/block-map.c:2076: write_expired_elements(zone);
drivers/md/dm-vdo/block-map.c:2090: struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
drivers/md/dm-vdo/block-map.c:2096: tree_page = get_tree_page(zone, tree_lock);
drivers/md/dm-vdo/block-map.c:2108: if (zone->flusher != tree_page) {
drivers/md/dm-vdo/block-map.c:2113: set_generation(zone, tree_page, zone->generation);
drivers/md/dm-vdo/block-map.c:2119: add_to_dirty_lists(zone, &tree_page->entry, VDO_TREE_PAGE,
drivers/md/dm-vdo/block-map.c:2126: tree_page = get_tree_page(zone, tree_lock);
drivers/md/dm-vdo/block-map.c:2128: zone->block_map->nonce,
drivers/md/dm-vdo/block-map.c:2141: allocate_block_map_page(zone, data_vio);
drivers/md/dm-vdo/block-map.c:2206: static void allocate_block_map_page(struct block_map_zone *zone,
drivers/md/dm-vdo/block-map.c:2217: result = attempt_page_lock(zone, data_vio);
drivers/md/dm-vdo/block-map.c:2244: struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
drivers/md/dm-vdo/block-map.c:2246: zone->active_lookups++;
drivers/md/dm-vdo/block-map.c:2247: if (vdo_is_state_draining(&zone->state)) {
drivers/md/dm-vdo/block-map.c:2254: page_index = (lock->tree_slots[0].page_index / zone->block_map->root_count);
drivers/md/dm-vdo/block-map.c:2267: page = (struct block_map_page *) (get_tree_page(zone, lock)->page_buffer);
drivers/md/dm-vdo/block-map.c:2294: allocate_block_map_page(zone, data_vio);
drivers/md/dm-vdo/block-map.c:2306: load_block_map_page(zone, data_vio);
drivers/md/dm-vdo/block-map.c:2340: void vdo_write_tree_page(struct tree_page *page, struct block_map_zone *zone)
drivers/md/dm-vdo/block-map.c:2344: if (waiting && (zone->flusher == page))
drivers/md/dm-vdo/block-map.c:2347: set_generation(zone, page, zone->generation);
drivers/md/dm-vdo/block-map.c:2351: enqueue_page(page, zone);
drivers/md/dm-vdo/block-map.c:2552: cursor->parent->zone->block_map->nonce,
drivers/md/dm-vdo/block-map.c:2563: cursor->parent->zone->thread_id);
drivers/md/dm-vdo/block-map.c:2594: vdo_write_tree_page(tree_page, cursor->parent->zone);
drivers/md/dm-vdo/block-map.c:2604: vdo_write_tree_page(tree_page, cursor->parent->zone);
drivers/md/dm-vdo/block-map.c:2613: vdo_write_tree_page(tree_page, cursor->parent->zone);
drivers/md/dm-vdo/block-map.c:2651: pooled->vio.completion.callback_thread_id = cursor->parent->zone->thread_id;
drivers/md/dm-vdo/block-map.c:2708: cursors->zone = &map->zones[0];
drivers/md/dm-vdo/block-map.c:2709: cursors->pool = cursors->zone->vio_pool;
drivers/md/dm-vdo/block-map.c:2744: struct block_map_zone *zone = &map->zones[zone_number];
drivers/md/dm-vdo/block-map.c:2748: zone->zone_number = zone_number;
drivers/md/dm-vdo/block-map.c:2749: zone->thread_id = vdo->thread_config.logical_threads[zone_number];
drivers/md/dm-vdo/block-map.c:2750: zone->block_map = map;
drivers/md/dm-vdo/block-map.c:2752: result = vdo_allocate_extended(maximum_age, eras, __func__, &zone->dirty_lists);
drivers/md/dm-vdo/block-map.c:2756: zone->dirty_lists->maximum_age = maximum_age;
drivers/md/dm-vdo/block-map.c:2757: INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_TREE_PAGE]);
drivers/md/dm-vdo/block-map.c:2758: INIT_LIST_HEAD(&zone->dirty_lists->expired[VDO_CACHE_PAGE]);
drivers/md/dm-vdo/block-map.c:2761: INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_TREE_PAGE]);
drivers/md/dm-vdo/block-map.c:2762: INIT_LIST_HEAD(&zone->dirty_lists->eras[i][VDO_CACHE_PAGE]);
drivers/md/dm-vdo/block-map.c:2765: result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->loading_pages);
drivers/md/dm-vdo/block-map.c:2770: zone->thread_id, VIO_TYPE_BLOCK_MAP_INTERIOR,
drivers/md/dm-vdo/block-map.c:2771: VIO_PRIORITY_METADATA, zone, &zone->vio_pool);
drivers/md/dm-vdo/block-map.c:2775: vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
drivers/md/dm-vdo/block-map.c:2777: zone->page_cache.zone = zone;
drivers/md/dm-vdo/block-map.c:2778: zone->page_cache.vdo = vdo;
drivers/md/dm-vdo/block-map.c:2779: zone->page_cache.page_count = cache_size / map->zone_count;
drivers/md/dm-vdo/block-map.c:2780: zone->page_cache.stats.free_pages = zone->page_cache.page_count;
drivers/md/dm-vdo/block-map.c:2782: result = allocate_cache_components(&zone->page_cache);
drivers/md/dm-vdo/block-map.c:2787: INIT_LIST_HEAD(&zone->page_cache.lru_list);
drivers/md/dm-vdo/block-map.c:2788: INIT_LIST_HEAD(&zone->page_cache.outgoing_list);
drivers/md/dm-vdo/block-map.c:2815: struct block_map_zone *zone = &map->zones[zone_number];
drivers/md/dm-vdo/block-map.c:2817: update_period(zone->dirty_lists, map->current_era_point);
drivers/md/dm-vdo/block-map.c:2818: write_expired_elements(zone);
drivers/md/dm-vdo/block-map.c:2839: static void uninitialize_block_map_zone(struct block_map_zone *zone)
drivers/md/dm-vdo/block-map.c:2841: struct vdo_page_cache *cache = &zone->page_cache;
drivers/md/dm-vdo/block-map.c:2843: vdo_free(vdo_forget(zone->dirty_lists));
drivers/md/dm-vdo/block-map.c:2844: free_vio_pool(vdo_forget(zone->vio_pool));
drivers/md/dm-vdo/block-map.c:2845: vdo_int_map_free(vdo_forget(zone->loading_pages));
drivers/md/dm-vdo/block-map.c:2860: zone_count_t zone;
drivers/md/dm-vdo/block-map.c:2865: for (zone = 0; zone < map->zone_count; zone++)
drivers/md/dm-vdo/block-map.c:2866: uninitialize_block_map_zone(&map->zones[zone]);
drivers/md/dm-vdo/block-map.c:2883: zone_count_t zone = 0;
drivers/md/dm-vdo/block-map.c:2913: for (zone = 0; zone < map->zone_count; zone++) {
drivers/md/dm-vdo/block-map.c:2914: result = initialize_block_map_zone(map, zone, cache_size, maximum_age);
drivers/md/dm-vdo/block-map.c:2989: struct block_map_zone *zone = container_of(state, struct block_map_zone, state);
drivers/md/dm-vdo/block-map.c:2991: VDO_ASSERT_LOG_ONLY((zone->active_lookups == 0),
drivers/md/dm-vdo/block-map.c:2995: while (zone->dirty_lists->oldest_period < zone->dirty_lists->next_period)
drivers/md/dm-vdo/block-map.c:2996: expire_oldest_list(zone->dirty_lists);
drivers/md/dm-vdo/block-map.c:2997: write_expired_elements(zone);
drivers/md/dm-vdo/block-map.c:3000: check_for_drain_complete(zone);
drivers/md/dm-vdo/block-map.c:3008: struct block_map_zone *zone = &map->zones[zone_number];
drivers/md/dm-vdo/block-map.c:3010: vdo_start_draining(&zone->state,
drivers/md/dm-vdo/block-map.c:3027: struct block_map_zone *zone = &map->zones[zone_number];
drivers/md/dm-vdo/block-map.c:3029: vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state));
drivers/md/dm-vdo/block-map.c:3099: struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
drivers/md/dm-vdo/block-map.c:3101: if (vdo_is_state_draining(&zone->state)) {
drivers/md/dm-vdo/block-map.c:3106: vdo_get_page(&data_vio->page_completion, zone,
drivers/md/dm-vdo/block-map.c:3144: mapped.pbn, &data_vio->mapped.zone);
drivers/md/dm-vdo/block-map.c:3216: struct block_map_zone *zone = data_vio->logical.zone->block_map_zone;
drivers/md/dm-vdo/block-map.c:3217: struct block_map *block_map = zone->block_map;
drivers/md/dm-vdo/block-map.c:3233: zone->zone_number);
drivers/md/dm-vdo/block-map.c:3238: zone->zone_number);
drivers/md/dm-vdo/block-map.c:3278: add_to_dirty_lists(info->cache->zone, &info->state_entry,
drivers/md/dm-vdo/block-map.c:3307: zone_count_t zone = 0;
drivers/md/dm-vdo/block-map.c:3311: for (zone = 0; zone < map->zone_count; zone++) {
drivers/md/dm-vdo/block-map.c:3313: &(map->zones[zone].page_cache.stats);
drivers/md/dm-vdo/block-map.h:95: struct block_map_zone *zone;
drivers/md/dm-vdo/block-map.h:302: struct block_map_zone *zone, physical_block_number_t pbn,
drivers/md/dm-vdo/block-map.h:328: void vdo_write_tree_page(struct tree_page *page, struct block_map_zone *zone);
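
The block-map.c hits above track dirty tree pages by 8-bit generation: each generation has a dirty-page count, and the oldest-generation pointer advances once a generation fully drains. A sketch of that bookkeeping; struct gen_zone is a hypothetical reduction of struct block_map_zone:

#include <linux/types.h>

struct gen_zone {
	u8 generation;			/* newest generation */
	u8 oldest_generation;		/* oldest still dirty */
	u32 dirty_page_counts[256];	/* one count per generation */
};

static void release_one(struct gen_zone *zone, u8 generation)
{
	zone->dirty_page_counts[generation]--;
	/* advance past generations that are now fully written out;
	 * u8 arithmetic gives the cyclic wraparound for free */
	while (zone->dirty_page_counts[zone->oldest_generation] == 0 &&
	       zone->oldest_generation != zone->generation)
		zone->oldest_generation++;
}
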
drivers/md/dm-vdo/data-vio.c:254: lock->zone = &vdo->logical_zones->zones[zone_number];
drivers/md/dm-vdo/data-vio.c:437: result = vdo_int_map_put(lock->zone->lbn_operations, lock->lbn,
drivers/md/dm-vdo/data-vio.c:1143: struct int_map *lock_map = lock->zone->lbn_operations;
drivers/md/dm-vdo/data-vio.c:1180: result = vdo_int_map_put(lock->zone->lbn_operations, lock->lbn,
drivers/md/dm-vdo/data-vio.c:1389: allocation->zone = vdo_get_next_allocation_zone(data_vio->logical.zone);
drivers/md/dm-vdo/data-vio.c:1390: allocation->first_allocation_zone = allocation->zone->zone_number;
drivers/md/dm-vdo/data-vio.c:1413: vdo_release_physical_zone_pbn_lock(allocation->zone, locked_pbn,
drivers/md/dm-vdo/data-vio.c:1638: data_vio->logical.zone->thread_id);
drivers/md/dm-vdo/data-vio.c:1688: data_vio->mapped.zone->thread_id);
drivers/md/dm-vdo/data-vio.c:1954: .zone = data_vio->allocation.zone,
drivers/md/dm-vdo/data-vio.h:59: struct logical_zone *zone;
drivers/md/dm-vdo/data-vio.h:87: struct physical_zone *zone;
drivers/md/dm-vdo/data-vio.h:146: struct physical_zone *zone;
drivers/md/dm-vdo/data-vio.h:397: thread_id_t expected = data_vio->logical.zone->thread_id;
drivers/md/dm-vdo/data-vio.h:409: data_vio->logical.zone->thread_id);
drivers/md/dm-vdo/data-vio.h:425: thread_id_t expected = data_vio->allocation.zone->thread_id;
drivers/md/dm-vdo/data-vio.h:438: data_vio->allocation.zone->thread_id);
drivers/md/dm-vdo/data-vio.h:455: thread_id_t expected = data_vio->duplicate.zone->thread_id;
drivers/md/dm-vdo/data-vio.h:468: data_vio->duplicate.zone->thread_id);
drivers/md/dm-vdo/data-vio.h:485: thread_id_t expected = data_vio->mapped.zone->thread_id;
drivers/md/dm-vdo/data-vio.h:497: data_vio->mapped.zone->thread_id);
drivers/md/dm-vdo/data-vio.h:502: thread_id_t expected = data_vio->new_mapped.zone->thread_id;
drivers/md/dm-vdo/data-vio.h:515: data_vio->new_mapped.zone->thread_id);
drivers/md/dm-vdo/dedupe.c:1291: vdo_release_physical_zone_pbn_lock(agent->duplicate.zone,
drivers/md/dm-vdo/dedupe.c:1315: struct physical_zone *zone = agent->duplicate.zone;
drivers/md/dm-vdo/dedupe.c:1336: result = vdo_attempt_physical_zone_pbn_lock(zone, agent->duplicate.pbn,
drivers/md/dm-vdo/dedupe.c:1633: result = vdo_get_physical_zone(vdo, advice->pbn, &advice->zone);
drivers/md/dm-vdo/dedupe.c:1634: if ((result != VDO_SUCCESS) || (advice->zone == NULL)) {
drivers/md/dm-vdo/dedupe.c:1791: struct hash_zone *zone;
drivers/md/dm-vdo/dedupe.c:1799: zone = candidate->hash_zone;
drivers/md/dm-vdo/dedupe.c:1802: increment_stat(&zone->statistics.concurrent_hash_collisions);
drivers/md/dm-vdo/dedupe.c:1804: increment_stat(&zone->statistics.concurrent_data_matches);
drivers/md/dm-vdo/dedupe.c:1915: struct hash_zone *zone = data_vio->hash_zone;
drivers/md/dm-vdo/dedupe.c:1931: removed = vdo_int_map_remove(zone->hash_lock_map, lock_key);
drivers/md/dm-vdo/dedupe.c:1935: VDO_ASSERT_LOG_ONLY(lock != vdo_int_map_get(zone->hash_lock_map, lock_key),
drivers/md/dm-vdo/dedupe.c:1951: return_hash_lock_to_pool(zone, lock);
drivers/md/dm-vdo/dedupe.c:2139: if (!change_timer_state(context->zone, DEDUPE_QUERY_TIMER_IDLE,
drivers/md/dm-vdo/dedupe.c:2145: mod_timer(&context->zone->timer, end_time);
drivers/md/dm-vdo/dedupe.c:2249: vdo_funnel_queue_put(context->zone->timed_out_complete, &context->queue_entry);
drivers/md/dm-vdo/dedupe.c:2256: static void check_for_drain_complete(struct hash_zone *zone)
drivers/md/dm-vdo/dedupe.c:2260: if (!vdo_is_state_draining(&zone->state))
drivers/md/dm-vdo/dedupe.c:2263: if ((atomic_read(&zone->timer_state) == DEDUPE_QUERY_TIMER_IDLE) ||
drivers/md/dm-vdo/dedupe.c:2264: change_timer_state(zone, DEDUPE_QUERY_TIMER_RUNNING,
drivers/md/dm-vdo/dedupe.c:2266: timer_delete_sync(&zone->timer);
drivers/md/dm-vdo/dedupe.c:2278: entry = vdo_funnel_queue_poll(zone->timed_out_complete);
drivers/md/dm-vdo/dedupe.c:2284: list_add(&context->list_entry, &zone->available);
drivers/md/dm-vdo/dedupe.c:2289: WRITE_ONCE(zone->active, zone->active - recycled);
drivers/md/dm-vdo/dedupe.c:2290: VDO_ASSERT_LOG_ONLY(READ_ONCE(zone->active) == 0, "all contexts inactive");
drivers/md/dm-vdo/dedupe.c:2291: vdo_finish_draining(&zone->state);
drivers/md/dm-vdo/dedupe.c:2297: struct hash_zone *zone = as_hash_zone(completion);
drivers/md/dm-vdo/dedupe.c:2302: atomic_set(&zone->timer_state, DEDUPE_QUERY_TIMER_IDLE);
drivers/md/dm-vdo/dedupe.c:2303: list_for_each_entry_safe(context, tmp, &zone->pending, list_entry) {
drivers/md/dm-vdo/dedupe.c:2337: check_for_drain_complete(zone);
drivers/md/dm-vdo/dedupe.c:2342: struct hash_zone *zone = timer_container_of(zone, t, timer);
drivers/md/dm-vdo/dedupe.c:2344: if (change_timer_state(zone, DEDUPE_QUERY_TIMER_RUNNING,
drivers/md/dm-vdo/dedupe.c:2346: vdo_launch_completion(&zone->completion);
drivers/md/dm-vdo/dedupe.c:2354: struct hash_zone *zone = &zones->zones[zone_number];
drivers/md/dm-vdo/dedupe.c:2356: result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->hash_lock_map);
drivers/md/dm-vdo/dedupe.c:2360: vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
drivers/md/dm-vdo/dedupe.c:2361: zone->zone_number = zone_number;
drivers/md/dm-vdo/dedupe.c:2362: zone->thread_id = vdo->thread_config.hash_zone_threads[zone_number];
drivers/md/dm-vdo/dedupe.c:2363: vdo_initialize_completion(&zone->completion, vdo, VDO_HASH_ZONE_COMPLETION);
drivers/md/dm-vdo/dedupe.c:2364: vdo_set_completion_callback(&zone->completion, timeout_index_operations_callback,
drivers/md/dm-vdo/dedupe.c:2365: zone->thread_id);
drivers/md/dm-vdo/dedupe.c:2366: INIT_LIST_HEAD(&zone->lock_pool);
drivers/md/dm-vdo/dedupe.c:2367: result = vdo_allocate(LOCK_POOL_CAPACITY, "hash_lock array", &zone->lock_array);
drivers/md/dm-vdo/dedupe.c:2372: return_hash_lock_to_pool(zone, &zone->lock_array[i]);
drivers/md/dm-vdo/dedupe.c
2374
INIT_LIST_HEAD(&zone->available);
drivers/md/dm-vdo/dedupe.c
2375
INIT_LIST_HEAD(&zone->pending);
drivers/md/dm-vdo/dedupe.c
2376
result = vdo_make_funnel_queue(&zone->timed_out_complete);
drivers/md/dm-vdo/dedupe.c
2380
timer_setup(&zone->timer, timeout_index_operations, 0);
drivers/md/dm-vdo/dedupe.c
2383
struct dedupe_context *context = &zone->contexts[i];
drivers/md/dm-vdo/dedupe.c
2385
context->zone = zone;
drivers/md/dm-vdo/dedupe.c
2388
list_add(&context->list_entry, &zone->available);
drivers/md/dm-vdo/dedupe.c
2391
return vdo_make_default_thread(vdo, zone->thread_id);
drivers/md/dm-vdo/dedupe.c
2475
struct hash_zone *zone = &zones->zones[i];
drivers/md/dm-vdo/dedupe.c
2477
vdo_free_funnel_queue(vdo_forget(zone->timed_out_complete));
drivers/md/dm-vdo/dedupe.c
2478
vdo_int_map_free(vdo_forget(zone->hash_lock_map));
drivers/md/dm-vdo/dedupe.c
2479
vdo_free(vdo_forget(zone->lock_array));
drivers/md/dm-vdo/dedupe.c
2605
struct hash_zone *zone = &(((struct hash_zones *) context)->zones[zone_number]);
drivers/md/dm-vdo/dedupe.c
2607
vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state));
drivers/md/dm-vdo/dedupe.c
2631
static void get_hash_zone_statistics(const struct hash_zone *zone,
drivers/md/dm-vdo/dedupe.c
2634
const struct hash_lock_statistics *stats = &zone->statistics;
drivers/md/dm-vdo/dedupe.c
2640
tally->curr_dedupe_queries += READ_ONCE(zone->active);
drivers/md/dm-vdo/dedupe.c
2684
zone_count_t zone;
drivers/md/dm-vdo/dedupe.c
2686
for (zone = 0; zone < zones->zone_count; zone++)
drivers/md/dm-vdo/dedupe.c
2687
get_hash_zone_statistics(&zones->zones[zone], &stats->hash_lock);
drivers/md/dm-vdo/dedupe.c
2774
static void dump_hash_zone(const struct hash_zone *zone)
drivers/md/dm-vdo/dedupe.c
2778
if (zone->hash_lock_map == NULL) {
drivers/md/dm-vdo/dedupe.c
2779
vdo_log_info("struct hash_zone %u: NULL map", zone->zone_number);
drivers/md/dm-vdo/dedupe.c
2784
zone->zone_number, vdo_int_map_size(zone->hash_lock_map));
drivers/md/dm-vdo/dedupe.c
2786
dump_hash_lock(&zone->lock_array[i]);
drivers/md/dm-vdo/dedupe.c
2796
zone_count_t zone;
drivers/md/dm-vdo/dedupe.c
2807
for (zone = 0; zone < zones->zone_count; zone++)
drivers/md/dm-vdo/dedupe.c
2808
dump_hash_zone(&zones->zones[zone]);
drivers/md/dm-vdo/dedupe.c
2855
static struct dedupe_context * __must_check acquire_context(struct hash_zone *zone)
drivers/md/dm-vdo/dedupe.c
2860
assert_in_hash_zone(zone, __func__);
drivers/md/dm-vdo/dedupe.c
2862
if (!list_empty(&zone->available)) {
drivers/md/dm-vdo/dedupe.c
2863
WRITE_ONCE(zone->active, zone->active + 1);
drivers/md/dm-vdo/dedupe.c
2864
context = list_first_entry(&zone->available, struct dedupe_context,
drivers/md/dm-vdo/dedupe.c
2870
entry = vdo_funnel_queue_poll(zone->timed_out_complete);
drivers/md/dm-vdo/dedupe.c
2903
struct hash_zone *zone = data_vio->hash_zone;
drivers/md/dm-vdo/dedupe.c
2912
context = acquire_context(zone);
drivers/md/dm-vdo/dedupe.c
2924
list_add_tail(&context->list_entry, &zone->pending);
drivers/md/dm-vdo/dedupe.c
321
static inline void assert_in_hash_zone(struct hash_zone *zone, const char *name)
drivers/md/dm-vdo/dedupe.c
323
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
drivers/md/dm-vdo/dedupe.c
332
static inline bool change_timer_state(struct hash_zone *zone, int old, int new)
drivers/md/dm-vdo/dedupe.c
334
return (atomic_cmpxchg(&zone->timer_state, old, new) == old);
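change_timer_state() at dedupe.c:332-334 is the guard that lets exactly one of the request path, the timer callback, and the drain path move the dedupe timer between states: a compare-and-swap that succeeds only for the caller that saw the expected old state. The same shape in portable C11 atomics; the state set here is illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative states; the listing names IDLE and RUNNING. */
    enum { TOY_TIMER_IDLE, TOY_TIMER_RUNNING, TOY_TIMER_FIRED };

    /* Same shape as change_timer_state(): the transition happens only if
     * this caller saw the expected old state. */
    static bool toy_change_timer_state(atomic_int *timer_state, int old,
                                       int new_state)
    {
        return atomic_compare_exchange_strong(timer_state, &old, new_state);
    }

    /* Usage: arm the timer only if nobody else already has, e.g.
     * if (toy_change_timer_state(&state, TOY_TIMER_IDLE, TOY_TIMER_RUNNING))
     *         arm_timer();
     */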
drivers/md/dm-vdo/dedupe.c
342
static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *lock)
drivers/md/dm-vdo/dedupe.c
348
list_add_tail(&lock->pool_node, &zone->lock_pool);
drivers/md/dm-vdo/dedupe.c
694
vdo_release_physical_zone_pbn_lock(agent->duplicate.zone, agent->duplicate.pbn,
drivers/md/dm-vdo/dedupe.c
718
struct hash_zone *zone = context->zone;
drivers/md/dm-vdo/dedupe.c
720
WRITE_ONCE(zone->active, zone->active - 1);
drivers/md/dm-vdo/dedupe.c
721
list_move(&context->list_entry, &zone->available);
drivers/md/dm-vdo/dedupe.c
863
static int __must_check acquire_lock(struct hash_zone *zone,
drivers/md/dm-vdo/dedupe.c
875
result = VDO_ASSERT(!list_empty(&zone->lock_pool),
drivers/md/dm-vdo/dedupe.c
880
new_lock = list_entry(zone->lock_pool.prev, struct hash_lock, pool_node);
drivers/md/dm-vdo/dedupe.c
889
result = vdo_int_map_put(zone->hash_lock_map, hash_lock_key(new_lock),
drivers/md/dm-vdo/dedupe.c
892
return_hash_lock_to_pool(zone, vdo_forget(new_lock));
drivers/md/dm-vdo/dedupe.c
911
return_hash_lock_to_pool(zone, vdo_forget(new_lock));
drivers/md/dm-vdo/dedupe.h
21
struct hash_zone *zone;
drivers/md/dm-vdo/flush.c
231
struct logical_zone *zone = flusher->logical_zone_to_notify;
drivers/md/dm-vdo/flush.c
233
vdo_increment_logical_zone_flush_generation(zone, flusher->notify_generation);
drivers/md/dm-vdo/flush.c
234
if (zone->next == NULL) {
drivers/md/dm-vdo/flush.c
240
flusher->logical_zone_to_notify = zone->next;
drivers/md/dm-vdo/flush.c
317
struct logical_zone *zone;
drivers/md/dm-vdo/flush.c
321
for (zone = &flusher->vdo->logical_zones->zones[0]; zone != NULL; zone = zone->next)
drivers/md/dm-vdo/flush.c
324
READ_ONCE(zone->oldest_active_generation));
drivers/md/dm-vdo/indexer/delta-index.c
1086
static int flush_delta_list(struct delta_zone *zone, u32 flush_index)
drivers/md/dm-vdo/indexer/delta-index.c
1092
delta_list = &zone->delta_lists[flush_index + 1];
drivers/md/dm-vdo/indexer/delta-index.c
1094
buffer[0] = zone->tag;
drivers/md/dm-vdo/indexer/delta-index.c
1097
put_unaligned_le32(zone->first_list + flush_index, &buffer[4]);
drivers/md/dm-vdo/indexer/delta-index.c
1099
result = uds_write_to_buffered_writer(zone->buffered_writer, buffer,
drivers/md/dm-vdo/indexer/delta-index.c
1106
result = uds_write_to_buffered_writer(zone->buffered_writer,
drivers/md/dm-vdo/indexer/delta-index.c
1107
zone->memory + get_delta_list_byte_start(delta_list),
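flush_delta_list() (delta-index.c:1086-1107) writes a small header, the zone tag byte followed by the flushed list number as a little-endian u32 at offset 4, before streaming the raw list bytes. A portable sketch of that encoding; the 8-byte header size is assumed for illustration:

    #include <stdint.h>
    #include <string.h>

    /* Portable equivalent of the kernel's put_unaligned_le32(). */
    static void toy_put_le32(uint32_t value, uint8_t *out)
    {
        out[0] = (uint8_t)(value & 0xff);
        out[1] = (uint8_t)((value >> 8) & 0xff);
        out[2] = (uint8_t)((value >> 16) & 0xff);
        out[3] = (uint8_t)((value >> 24) & 0xff);
    }

    /* Header per flushed list: tag byte first, list number at offset 4. */
    static void toy_encode_flush_header(uint8_t tag, uint32_t first_list,
                                        uint32_t flush_index, uint8_t header[8])
    {
        memset(header, 0, 8);
        header[0] = tag;
        toy_put_le32(first_list + flush_index, &header[4]);
    }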
drivers/md/dm-vdo/indexer/delta-index.c
222
struct delta_zone *zone = &delta_index->delta_zones[z];
drivers/md/dm-vdo/indexer/delta-index.c
223
struct delta_list *delta_lists = zone->delta_lists;
drivers/md/dm-vdo/indexer/delta-index.c
227
(zone->list_count + 2) * sizeof(struct delta_list));
drivers/md/dm-vdo/indexer/delta-index.c
230
list_bits = (u64) zone->size * BITS_PER_BYTE - GUARD_BITS;
drivers/md/dm-vdo/indexer/delta-index.c
231
delta_lists[zone->list_count + 1].start = list_bits;
drivers/md/dm-vdo/indexer/delta-index.c
232
delta_lists[zone->list_count + 1].size = GUARD_BITS;
drivers/md/dm-vdo/indexer/delta-index.c
233
memset(zone->memory + (list_bits / BITS_PER_BYTE), ~0,
drivers/md/dm-vdo/indexer/delta-index.c
237
spacing = list_bits / zone->list_count;
drivers/md/dm-vdo/indexer/delta-index.c
239
for (i = 1; i <= zone->list_count; i++) {
drivers/md/dm-vdo/indexer/delta-index.c
245
zone->discard_count += zone->record_count;
drivers/md/dm-vdo/indexer/delta-index.c
246
zone->record_count = 0;
drivers/md/dm-vdo/indexer/delta-index.c
247
zone->collision_count = 0;
drivers/md/dm-vdo/indexer/index-layout.c
1071
unsigned int zone;
drivers/md/dm-vdo/indexer/index-layout.c
1098
for (zone = 0; zone < index->zone_count; zone++) {
drivers/md/dm-vdo/indexer/index-layout.c
1099
result = open_region_writer(layout, &isl->volume_index_zones[zone],
drivers/md/dm-vdo/indexer/index-layout.c
1100
&writers[zone]);
drivers/md/dm-vdo/indexer/index-layout.c
1102
for (; zone > 0; zone--)
drivers/md/dm-vdo/indexer/index-layout.c
1103
uds_free_buffered_writer(writers[zone - 1]);
drivers/md/dm-vdo/indexer/index-layout.c
1111
for (zone = 0; zone < index->zone_count; zone++)
drivers/md/dm-vdo/indexer/index-layout.c
1112
uds_free_buffered_writer(writers[zone]);
drivers/md/dm-vdo/indexer/index-layout.c
910
unsigned int zone;
drivers/md/dm-vdo/indexer/index-layout.c
931
for (zone = 0; zone < isl->zone_count; zone++) {
drivers/md/dm-vdo/indexer/index-layout.c
932
result = open_region_reader(layout, &isl->volume_index_zones[zone],
drivers/md/dm-vdo/indexer/index-layout.c
933
&readers[zone]);
drivers/md/dm-vdo/indexer/index-layout.c
935
for (; zone > 0; zone--)
drivers/md/dm-vdo/indexer/index-layout.c
936
uds_free_buffered_reader(readers[zone - 1]);
drivers/md/dm-vdo/indexer/index-layout.c
943
for (zone = 0; zone < isl->zone_count; zone++)
drivers/md/dm-vdo/indexer/index-layout.c
944
uds_free_buffered_reader(readers[zone]);
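Both the save path (index-layout.c:1098-1112) and the load path (931-944) use the same partial-cleanup idiom: if opening the resource for zone N fails, count back down and free only the N already-opened ones. A generic sketch with FILE standing in for the buffered reader/writer:

    #include <stdio.h>

    /* Open one stream per zone; on a failure at index 'zone', close
     * exactly the streams already opened, as in the listing's
     * for (; zone > 0; zone--) unwind loops. */
    static int toy_open_per_zone(FILE **streams, unsigned int zone_count,
                                 const char *path)
    {
        unsigned int zone;

        for (zone = 0; zone < zone_count; zone++) {
            streams[zone] = fopen(path, "rb");
            if (streams[zone] == NULL) {
                for (; zone > 0; zone--)
                    fclose(streams[zone - 1]);
                return -1;
            }
        }
        return 0;
    }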
drivers/md/dm-vdo/indexer/index.c
110
unsigned int zone;
drivers/md/dm-vdo/indexer/index.c
1109
static void free_index_zone(struct index_zone *zone)
drivers/md/dm-vdo/indexer/index.c
1111
if (zone == NULL)
drivers/md/dm-vdo/indexer/index.c
1114
uds_free_open_chapter(zone->open_chapter);
drivers/md/dm-vdo/indexer/index.c
1115
uds_free_open_chapter(zone->writing_chapter);
drivers/md/dm-vdo/indexer/index.c
1116
vdo_free(zone);
drivers/md/dm-vdo/indexer/index.c
112
for (zone = 0; zone < index->zone_count; zone++) {
drivers/md/dm-vdo/indexer/index.c
1122
struct index_zone *zone;
drivers/md/dm-vdo/indexer/index.c
1124
result = vdo_allocate(1, "index zone", &zone);
drivers/md/dm-vdo/indexer/index.c
1129
&zone->open_chapter);
drivers/md/dm-vdo/indexer/index.c
113
int result = launch_zone_message(message, zone, index);
drivers/md/dm-vdo/indexer/index.c
1131
free_index_zone(zone);
drivers/md/dm-vdo/indexer/index.c
1136
&zone->writing_chapter);
drivers/md/dm-vdo/indexer/index.c
1138
free_index_zone(zone);
drivers/md/dm-vdo/indexer/index.c
1142
zone->index = index;
drivers/md/dm-vdo/indexer/index.c
1143
zone->id = zone_number;
drivers/md/dm-vdo/indexer/index.c
1144
index->zones[zone_number] = zone;
drivers/md/dm-vdo/indexer/index.c
1157
struct index_zone *zone;
drivers/md/dm-vdo/indexer/index.c
1246
zone = index->zones[z];
drivers/md/dm-vdo/indexer/index.c
1247
zone->oldest_virtual_chapter = index->oldest_virtual_chapter;
drivers/md/dm-vdo/indexer/index.c
1248
zone->newest_virtual_chapter = index->newest_virtual_chapter;
drivers/md/dm-vdo/indexer/index.c
127
struct index_zone *zone;
drivers/md/dm-vdo/indexer/index.c
134
zone = index->zones[request->zone_number];
drivers/md/dm-vdo/indexer/index.c
135
if (!is_zone_chapter_sparse(zone, virtual_chapter))
drivers/md/dm-vdo/indexer/index.c
151
static int simulate_index_zone_barrier_message(struct index_zone *zone,
drivers/md/dm-vdo/indexer/index.c
156
if ((zone->index->zone_count > 1) ||
drivers/md/dm-vdo/indexer/index.c
157
!uds_is_sparse_index_geometry(zone->index->volume->geometry))
drivers/md/dm-vdo/indexer/index.c
160
sparse_virtual_chapter = triage_index_request(zone->index, request);
drivers/md/dm-vdo/indexer/index.c
164
return uds_update_sparse_cache(zone, sparse_virtual_chapter);
drivers/md/dm-vdo/indexer/index.c
197
static int swap_open_chapter(struct index_zone *zone)
drivers/md/dm-vdo/indexer/index.c
201
result = finish_previous_chapter(zone->index, zone->newest_virtual_chapter);
drivers/md/dm-vdo/indexer/index.c
205
swap(zone->open_chapter, zone->writing_chapter);
drivers/md/dm-vdo/indexer/index.c
229
static int announce_chapter_closed(struct index_zone *zone, u64 closed_chapter)
drivers/md/dm-vdo/indexer/index.c
238
for (i = 0; i < zone->index->zone_count; i++) {
drivers/md/dm-vdo/indexer/index.c
239
if (zone->id == i)
drivers/md/dm-vdo/indexer/index.c
242
result = launch_zone_message(zone_message, i, zone->index);
drivers/md/dm-vdo/indexer/index.c
250
static int open_next_chapter(struct index_zone *zone)
drivers/md/dm-vdo/indexer/index.c
259
(unsigned long long) zone->newest_virtual_chapter, zone->id,
drivers/md/dm-vdo/indexer/index.c
260
zone->open_chapter->size,
drivers/md/dm-vdo/indexer/index.c
261
zone->open_chapter->capacity - zone->open_chapter->size);
drivers/md/dm-vdo/indexer/index.c
263
result = swap_open_chapter(zone);
drivers/md/dm-vdo/indexer/index.c
267
closed_chapter = zone->newest_virtual_chapter++;
drivers/md/dm-vdo/indexer/index.c
268
uds_set_volume_index_zone_open_chapter(zone->index->volume_index, zone->id,
drivers/md/dm-vdo/indexer/index.c
269
zone->newest_virtual_chapter);
drivers/md/dm-vdo/indexer/index.c
270
uds_reset_open_chapter(zone->open_chapter);
drivers/md/dm-vdo/indexer/index.c
272
finished_zones = start_closing_chapter(zone->index, zone->id,
drivers/md/dm-vdo/indexer/index.c
273
zone->writing_chapter);
drivers/md/dm-vdo/indexer/index.c
274
if ((finished_zones == 1) && (zone->index->zone_count > 1)) {
drivers/md/dm-vdo/indexer/index.c
275
result = announce_chapter_closed(zone, closed_chapter);
drivers/md/dm-vdo/indexer/index.c
280
expiring = zone->oldest_virtual_chapter;
drivers/md/dm-vdo/indexer/index.c
281
expire_chapters = uds_chapters_to_expire(zone->index->volume->geometry,
drivers/md/dm-vdo/indexer/index.c
282
zone->newest_virtual_chapter);
drivers/md/dm-vdo/indexer/index.c
283
zone->oldest_virtual_chapter += expire_chapters;
drivers/md/dm-vdo/indexer/index.c
285
if (finished_zones < zone->index->zone_count)
drivers/md/dm-vdo/indexer/index.c
289
uds_forget_chapter(zone->index->volume, expiring++);
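open_next_chapter() (index.c:250-289) advances the zone's chapter window: the newest chapter closes and the counter increments, the volume index is repositioned, and uds_chapters_to_expire() decides how many chapters fall off the old end. A sketch of only the window arithmetic; the expire count is taken as a parameter because the real geometry calculation is not shown in the listing:

    #include <stdint.h>

    struct toy_chapter_window {
        uint64_t oldest_virtual_chapter;
        uint64_t newest_virtual_chapter;
    };

    /* Advance the window: one chapter closes, 'expire_count' chapters age
     * out. Returns the chapter that just closed. */
    static uint64_t toy_open_next_chapter(struct toy_chapter_window *win,
                                          uint64_t expire_count)
    {
        uint64_t closed = win->newest_virtual_chapter++;

        win->oldest_virtual_chapter += expire_count;
        return closed;
    }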
drivers/md/dm-vdo/indexer/index.c
294
static int handle_chapter_closed(struct index_zone *zone, u64 virtual_chapter)
drivers/md/dm-vdo/indexer/index.c
296
if (zone->newest_virtual_chapter == virtual_chapter)
drivers/md/dm-vdo/indexer/index.c
297
return open_next_chapter(zone);
drivers/md/dm-vdo/indexer/index.c
305
struct index_zone *zone = request->index->zones[request->zone_number];
drivers/md/dm-vdo/indexer/index.c
309
return uds_update_sparse_cache(zone, message->virtual_chapter);
drivers/md/dm-vdo/indexer/index.c
312
return handle_chapter_closed(zone, message->virtual_chapter);
drivers/md/dm-vdo/indexer/index.c
330
const struct index_zone *zone, u64 virtual_chapter)
drivers/md/dm-vdo/indexer/index.c
333
if (virtual_chapter == zone->newest_virtual_chapter)
drivers/md/dm-vdo/indexer/index.c
335
else if (is_zone_chapter_sparse(zone, virtual_chapter))
drivers/md/dm-vdo/indexer/index.c
341
static int search_sparse_cache_in_zone(struct index_zone *zone, struct uds_request *request,
drivers/md/dm-vdo/indexer/index.c
349
result = uds_search_sparse_cache(zone, &request->record_name, &virtual_chapter,
drivers/md/dm-vdo/indexer/index.c
355
volume = zone->index->volume;
drivers/md/dm-vdo/indexer/index.c
361
static int get_record_from_zone(struct index_zone *zone, struct uds_request *request,
drivers/md/dm-vdo/indexer/index.c
374
if (request->virtual_chapter == zone->newest_virtual_chapter) {
drivers/md/dm-vdo/indexer/index.c
375
uds_search_open_chapter(zone->open_chapter, &request->record_name,
drivers/md/dm-vdo/indexer/index.c
380
if ((zone->newest_virtual_chapter > 0) &&
drivers/md/dm-vdo/indexer/index.c
381
(request->virtual_chapter == (zone->newest_virtual_chapter - 1)) &&
drivers/md/dm-vdo/indexer/index.c
382
(zone->writing_chapter->size > 0)) {
drivers/md/dm-vdo/indexer/index.c
383
uds_search_open_chapter(zone->writing_chapter, &request->record_name,
drivers/md/dm-vdo/indexer/index.c
388
volume = zone->index->volume;
drivers/md/dm-vdo/indexer/index.c
389
if (is_zone_chapter_sparse(zone, request->virtual_chapter) &&
drivers/md/dm-vdo/indexer/index.c
392
return search_sparse_cache_in_zone(zone, request,
drivers/md/dm-vdo/indexer/index.c
398
static int put_record_in_zone(struct index_zone *zone, struct uds_request *request,
drivers/md/dm-vdo/indexer/index.c
403
remaining = uds_put_open_chapter(zone->open_chapter, &request->record_name,
drivers/md/dm-vdo/indexer/index.c
406
return open_next_chapter(zone);
drivers/md/dm-vdo/indexer/index.c
411
static int search_index_zone(struct index_zone *zone, struct uds_request *request)
drivers/md/dm-vdo/indexer/index.c
419
result = uds_get_volume_index_record(zone->index->volume_index,
drivers/md/dm-vdo/indexer/index.c
429
result = get_record_from_zone(zone, request, &found);
drivers/md/dm-vdo/indexer/index.c
435
set_chapter_location(request, zone, record.virtual_chapter);
drivers/md/dm-vdo/indexer/index.c
444
chapter = zone->newest_virtual_chapter;
drivers/md/dm-vdo/indexer/index.c
473
} else if (uds_is_sparse_index_geometry(zone->index->volume->geometry) &&
drivers/md/dm-vdo/indexer/index.c
474
!uds_is_volume_index_sample(zone->index->volume_index,
drivers/md/dm-vdo/indexer/index.c
476
result = search_sparse_cache_in_zone(zone, request, NO_CHAPTER,
drivers/md/dm-vdo/indexer/index.c
517
return put_record_in_zone(zone, request, metadata);
drivers/md/dm-vdo/indexer/index.c
520
static int remove_from_index_zone(struct index_zone *zone, struct uds_request *request)
drivers/md/dm-vdo/indexer/index.c
525
result = uds_get_volume_index_record(zone->index->volume_index,
drivers/md/dm-vdo/indexer/index.c
536
set_chapter_location(request, zone, record.virtual_chapter);
drivers/md/dm-vdo/indexer/index.c
545
result = get_record_from_zone(zone, request, &found);
drivers/md/dm-vdo/indexer/index.c
555
set_chapter_location(request, zone, record.virtual_chapter);
drivers/md/dm-vdo/indexer/index.c
571
uds_remove_from_open_chapter(zone->open_chapter, &request->record_name);
drivers/md/dm-vdo/indexer/index.c
579
struct index_zone *zone = index->zones[request->zone_number];
drivers/md/dm-vdo/indexer/index.c
582
result = simulate_index_zone_barrier_message(zone, request);
drivers/md/dm-vdo/indexer/index.c
592
result = search_index_zone(zone, request);
drivers/md/dm-vdo/indexer/index.c
596
result = remove_from_index_zone(zone, request);
drivers/md/dm-vdo/indexer/index.c
78
static bool is_zone_chapter_sparse(const struct index_zone *zone, u64 virtual_chapter)
drivers/md/dm-vdo/indexer/index.c
80
return uds_is_chapter_sparse(zone->index->volume->geometry,
drivers/md/dm-vdo/indexer/index.c
81
zone->oldest_virtual_chapter,
drivers/md/dm-vdo/indexer/index.c
82
zone->newest_virtual_chapter, virtual_chapter);
drivers/md/dm-vdo/indexer/index.c
85
static int launch_zone_message(struct uds_zone_message message, unsigned int zone,
drivers/md/dm-vdo/indexer/index.c
97
request->zone_number = zone;
drivers/md/dm-vdo/indexer/open-chapter.c
223
struct open_chapter_zone *zone = chapter_zones[z];
drivers/md/dm-vdo/indexer/open-chapter.c
225
if (zone->size == zone->capacity) {
drivers/md/dm-vdo/indexer/open-chapter.c
226
fill_record = &zone->records[zone->size];
drivers/md/dm-vdo/indexer/open-chapter.c
377
unsigned int zone = 0;
drivers/md/dm-vdo/indexer/open-chapter.c
385
zone = uds_get_volume_index_zone(index->volume_index,
drivers/md/dm-vdo/indexer/open-chapter.c
388
if (!full_flags[zone]) {
drivers/md/dm-vdo/indexer/open-chapter.c
392
open_chapter = index->zones[zone]->open_chapter;
drivers/md/dm-vdo/indexer/open-chapter.c
396
full_flags[zone] = (remaining <= 1);
drivers/md/dm-vdo/indexer/sparse-cache.c
490
int uds_update_sparse_cache(struct index_zone *zone, u64 virtual_chapter)
drivers/md/dm-vdo/indexer/sparse-cache.c
493
const struct uds_index *index = zone->index;
drivers/md/dm-vdo/indexer/sparse-cache.c
496
if (uds_sparse_cache_contains(cache, virtual_chapter, zone->id))
drivers/md/dm-vdo/indexer/sparse-cache.c
512
if (zone->id == ZONE_ZERO) {
drivers/md/dm-vdo/indexer/sparse-cache.c
516
purge_search_list(list, cache, zone->oldest_virtual_chapter);
drivers/md/dm-vdo/indexer/sparse-cache.c
573
int uds_search_sparse_cache(struct index_zone *zone, const struct uds_record_name *name,
drivers/md/dm-vdo/indexer/sparse-cache.c
577
struct volume *volume = zone->index->volume;
drivers/md/dm-vdo/indexer/sparse-cache.c
586
search_list = cache->search_lists[zone->id];
drivers/md/dm-vdo/indexer/sparse-cache.c
590
if (should_skip_chapter(chapter, zone->oldest_virtual_chapter,
drivers/md/dm-vdo/indexer/sparse-cache.c
607
if (zone->id == ZONE_ZERO)
drivers/md/dm-vdo/indexer/sparse-cache.c
614
if (zone->id == ZONE_ZERO)
drivers/md/dm-vdo/indexer/sparse-cache.h
38
int __must_check uds_update_sparse_cache(struct index_zone *zone, u64 virtual_chapter);
drivers/md/dm-vdo/indexer/sparse-cache.h
42
int __must_check uds_search_sparse_cache(struct index_zone *zone,
drivers/md/dm-vdo/indexer/volume-index.c
1111
unsigned int zone;
drivers/md/dm-vdo/indexer/volume-index.c
1113
for (zone = 0; zone < writer_count; zone++) {
drivers/md/dm-vdo/indexer/volume-index.c
1114
result = start_saving_volume_index(volume_index, zone, writers[zone]);
drivers/md/dm-vdo/indexer/volume-index.c
1118
result = finish_saving_volume_index(volume_index, zone);
drivers/md/dm-vdo/indexer/volume-index.c
1122
result = uds_write_guard_delta_list(writers[zone]);
drivers/md/dm-vdo/indexer/volume-index.c
1126
result = uds_flush_buffered_writer(writers[zone]);
drivers/md/dm-vdo/indexer/volume-index.c
1226
unsigned int zone;
drivers/md/dm-vdo/indexer/volume-index.c
1257
for (zone = 0; zone < config->zone_count; zone++)
drivers/md/dm-vdo/indexer/volume-index.c
1258
mutex_init(&volume_index->zones[zone].hook_mutex);
drivers/md/dm-vdo/indexer/volume-index.c
543
unsigned int zone =
drivers/md/dm-vdo/indexer/volume-index.c
545
struct mutex *mutex = &volume_index->zones[zone].hook_mutex;
drivers/md/dm-vdo/indexer/volume-index.c
627
struct volume_sub_index_zone *zone = &sub_index->zones[zone_number];
drivers/md/dm-vdo/indexer/volume-index.c
631
zone->virtual_chapter_low = (virtual_chapter >= sub_index->chapter_count ?
drivers/md/dm-vdo/indexer/volume-index.c
634
zone->virtual_chapter_high = virtual_chapter;
drivers/md/dm-vdo/indexer/volume-index.c
651
(unsigned long long) zone->virtual_chapter_low);
drivers/md/dm-vdo/indexer/volume-index.c
652
zone->early_flushes++;
drivers/md/dm-vdo/indexer/volume-index.c
653
zone->virtual_chapter_low++;
drivers/md/dm-vdo/indexer/volume-index.c
655
u64 first_expired = zone->virtual_chapter_low;
drivers/md/dm-vdo/indexer/volume-index.c
657
if (first_expired + expire_count < zone->virtual_chapter_high) {
drivers/md/dm-vdo/indexer/volume-index.c
658
zone->early_flushes += expire_count;
drivers/md/dm-vdo/indexer/volume-index.c
659
zone->virtual_chapter_low += expire_count;
drivers/md/dm-vdo/indexer/volume-index.c
661
zone->early_flushes +=
drivers/md/dm-vdo/indexer/volume-index.c
662
zone->virtual_chapter_high - zone->virtual_chapter_low;
drivers/md/dm-vdo/indexer/volume-index.c
663
zone->virtual_chapter_low = zone->virtual_chapter_high;
drivers/md/dm-vdo/indexer/volume-index.c
670
(unsigned long long) zone->virtual_chapter_low - 1);
drivers/md/dm-vdo/indexer/volume-index.c
703
unsigned int zone;
drivers/md/dm-vdo/indexer/volume-index.c
705
for (zone = 0; zone < volume_index->zone_count; zone++)
drivers/md/dm-vdo/indexer/volume-index.c
706
uds_set_volume_index_zone_open_chapter(volume_index, zone, virtual_chapter);
drivers/md/dm-vdo/indexer/volume-index.c
751
const struct volume_sub_index_zone *zone = &sub_index->zones[zone_number];
drivers/md/dm-vdo/indexer/volume-index.c
766
rolling_chapter = (index_chapter - zone->virtual_chapter_low) & sub_index->chapter_mask;
drivers/md/dm-vdo/indexer/volume-index.c
768
virtual_chapter = zone->virtual_chapter_low + rolling_chapter;
drivers/md/dm-vdo/indexer/volume-index.c
769
if (virtual_chapter > zone->virtual_chapter_high)
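The lookup at volume-index.c:751-769 stores only the low bits of a chapter number and reconstructs the full virtual chapter against the zone's [virtual_chapter_low, virtual_chapter_high] window; chapter_mask is chapter_count - 1 for a power-of-two chapter count. A worked sketch of that arithmetic; the UINT64_MAX sentinel for a stale record is an assumption of this sketch:

    #include <stdint.h>

    /* Reconstruct a full virtual chapter from its stored low bits. */
    static uint64_t toy_unroll_chapter(uint64_t index_chapter,
                                       uint64_t chapter_mask,
                                       uint64_t low, uint64_t high)
    {
        uint64_t rolling = (index_chapter - low) & chapter_mask;
        uint64_t virtual_chapter = low + rolling;

        /* Beyond the window means the record is stale. */
        return (virtual_chapter > high) ? UINT64_MAX : virtual_chapter;
    }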
drivers/md/dm-vdo/logical-zone.c
103
for (zone = 0; zone < zone_count; zone++) {
drivers/md/dm-vdo/logical-zone.c
104
result = initialize_zone(zones, zone);
drivers/md/dm-vdo/logical-zone.c
142
static inline void assert_on_zone_thread(struct logical_zone *zone, const char *what)
drivers/md/dm-vdo/logical-zone.c
144
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
drivers/md/dm-vdo/logical-zone.c
152
static void check_for_drain_complete(struct logical_zone *zone)
drivers/md/dm-vdo/logical-zone.c
154
if (!vdo_is_state_draining(&zone->state) || zone->notifying ||
drivers/md/dm-vdo/logical-zone.c
155
!list_empty(&zone->write_vios))
drivers/md/dm-vdo/logical-zone.c
158
vdo_finish_draining(&zone->state);
drivers/md/dm-vdo/logical-zone.c
190
struct logical_zone *zone = &(((struct logical_zones *) context)->zones[zone_number]);
drivers/md/dm-vdo/logical-zone.c
192
vdo_fail_completion(parent, vdo_resume_if_quiescent(&zone->state));
drivers/md/dm-vdo/logical-zone.c
212
static bool update_oldest_active_generation(struct logical_zone *zone)
drivers/md/dm-vdo/logical-zone.c
215
list_first_entry_or_null(&zone->write_vios, struct data_vio,
drivers/md/dm-vdo/logical-zone.c
218
(data_vio == NULL) ? zone->flush_generation : data_vio->flush_generation;
drivers/md/dm-vdo/logical-zone.c
220
if (oldest == zone->oldest_active_generation)
drivers/md/dm-vdo/logical-zone.c
223
WRITE_ONCE(zone->oldest_active_generation, oldest);
drivers/md/dm-vdo/logical-zone.c
233
void vdo_increment_logical_zone_flush_generation(struct logical_zone *zone,
drivers/md/dm-vdo/logical-zone.c
236
assert_on_zone_thread(zone, __func__);
drivers/md/dm-vdo/logical-zone.c
237
VDO_ASSERT_LOG_ONLY((zone->flush_generation == expected_generation),
drivers/md/dm-vdo/logical-zone.c
239
zone->zone_number, (unsigned long long) zone->flush_generation,
drivers/md/dm-vdo/logical-zone.c
242
zone->flush_generation++;
drivers/md/dm-vdo/logical-zone.c
243
zone->ios_in_flush_generation = 0;
drivers/md/dm-vdo/logical-zone.c
244
update_oldest_active_generation(zone);
drivers/md/dm-vdo/logical-zone.c
254
struct logical_zone *zone = data_vio->logical.zone;
drivers/md/dm-vdo/logical-zone.c
256
assert_on_zone_thread(zone, __func__);
drivers/md/dm-vdo/logical-zone.c
257
VDO_ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal");
drivers/md/dm-vdo/logical-zone.c
259
data_vio->flush_generation = zone->flush_generation;
drivers/md/dm-vdo/logical-zone.c
260
list_add_tail(&data_vio->write_entry, &zone->write_vios);
drivers/md/dm-vdo/logical-zone.c
261
zone->ios_in_flush_generation++;
drivers/md/dm-vdo/logical-zone.c
274
struct logical_zone *zone = as_logical_zone(completion);
drivers/md/dm-vdo/logical-zone.c
276
vdo_complete_flushes(zone->zones->vdo->flusher);
drivers/md/dm-vdo/logical-zone.c
279
zone->thread_id);
drivers/md/dm-vdo/logical-zone.c
289
struct logical_zone *zone = as_logical_zone(completion);
drivers/md/dm-vdo/logical-zone.c
291
assert_on_zone_thread(zone, __func__);
drivers/md/dm-vdo/logical-zone.c
292
if (zone->oldest_active_generation <= zone->notification_generation) {
drivers/md/dm-vdo/logical-zone.c
293
zone->notifying = false;
drivers/md/dm-vdo/logical-zone.c
294
check_for_drain_complete(zone);
drivers/md/dm-vdo/logical-zone.c
298
zone->notifying = true;
drivers/md/dm-vdo/logical-zone.c
299
zone->notification_generation = zone->oldest_active_generation;
drivers/md/dm-vdo/logical-zone.c
300
vdo_launch_completion_callback(&zone->completion, notify_flusher,
drivers/md/dm-vdo/logical-zone.c
301
vdo_get_flusher_thread_id(zone->zones->vdo->flusher));
drivers/md/dm-vdo/logical-zone.c
314
struct logical_zone *zone = data_vio->logical.zone;
drivers/md/dm-vdo/logical-zone.c
316
assert_on_zone_thread(zone, __func__);
drivers/md/dm-vdo/logical-zone.c
322
VDO_ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation),
drivers/md/dm-vdo/logical-zone.c
325
(unsigned long long) zone->oldest_active_generation);
drivers/md/dm-vdo/logical-zone.c
327
if (!update_oldest_active_generation(zone) || zone->notifying)
drivers/md/dm-vdo/logical-zone.c
330
attempt_generation_complete_notification(&zone->completion);
drivers/md/dm-vdo/logical-zone.c
333
struct physical_zone *vdo_get_next_allocation_zone(struct logical_zone *zone)
drivers/md/dm-vdo/logical-zone.c
335
if (zone->allocation_count == ALLOCATIONS_PER_ZONE) {
drivers/md/dm-vdo/logical-zone.c
336
zone->allocation_count = 0;
drivers/md/dm-vdo/logical-zone.c
337
zone->allocation_zone = zone->allocation_zone->next;
drivers/md/dm-vdo/logical-zone.c
340
zone->allocation_count++;
drivers/md/dm-vdo/logical-zone.c
341
return zone->allocation_zone;
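vdo_get_next_allocation_zone() (logical-zone.c:333-341) spreads allocations across physical zones round-robin: each logical zone hands out ALLOCATIONS_PER_ZONE allocations from its current physical zone, then steps to ->next, which physical-zone.c:344 wires into a ring. A sketch; the quota value here is illustrative:

    /* The physical zones form a ring via ->next; each logical zone takes
     * a fixed quota from its current position before moving on. */
    enum { TOY_ALLOCATIONS_PER_ZONE = 128 };

    struct toy_physical_zone {
        struct toy_physical_zone *next;
    };

    struct toy_logical_zone {
        unsigned int allocation_count;
        struct toy_physical_zone *allocation_zone;
    };

    static struct toy_physical_zone *
    toy_get_next_allocation_zone(struct toy_logical_zone *zone)
    {
        if (zone->allocation_count == TOY_ALLOCATIONS_PER_ZONE) {
            zone->allocation_count = 0;
            zone->allocation_zone = zone->allocation_zone->next;
        }
        zone->allocation_count++;
        return zone->allocation_zone;
    }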
drivers/md/dm-vdo/logical-zone.c
351
void vdo_dump_logical_zone(const struct logical_zone *zone)
drivers/md/dm-vdo/logical-zone.c
353
vdo_log_info("logical_zone %u", zone->zone_number);
drivers/md/dm-vdo/logical-zone.c
355
(unsigned long long) READ_ONCE(zone->flush_generation),
drivers/md/dm-vdo/logical-zone.c
356
(unsigned long long) READ_ONCE(zone->oldest_active_generation),
drivers/md/dm-vdo/logical-zone.c
357
(unsigned long long) READ_ONCE(zone->notification_generation),
drivers/md/dm-vdo/logical-zone.c
358
vdo_bool_to_string(READ_ONCE(zone->notifying)),
drivers/md/dm-vdo/logical-zone.c
359
(unsigned long long) READ_ONCE(zone->ios_in_flush_generation));
drivers/md/dm-vdo/logical-zone.c
55
struct logical_zone *zone = &zones->zones[zone_number];
drivers/md/dm-vdo/logical-zone.c
58
result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->lbn_operations);
drivers/md/dm-vdo/logical-zone.c
63
zone->next = &zones->zones[zone_number + 1];
drivers/md/dm-vdo/logical-zone.c
65
vdo_initialize_completion(&zone->completion, vdo,
drivers/md/dm-vdo/logical-zone.c
67
zone->zones = zones;
drivers/md/dm-vdo/logical-zone.c
68
zone->zone_number = zone_number;
drivers/md/dm-vdo/logical-zone.c
69
zone->thread_id = vdo->thread_config.logical_threads[zone_number];
drivers/md/dm-vdo/logical-zone.c
70
zone->block_map_zone = &vdo->block_map->zones[zone_number];
drivers/md/dm-vdo/logical-zone.c
71
INIT_LIST_HEAD(&zone->write_vios);
drivers/md/dm-vdo/logical-zone.c
72
vdo_set_admin_state_code(&zone->state, VDO_ADMIN_STATE_NORMAL_OPERATION);
drivers/md/dm-vdo/logical-zone.c
74
allocation_zone_number = zone->thread_id % vdo->thread_config.physical_zone_count;
drivers/md/dm-vdo/logical-zone.c
75
zone->allocation_zone = &vdo->physical_zones->zones[allocation_zone_number];
drivers/md/dm-vdo/logical-zone.c
77
return vdo_make_default_thread(vdo, zone->thread_id);
drivers/md/dm-vdo/logical-zone.c
91
zone_count_t zone;
drivers/md/dm-vdo/logical-zone.h
78
void vdo_increment_logical_zone_flush_generation(struct logical_zone *zone,
drivers/md/dm-vdo/logical-zone.h
85
struct physical_zone * __must_check vdo_get_next_allocation_zone(struct logical_zone *zone);
drivers/md/dm-vdo/logical-zone.h
87
void vdo_dump_logical_zone(const struct logical_zone *zone);
drivers/md/dm-vdo/packer.c
260
.zone = allocation->zone,
drivers/md/dm-vdo/packer.c
300
if (vdo_requeue_completion_if_needed(completion, allocation->zone->thread_id))
drivers/md/dm-vdo/physical-zone.c
329
struct physical_zone *zone = &zones->zones[zone_number];
drivers/md/dm-vdo/physical-zone.c
331
result = vdo_int_map_create(VDO_LOCK_MAP_CAPACITY, &zone->pbn_operations);
drivers/md/dm-vdo/physical-zone.c
335
result = make_pbn_lock_pool(LOCK_POOL_CAPACITY, &zone->lock_pool);
drivers/md/dm-vdo/physical-zone.c
337
vdo_int_map_free(zone->pbn_operations);
drivers/md/dm-vdo/physical-zone.c
341
zone->zone_number = zone_number;
drivers/md/dm-vdo/physical-zone.c
342
zone->thread_id = vdo->thread_config.physical_threads[zone_number];
drivers/md/dm-vdo/physical-zone.c
343
zone->allocator = &vdo->depot->allocators[zone_number];
drivers/md/dm-vdo/physical-zone.c
344
zone->next = &zones->zones[(zone_number + 1) % vdo->thread_config.physical_zone_count];
drivers/md/dm-vdo/physical-zone.c
345
result = vdo_make_default_thread(vdo, zone->thread_id);
drivers/md/dm-vdo/physical-zone.c
347
free_pbn_lock_pool(vdo_forget(zone->lock_pool));
drivers/md/dm-vdo/physical-zone.c
348
vdo_int_map_free(zone->pbn_operations);
drivers/md/dm-vdo/physical-zone.c
398
struct physical_zone *zone = &zones->zones[index];
drivers/md/dm-vdo/physical-zone.c
400
free_pbn_lock_pool(vdo_forget(zone->lock_pool));
drivers/md/dm-vdo/physical-zone.c
401
vdo_int_map_free(vdo_forget(zone->pbn_operations));
drivers/md/dm-vdo/physical-zone.c
414
struct pbn_lock *vdo_get_physical_zone_pbn_lock(struct physical_zone *zone,
drivers/md/dm-vdo/physical-zone.c
417
return ((zone == NULL) ? NULL : vdo_int_map_get(zone->pbn_operations, pbn));
drivers/md/dm-vdo/physical-zone.c
435
int vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone,
drivers/md/dm-vdo/physical-zone.c
447
result = borrow_pbn_lock_from_pool(zone->lock_pool, type, &new_lock);
drivers/md/dm-vdo/physical-zone.c
453
result = vdo_int_map_put(zone->pbn_operations, pbn, new_lock, false,
drivers/md/dm-vdo/physical-zone.c
456
return_pbn_lock_to_pool(zone->lock_pool, new_lock);
drivers/md/dm-vdo/physical-zone.c
462
return_pbn_lock_to_pool(zone->lock_pool, vdo_forget(new_lock));
drivers/md/dm-vdo/physical-zone.c
490
result = vdo_allocate_block(allocation->zone->allocator, &allocation->pbn);
drivers/md/dm-vdo/physical-zone.c
494
result = vdo_attempt_physical_zone_pbn_lock(allocation->zone, allocation->pbn,
drivers/md/dm-vdo/physical-zone.c
525
data_vio->allocation.first_allocation_zone = data_vio->allocation.zone->zone_number;
drivers/md/dm-vdo/physical-zone.c
541
struct physical_zone *zone = allocation->zone;
drivers/md/dm-vdo/physical-zone.c
545
bool tried_all = (allocation->first_allocation_zone == zone->next->zone_number);
drivers/md/dm-vdo/physical-zone.c
555
allocation->first_allocation_zone = zone->zone_number;
drivers/md/dm-vdo/physical-zone.c
560
result = vdo_enqueue_clean_slab_waiter(zone->allocator,
drivers/md/dm-vdo/physical-zone.c
573
allocation->zone = zone->next;
drivers/md/dm-vdo/physical-zone.c
574
completion->callback_thread_id = allocation->zone->thread_id;
drivers/md/dm-vdo/physical-zone.c
610
void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone,
drivers/md/dm-vdo/physical-zone.c
628
holder = vdo_int_map_remove(zone->pbn_operations, locked_pbn);
drivers/md/dm-vdo/physical-zone.c
632
release_pbn_lock_provisional_reference(lock, locked_pbn, zone->allocator);
drivers/md/dm-vdo/physical-zone.c
633
return_pbn_lock_to_pool(zone->lock_pool, lock);
drivers/md/dm-vdo/physical-zone.c
640
void vdo_dump_physical_zone(const struct physical_zone *zone)
drivers/md/dm-vdo/physical-zone.c
642
vdo_dump_block_allocator(zone->allocator);
drivers/md/dm-vdo/physical-zone.h
102
int __must_check vdo_attempt_physical_zone_pbn_lock(struct physical_zone *zone,
drivers/md/dm-vdo/physical-zone.h
109
void vdo_release_physical_zone_pbn_lock(struct physical_zone *zone,
drivers/md/dm-vdo/physical-zone.h
113
void vdo_dump_physical_zone(const struct physical_zone *zone);
drivers/md/dm-vdo/physical-zone.h
99
struct pbn_lock * __must_check vdo_get_physical_zone_pbn_lock(struct physical_zone *zone,
drivers/md/dm-vdo/slab-depot.c
1484
if (updater->zpbn.zone != NULL) {
drivers/md/dm-vdo/slab-depot.c
1485
struct pbn_lock *lock = vdo_get_physical_zone_pbn_lock(updater->zpbn.zone,
drivers/md/dm-vdo/slab-depot.c
4059
zone_count_t zone)
drivers/md/dm-vdo/slab-depot.c
4063
struct block_allocator *allocator = &depot->allocators[zone];
drivers/md/dm-vdo/slab-depot.c
4071
.zone_number = zone,
drivers/md/dm-vdo/slab-depot.c
4072
.thread_id = vdo->thread_config.physical_threads[zone],
drivers/md/dm-vdo/slab-depot.c
4118
allocator->summary_entries = depot->summary_entries + (MAX_VDO_SLABS * zone);
drivers/md/dm-vdo/slab-depot.c
4150
zone_count_t zone;
drivers/md/dm-vdo/slab-depot.c
4201
for (zone = 0; zone < depot->zone_count; zone++) {
drivers/md/dm-vdo/slab-depot.c
4202
result = initialize_block_allocator(depot, zone);
drivers/md/dm-vdo/slab-depot.c
4305
zone_count_t zone = 0;
drivers/md/dm-vdo/slab-depot.c
4312
for (zone = 0; zone < depot->zone_count; zone++) {
drivers/md/dm-vdo/slab-depot.c
4313
struct block_allocator *allocator = &depot->allocators[zone];
drivers/md/dm-vdo/slab-depot.c
4504
zone_count_t zone;
drivers/md/dm-vdo/slab-depot.c
4506
for (zone = 0; zone < depot->zone_count; zone++) {
drivers/md/dm-vdo/slab-depot.c
4508
total += READ_ONCE(depot->allocators[zone].allocated_blocks);
drivers/md/dm-vdo/slab-depot.c
4567
zone_count_t zone = 0;
drivers/md/dm-vdo/slab-depot.c
4574
if (zone != 0) {
drivers/md/dm-vdo/slab-depot.c
4576
entries + (zone * MAX_VDO_SLABS) + entry_number,
drivers/md/dm-vdo/slab-depot.c
4580
zone++;
drivers/md/dm-vdo/slab-depot.c
4581
if (zone == depot->old_zone_count)
drivers/md/dm-vdo/slab-depot.c
4582
zone = 0;
drivers/md/dm-vdo/slab-depot.c
4587
for (zone = 1; zone < MAX_VDO_PHYSICAL_ZONES; zone++) {
drivers/md/dm-vdo/slab-depot.c
4588
memcpy(entries + (zone * MAX_VDO_SLABS), entries,
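The slab-depot loop at 4587-4588 replicates zone 0's slab summary entries into every other zone's slot with one memcpy per zone, so all zones start from identical summaries. A sketch with illustrative sizes; the real copy length is MAX_VDO_SLABS entries with the entry size folded in:

    #include <string.h>

    enum { TOY_MAX_SLABS = 8192, TOY_MAX_ZONES = 16 };  /* illustrative */

    static void toy_replicate_summary(unsigned char *entries,
                                      size_t entry_size)
    {
        unsigned int zone;

        for (zone = 1; zone < TOY_MAX_ZONES; zone++)
            memcpy(entries + (size_t)zone * TOY_MAX_SLABS * entry_size,
                   entries, (size_t)TOY_MAX_SLABS * entry_size);
    }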
drivers/md/dm-vdo/slab-depot.c
5085
zone_count_t zone;
drivers/md/dm-vdo/slab-depot.c
5089
for (zone = 0; zone < depot->zone_count; zone++) {
drivers/md/dm-vdo/slab-depot.c
5090
const struct block_allocator *allocator = &depot->allocators[zone];
drivers/md/dm-vdo/slab-depot.c
5111
zone_count_t zone;
drivers/md/dm-vdo/slab-depot.c
5115
for (zone = 0; zone < depot->zone_count; zone++) {
drivers/md/dm-vdo/slab-depot.c
5117
READ_ONCE(depot->allocators[zone].ref_counts_statistics.blocks_written);
drivers/md/dm-vdo/slab-depot.c
5133
zone_count_t zone;
drivers/md/dm-vdo/slab-depot.c
5137
for (zone = 0; zone < depot->zone_count; zone++) {
drivers/md/dm-vdo/slab-depot.c
5139
&depot->allocators[zone].slab_journal_statistics;
drivers/md/dm-vdo/slab-depot.c
5162
zone_count_t zone;
drivers/md/dm-vdo/slab-depot.c
5164
for (zone = 0; zone < depot->zone_count; zone++) {
drivers/md/dm-vdo/slab-depot.c
5166
unrecovered += READ_ONCE(depot->allocators[zone].scrubber.slab_count);
drivers/md/dm-vdo/vdo.c
1759
zone_count_t zone;
drivers/md/dm-vdo/vdo.c
1766
for (zone = 0; zone < vdo->thread_config.logical_zone_count; zone++)
drivers/md/dm-vdo/vdo.c
1767
vdo_dump_logical_zone(&vdo->logical_zones->zones[zone]);
drivers/md/dm-vdo/vdo.c
1769
for (zone = 0; zone < vdo->thread_config.physical_zone_count; zone++)
drivers/md/dm-vdo/vdo.c
1770
vdo_dump_physical_zone(&vdo->physical_zones->zones[zone]);
drivers/md/dm-vdo/vdo.c
179
zone_count_t zone;
drivers/md/dm-vdo/vdo.c
181
for (zone = 0; zone < count; zone++)
drivers/md/dm-vdo/vdo.c
182
thread_ids[zone] = config->thread_count++;
drivers/md/dm-zone.c
100
if (zone->start >= args->start + args->tgt->len)
drivers/md/dm-zone.c
107
zone->start += sector_diff;
drivers/md/dm-zone.c
108
if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
drivers/md/dm-zone.c
109
if (zone->cond == BLK_ZONE_COND_FULL)
drivers/md/dm-zone.c
110
zone->wp = zone->start + zone->len;
drivers/md/dm-zone.c
111
else if (zone->cond == BLK_ZONE_COND_EMPTY)
drivers/md/dm-zone.c
112
zone->wp = zone->start;
drivers/md/dm-zone.c
114
zone->wp += sector_diff;
drivers/md/dm-zone.c
117
args->next_sector = zone->start + zone->len;
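The dm-zone.c entries at 100-117 remap a zone reported by the underlying device into the DM table's sector space: zones beyond the target are skipped, the start shifts by the target's offset, and the write pointer is pinned to the zone end when FULL, to the zone start when EMPTY, and shifted otherwise. A sketch of the write-pointer fix-up with hypothetical toy_* types:

    #include <stdint.h>

    enum toy_zone_cond { TOY_COND_EMPTY, TOY_COND_OPEN, TOY_COND_FULL };

    struct toy_blk_zone {
        uint64_t start, len, wp;
        enum toy_zone_cond cond;
        int is_conventional;
    };

    /* Shift a reported zone by sector_diff; conventional zones have no
     * write pointer to fix. The driver first skips zones outside the
     * target's range (dm-zone.c:100). */
    static void toy_remap_zone(struct toy_blk_zone *zone, int64_t sector_diff)
    {
        zone->start += sector_diff;
        if (!zone->is_conventional) {
            if (zone->cond == TOY_COND_FULL)
                zone->wp = zone->start + zone->len;
            else if (zone->cond == TOY_COND_EMPTY)
                zone->wp = zone->start;
            else
                zone->wp += sector_diff;
        }
    }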
drivers/md/dm-zone.c
123
ret = args->cb(zone, args->zone_idx, args->data);
drivers/md/dm-zone.c
128
return disk_report_zone(args->disk, zone, args->zone_idx++,
drivers/md/dm-zone.c
246
static int dm_device_count_zones_cb(struct blk_zone *zone,
drivers/md/dm-zone.c
251
if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
drivers/md/dm-zone.c
253
if (zone->start >= zc->start &&
drivers/md/dm-zone.c
254
zone->start < zc->start + zc->len)
drivers/md/dm-zone.c
482
static int dm_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
drivers/md/dm-zone.c
489
switch (zone->cond) {
drivers/md/dm-zone.c
91
static int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx,
drivers/md/dm-zoned-metadata.c
1011
if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
drivers/md/dm-zoned-metadata.c
1013
sb_block, (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
drivers/md/dm-zoned-metadata.c
1111
unsigned int zone_id = zmd->sb[0].zone->id;
drivers/md/dm-zoned-metadata.c
1124
zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
drivers/md/dm-zoned-metadata.c
1132
zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
drivers/md/dm-zoned-metadata.c
1137
zmd->sb[1].zone = NULL;
drivers/md/dm-zoned-metadata.c
1183
zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
drivers/md/dm-zoned-metadata.c
1185
zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
drivers/md/dm-zoned-metadata.c
1229
if (!zmd->sb[0].zone) {
drivers/md/dm-zoned-metadata.c
1235
zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
drivers/md/dm-zoned-metadata.c
1236
zmd->sb[0].dev = zmd->sb[0].zone->dev;
drivers/md/dm-zoned-metadata.c
1248
if (!zmd->sb[1].zone) {
drivers/md/dm-zoned-metadata.c
1250
zmd->sb[0].zone->id + zmd->nr_meta_zones;
drivers/md/dm-zoned-metadata.c
1252
zmd->sb[1].zone = dmz_get(zmd, zone_id);
drivers/md/dm-zoned-metadata.c
1254
zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
drivers/md/dm-zoned-metadata.c
1319
sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset);
drivers/md/dm-zoned-metadata.c
1321
if (!dmz_is_meta(sb->zone)) {
drivers/md/dm-zoned-metadata.c
1324
sb->zone->id);
drivers/md/dm-zoned-metadata.c
1354
struct dm_zone *zone;
drivers/md/dm-zoned-metadata.c
1356
zone = dmz_insert(zmd, idx, dev);
drivers/md/dm-zoned-metadata.c
1357
if (IS_ERR(zone))
drivers/md/dm-zoned-metadata.c
1358
return PTR_ERR(zone);
drivers/md/dm-zoned-metadata.c
1363
set_bit(DMZ_OFFLINE, &zone->flags);
drivers/md/dm-zoned-metadata.c
137
struct dm_zone *zone;
drivers/md/dm-zoned-metadata.c
1379
set_bit(DMZ_RND, &zone->flags);
drivers/md/dm-zoned-metadata.c
1383
set_bit(DMZ_SEQ, &zone->flags);
drivers/md/dm-zoned-metadata.c
1389
if (dmz_is_rnd(zone))
drivers/md/dm-zoned-metadata.c
1390
zone->wp_block = 0;
drivers/md/dm-zoned-metadata.c
1392
zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
drivers/md/dm-zoned-metadata.c
1395
set_bit(DMZ_OFFLINE, &zone->flags);
drivers/md/dm-zoned-metadata.c
1397
set_bit(DMZ_READ_ONLY, &zone->flags);
drivers/md/dm-zoned-metadata.c
1400
if (dmz_is_rnd(zone)) {
drivers/md/dm-zoned-metadata.c
1402
if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
drivers/md/dm-zoned-metadata.c
1404
zmd->sb[0].zone = zone;
drivers/md/dm-zoned-metadata.c
1413
set_bit(DMZ_META, &zone->flags);
drivers/md/dm-zoned-metadata.c
1425
struct dm_zone *zone;
drivers/md/dm-zoned-metadata.c
1427
zone = dmz_insert(zmd, idx, dev);
drivers/md/dm-zoned-metadata.c
1428
if (IS_ERR(zone))
drivers/md/dm-zoned-metadata.c
1429
return PTR_ERR(zone);
drivers/md/dm-zoned-metadata.c
1430
set_bit(DMZ_CACHE, &zone->flags);
drivers/md/dm-zoned-metadata.c
1431
zone->wp_block = 0;
drivers/md/dm-zoned-metadata.c
1436
set_bit(DMZ_OFFLINE, &zone->flags);
drivers/md/dm-zoned-metadata.c
1452
struct dm_zone *zone = xa_load(&zmd->zones, idx);
drivers/md/dm-zoned-metadata.c
1454
kfree(zone);
drivers/md/dm-zoned-metadata.c
1519
zmd->sb[0].zone = dmz_get(zmd, 0);
drivers/md/dm-zoned-metadata.c
1557
struct dm_zone *zone = data;
drivers/md/dm-zoned-metadata.c
1559
clear_bit(DMZ_OFFLINE, &zone->flags);
drivers/md/dm-zoned-metadata.c
1560
clear_bit(DMZ_READ_ONLY, &zone->flags);
drivers/md/dm-zoned-metadata.c
1562
set_bit(DMZ_OFFLINE, &zone->flags);
drivers/md/dm-zoned-metadata.c
1564
set_bit(DMZ_READ_ONLY, &zone->flags);
drivers/md/dm-zoned-metadata.c
1566
if (dmz_is_seq(zone))
drivers/md/dm-zoned-metadata.c
1567
zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
drivers/md/dm-zoned-metadata.c
1569
zone->wp_block = 0;
drivers/md/dm-zoned-metadata.c
1576
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
1578
struct dmz_dev *dev = zone->dev;
drivers/md/dm-zoned-metadata.c
1592
ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
drivers/md/dm-zoned-metadata.c
1593
dmz_update_zone_cb, zone);
drivers/md/dm-zoned-metadata.c
1600
zone->id);
drivers/md/dm-zoned-metadata.c
1613
struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
1615
struct dmz_dev *dev = zone->dev;
drivers/md/dm-zoned-metadata.c
1619
wp = zone->wp_block;
drivers/md/dm-zoned-metadata.c
1620
ret = dmz_update_zone(zmd, zone);
drivers/md/dm-zoned-metadata.c
1625
zone->id, zone->wp_block, wp);
drivers/md/dm-zoned-metadata.c
1627
if (zone->wp_block < wp) {
drivers/md/dm-zoned-metadata.c
1628
dmz_invalidate_blocks(zmd, zone, zone->wp_block,
drivers/md/dm-zoned-metadata.c
1629
wp - zone->wp_block);
drivers/md/dm-zoned-metadata.c
1638
static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
1646
if (dmz_is_offline(zone) ||
drivers/md/dm-zoned-metadata.c
1647
dmz_is_readonly(zone) ||
drivers/md/dm-zoned-metadata.c
1648
dmz_is_rnd(zone))
drivers/md/dm-zoned-metadata.c
1651
if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
drivers/md/dm-zoned-metadata.c
1652
struct dmz_dev *dev = zone->dev;
drivers/md/dm-zoned-metadata.c
1657
dmz_start_sect(zmd, zone),
drivers/md/dm-zoned-metadata.c
1662
zone->id, ret);
drivers/md/dm-zoned-metadata.c
1668
clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
drivers/md/dm-zoned-metadata.c
1669
zone->wp_block = 0;
drivers/md/dm-zoned-metadata.c
1674
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
drivers/md/dm-zoned-metadata.c
1844
static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
1846
if (list_empty(&zone->link))
drivers/md/dm-zoned-metadata.c
1849
list_del_init(&zone->link);
drivers/md/dm-zoned-metadata.c
1850
if (dmz_is_seq(zone)) {
drivers/md/dm-zoned-metadata.c
1852
list_add_tail(&zone->link, &zone->dev->map_seq_list);
drivers/md/dm-zoned-metadata.c
1853
} else if (dmz_is_cache(zone)) {
drivers/md/dm-zoned-metadata.c
1855
list_add_tail(&zone->link, &zmd->map_cache_list);
drivers/md/dm-zoned-metadata.c
1858
list_add_tail(&zone->link, &zone->dev->map_rnd_list);
drivers/md/dm-zoned-metadata.c
1866
static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
1868
__dmz_lru_zone(zmd, zone);
drivers/md/dm-zoned-metadata.c
1869
if (zone->bzone)
drivers/md/dm-zoned-metadata.c
1870
__dmz_lru_zone(zmd, zone->bzone);
drivers/md/dm-zoned-metadata.c
1896
int dmz_lock_zone_reclaim(struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
1899
if (dmz_is_active(zone))
drivers/md/dm-zoned-metadata.c
1902
return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
drivers/md/dm-zoned-metadata.c
1908
void dmz_unlock_zone_reclaim(struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
1910
WARN_ON(dmz_is_active(zone));
drivers/md/dm-zoned-metadata.c
1911
WARN_ON(!dmz_in_reclaim(zone));
drivers/md/dm-zoned-metadata.c
1913
clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
drivers/md/dm-zoned-metadata.c
1915
wake_up_bit(&zone->flags, DMZ_RECLAIM);
drivers/md/dm-zoned-metadata.c
1921
static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
1925
set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
drivers/md/dm-zoned-metadata.c
1926
wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
drivers/md/dm-zoned-metadata.c
1927
clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
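dmz_lock_zone_reclaim()/dmz_unlock_zone_reclaim() (dm-zoned-metadata.c:1896-1915) carve a try-lock out of one bit of zone->flags: test_and_set_bit takes it (refused while the zone is active), clear_bit_unlock releases it, and wake_up_bit wakes waiters such as dmz_wait_for_reclaim(). A C11 sketch of the lock bit alone; the kernel's wait-on-bit machinery has no portable one-liner, so it is left out:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TOY_RECLAIM_BIT (1ul << 0)

    /* Try-lock: true only for the caller that flipped the bit. */
    static bool toy_lock_zone_reclaim(atomic_ulong *flags)
    {
        return !(atomic_fetch_or(flags, TOY_RECLAIM_BIT) & TOY_RECLAIM_BIT);
    }

    /* Release; the kernel version also wakes bit-waiters via
     * wake_up_bit(). */
    static void toy_unlock_zone_reclaim(atomic_ulong *flags)
    {
        atomic_fetch_and(flags, ~TOY_RECLAIM_BIT);
    }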
drivers/md/dm-zoned-metadata.c
1939
struct dm_zone *zone, *maxw_z = NULL;
drivers/md/dm-zoned-metadata.c
1955
list_for_each_entry(zone, zone_list, link) {
drivers/md/dm-zoned-metadata.c
1956
if (dmz_is_buf(zone)) {
drivers/md/dm-zoned-metadata.c
1957
dzone = zone->bzone;
drivers/md/dm-zoned-metadata.c
1963
dzone = zone;
drivers/md/dm-zoned-metadata.c
1977
list_for_each_entry(zone, zone_list, link) {
drivers/md/dm-zoned-metadata.c
1978
if (dmz_is_buf(zone)) {
drivers/md/dm-zoned-metadata.c
1979
dzone = zone->bzone;
drivers/md/dm-zoned-metadata.c
1983
dzone = zone;
drivers/md/dm-zoned-metadata.c
1997
struct dm_zone *zone;
drivers/md/dm-zoned-metadata.c
1999
list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) {
drivers/md/dm-zoned-metadata.c
2000
if (!zone->bzone)
drivers/md/dm-zoned-metadata.c
2002
if (dmz_lock_zone_reclaim(zone))
drivers/md/dm-zoned-metadata.c
2003
return zone;
drivers/md/dm-zoned-metadata.c
2015
struct dm_zone *zone = NULL;
drivers/md/dm-zoned-metadata.c
2027
zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
drivers/md/dm-zoned-metadata.c
2028
if (!zone)
drivers/md/dm-zoned-metadata.c
2029
zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
drivers/md/dm-zoned-metadata.c
2032
return zone;
drivers/md/dm-zoned-metadata.c
2206
struct dm_zone *zone;
drivers/md/dm-zoned-metadata.c
221
static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
223
if (WARN_ON(!zone))
drivers/md/dm-zoned-metadata.c
2242
zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
drivers/md/dm-zoned-metadata.c
2244
if (zone) {
drivers/md/dm-zoned-metadata.c
2245
list_del_init(&zone->link);
drivers/md/dm-zoned-metadata.c
2248
return zone;
drivers/md/dm-zoned-metadata.c
2251
zone = list_first_entry(list, struct dm_zone, link);
drivers/md/dm-zoned-metadata.c
2252
list_del_init(&zone->link);
drivers/md/dm-zoned-metadata.c
2254
if (dmz_is_cache(zone))
drivers/md/dm-zoned-metadata.c
2256
else if (dmz_is_rnd(zone))
drivers/md/dm-zoned-metadata.c
2257
atomic_dec(&zone->dev->unmap_nr_rnd);
drivers/md/dm-zoned-metadata.c
2259
atomic_dec(&zone->dev->unmap_nr_seq);
drivers/md/dm-zoned-metadata.c
226
return zone->id - zone->dev->zone_offset;
drivers/md/dm-zoned-metadata.c
2261
if (dmz_is_offline(zone)) {
drivers/md/dm-zoned-metadata.c
2262
dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
drivers/md/dm-zoned-metadata.c
2263
zone = NULL;
drivers/md/dm-zoned-metadata.c
2266
if (dmz_is_meta(zone)) {
drivers/md/dm-zoned-metadata.c
2267
dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id);
drivers/md/dm-zoned-metadata.c
2268
zone = NULL;
drivers/md/dm-zoned-metadata.c
2271
return zone;
drivers/md/dm-zoned-metadata.c
2278
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
2281
if (dmz_is_seq(zone))
drivers/md/dm-zoned-metadata.c
2282
dmz_reset_zone(zmd, zone);
drivers/md/dm-zoned-metadata.c
2285
if (dmz_is_cache(zone)) {
drivers/md/dm-zoned-metadata.c
2286
list_add_tail(&zone->link, &zmd->unmap_cache_list);
drivers/md/dm-zoned-metadata.c
2288
} else if (dmz_is_rnd(zone)) {
drivers/md/dm-zoned-metadata.c
2289
list_add_tail(&zone->link, &zone->dev->unmap_rnd_list);
drivers/md/dm-zoned-metadata.c
229
sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
2290
atomic_inc(&zone->dev->unmap_nr_rnd);
drivers/md/dm-zoned-metadata.c
2291
} else if (dmz_is_reserved(zone)) {
drivers/md/dm-zoned-metadata.c
2292
list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
drivers/md/dm-zoned-metadata.c
2295
list_add_tail(&zone->link, &zone->dev->unmap_seq_list);
drivers/md/dm-zoned-metadata.c
2296
atomic_inc(&zone->dev->unmap_nr_seq);
drivers/md/dm-zoned-metadata.c
231
unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
drivers/md/dm-zoned-metadata.c
2325
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
2327
unsigned int chunk = zone->chunk;
drivers/md/dm-zoned-metadata.c
2335
if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
drivers/md/dm-zoned-metadata.c
2340
dzone_id = zone->bzone->id;
drivers/md/dm-zoned-metadata.c
2341
zone->bzone->bzone = NULL;
drivers/md/dm-zoned-metadata.c
2342
zone->bzone = NULL;
drivers/md/dm-zoned-metadata.c
2349
if (WARN_ON(zone->bzone)) {
drivers/md/dm-zoned-metadata.c
2350
zone->bzone->bzone = NULL;
drivers/md/dm-zoned-metadata.c
2351
zone->bzone = NULL;
drivers/md/dm-zoned-metadata.c
2358
zone->chunk = DMZ_MAP_UNMAPPED;
drivers/md/dm-zoned-metadata.c
2359
list_del_init(&zone->link);
drivers/md/dm-zoned-metadata.c
236
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
238
unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
drivers/md/dm-zoned-metadata.c
2398
struct dm_zone *zone,
drivers/md/dm-zoned-metadata.c
2402
(sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
drivers/md/dm-zoned-metadata.c
2473
int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
drivers/md/dm-zoned-metadata.c
2482
zone->id, (unsigned long long)chunk_block,
drivers/md/dm-zoned-metadata.c
2489
mblk = dmz_get_bitmap(zmd, zone, chunk_block);
drivers/md/dm-zoned-metadata.c
2508
if (likely(zone->weight + n <= zone_nr_blocks))
drivers/md/dm-zoned-metadata.c
2509
zone->weight += n;
drivers/md/dm-zoned-metadata.c
2512
zone->id, zone->weight,
drivers/md/dm-zoned-metadata.c
2514
zone->weight = zone_nr_blocks;
drivers/md/dm-zoned-metadata.c
2554
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
drivers/md/dm-zoned-metadata.c
2562
zone->id, (u64)chunk_block, nr_blocks);
drivers/md/dm-zoned-metadata.c
2568
mblk = dmz_get_bitmap(zmd, zone, chunk_block);
drivers/md/dm-zoned-metadata.c
2588
if (zone->weight >= n)
drivers/md/dm-zoned-metadata.c
2589
zone->weight -= n;
drivers/md/dm-zoned-metadata.c
2592
zone->id, zone->weight, n);
drivers/md/dm-zoned-metadata.c
2593
zone->weight = 0;
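dmz_validate_blocks() and dmz_invalidate_blocks() (2473-2593) maintain zone->weight, the count of valid blocks, by clamping at both ends rather than trusting the delta: an overshoot pins the weight to the zone size, an undershoot pins it to zero, each with a warning in the driver. The same saturating arithmetic in isolation:

    /* Saturating add: clamp at the zone size instead of overflowing. */
    static void toy_weight_add(unsigned int *weight, unsigned int n,
                               unsigned int zone_nr_blocks)
    {
        if (*weight + n <= zone_nr_blocks)
            *weight += n;
        else
            *weight = zone_nr_blocks;   /* the driver also logs a warning */
    }

    /* Saturating subtract: clamp at zero. */
    static void toy_weight_sub(unsigned int *weight, unsigned int n)
    {
        if (*weight >= n)
            *weight -= n;
        else
            *weight = 0;                /* the driver also logs a warning */
    }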
drivers/md/dm-zoned-metadata.c
2602
static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
drivers/md/dm-zoned-metadata.c
2611
mblk = dmz_get_bitmap(zmd, zone, chunk_block);
drivers/md/dm-zoned-metadata.c
2628
static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
drivers/md/dm-zoned-metadata.c
2642
mblk = dmz_get_bitmap(zmd, zone, chunk_block);
drivers/md/dm-zoned-metadata.c
2671
int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
drivers/md/dm-zoned-metadata.c
2676
valid = dmz_test_block(zmd, zone, chunk_block);
drivers/md/dm-zoned-metadata.c
2681
return dmz_to_next_set_block(zmd, zone, chunk_block,
drivers/md/dm-zoned-metadata.c
2691
int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
drivers/md/dm-zoned-metadata.c
2697
ret = dmz_to_next_set_block(zmd, zone, start_block,
drivers/md/dm-zoned-metadata.c
2705
return dmz_to_next_set_block(zmd, zone, start_block,
drivers/md/dm-zoned-metadata.c
2740
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
drivers/md/dm-zoned-metadata.c
2751
mblk = dmz_get_bitmap(zmd, zone, chunk_block);
drivers/md/dm-zoned-metadata.c
2769
zone->weight = n;
drivers/md/dm-zoned-metadata.c
2867
struct dm_zone *zone;
drivers/md/dm-zoned-metadata.c
2907
zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
drivers/md/dm-zoned-metadata.c
2908
if (!zone) {
drivers/md/dm-zoned-metadata.c
2914
if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
drivers/md/dm-zoned-metadata.c
2920
set_bit(DMZ_META, &zone->flags);
drivers/md/dm-zoned-metadata.c
306
struct dm_zone *zone = kzalloc_obj(struct dm_zone);
drivers/md/dm-zoned-metadata.c
308
if (!zone)
drivers/md/dm-zoned-metadata.c
311
if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
drivers/md/dm-zoned-metadata.c
312
kfree(zone);
drivers/md/dm-zoned-metadata.c
316
INIT_LIST_HEAD(&zone->link);
drivers/md/dm-zoned-metadata.c
317
atomic_set(&zone->refcount, 0);
drivers/md/dm-zoned-metadata.c
318
zone->id = zone_id;
drivers/md/dm-zoned-metadata.c
319
zone->chunk = DMZ_MAP_UNMAPPED;
drivers/md/dm-zoned-metadata.c
320
zone->dev = dev;
drivers/md/dm-zoned-metadata.c
322
return zone;
drivers/md/dm-zoned-metadata.c
788
sb_block = zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
drivers/md/dm-zoned-reclaim.c
59
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
drivers/md/dm-zoned-reclaim.c
63
struct dmz_dev *dev = zone->dev;
drivers/md/dm-zoned-reclaim.c
64
sector_t wp_block = zone->wp_block;
drivers/md/dm-zoned-reclaim.c
80
dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
drivers/md/dm-zoned-reclaim.c
85
zone->id, (unsigned long long)wp_block,
drivers/md/dm-zoned-reclaim.c
91
zone->wp_block = block;
drivers/md/dm-zoned-target.c
116
static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
drivers/md/dm-zoned-target.c
122
struct dmz_dev *dev = zone->dev;
drivers/md/dm-zoned-target.c
134
dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
drivers/md/dm-zoned-target.c
144
if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
drivers/md/dm-zoned-target.c
145
zone->wp_block += nr_blocks;
drivers/md/dm-zoned-target.c
169
static int dmz_handle_read(struct dmz_target *dmz, struct dm_zone *zone,
drivers/md/dm-zoned-target.c
180
if (!zone) {
drivers/md/dm-zoned-target.c
188
(dmz_is_rnd(zone) ? "RND" :
drivers/md/dm-zoned-target.c
189
(dmz_is_cache(zone) ? "CACHE" : "SEQ")),
drivers/md/dm-zoned-target.c
190
zone->id,
drivers/md/dm-zoned-target.c
194
bzone = zone->bzone;
drivers/md/dm-zoned-target.c
197
if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
drivers/md/dm-zoned-target.c
198
chunk_block < zone->wp_block) {
drivers/md/dm-zoned-target.c
200
ret = dmz_block_valid(zmd, zone, chunk_block);
drivers/md/dm-zoned-target.c
206
rzone = zone;
drivers/md/dm-zoned-target.c
21
struct dm_zone *zone;
drivers/md/dm-zoned-target.c
250
struct dm_zone *zone, struct bio *bio,
drivers/md/dm-zoned-target.c
255
struct dm_zone *bzone = zone->bzone;
drivers/md/dm-zoned-target.c
258
if (dmz_is_readonly(zone))
drivers/md/dm-zoned-target.c
262
ret = dmz_submit_bio(dmz, zone, bio, chunk_block, nr_blocks);
drivers/md/dm-zoned-target.c
270
ret = dmz_validate_blocks(zmd, zone, chunk_block, nr_blocks);
drivers/md/dm-zoned-target.c
283
struct dm_zone *zone, struct bio *bio,
drivers/md/dm-zoned-target.c
292
bzone = dmz_get_chunk_buffer(zmd, zone);
drivers/md/dm-zoned-target.c
309
if (ret == 0 && chunk_block < zone->wp_block)
drivers/md/dm-zoned-target.c
310
ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
drivers/md/dm-zoned-target.c
318
static int dmz_handle_write(struct dmz_target *dmz, struct dm_zone *zone,
drivers/md/dm-zoned-target.c
325
if (!zone)
drivers/md/dm-zoned-target.c
331
(dmz_is_rnd(zone) ? "RND" :
drivers/md/dm-zoned-target.c
332
(dmz_is_cache(zone) ? "CACHE" : "SEQ")),
drivers/md/dm-zoned-target.c
333
zone->id,
drivers/md/dm-zoned-target.c
336
if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
drivers/md/dm-zoned-target.c
337
chunk_block == zone->wp_block) {
drivers/md/dm-zoned-target.c
343
return dmz_handle_direct_write(dmz, zone, bio,
drivers/md/dm-zoned-target.c
351
return dmz_handle_buffered_write(dmz, zone, bio, chunk_block, nr_blocks);
drivers/md/dm-zoned-target.c
357
static int dmz_handle_discard(struct dmz_target *dmz, struct dm_zone *zone,
drivers/md/dm-zoned-target.c
367
if (!zone)
drivers/md/dm-zoned-target.c
370
if (dmz_is_readonly(zone))
drivers/md/dm-zoned-target.c
376
zone->id,
drivers/md/dm-zoned-target.c
383
if (dmz_is_rnd(zone) || dmz_is_cache(zone) ||
drivers/md/dm-zoned-target.c
384
chunk_block < zone->wp_block)
drivers/md/dm-zoned-target.c
385
ret = dmz_invalidate_blocks(zmd, zone, chunk_block, nr_blocks);
drivers/md/dm-zoned-target.c
386
if (ret == 0 && zone->bzone)
drivers/md/dm-zoned-target.c
387
ret = dmz_invalidate_blocks(zmd, zone->bzone,
drivers/md/dm-zoned-target.c
401
struct dm_zone *zone;
drivers/md/dm-zoned-target.c
411
zone = dmz_get_chunk_mapping(zmd, dmz_bio_chunk(zmd, bio),
drivers/md/dm-zoned-target.c
413
if (IS_ERR(zone)) {
drivers/md/dm-zoned-target.c
414
ret = PTR_ERR(zone);
drivers/md/dm-zoned-target.c
419
if (zone) {
drivers/md/dm-zoned-target.c
420
dmz_activate_zone(zone);
drivers/md/dm-zoned-target.c
421
bioctx->zone = zone;
drivers/md/dm-zoned-target.c
422
dmz_reclaim_bio_acc(zone->dev->reclaim);
drivers/md/dm-zoned-target.c
427
ret = dmz_handle_read(dmz, zone, bio);
drivers/md/dm-zoned-target.c
430
ret = dmz_handle_write(dmz, zone, bio);
drivers/md/dm-zoned-target.c
434
ret = dmz_handle_discard(dmz, zone, bio);
drivers/md/dm-zoned-target.c
446
if (zone)
drivers/md/dm-zoned-target.c
447
dmz_put_chunk_mapping(zmd, zone);
drivers/md/dm-zoned-target.c
654
bioctx->zone = NULL;
drivers/md/dm-zoned-target.c
86
struct dm_zone *zone = bioctx->zone;
drivers/md/dm-zoned-target.c
88
if (zone) {
drivers/md/dm-zoned-target.c
91
dmz_is_seq(zone))
drivers/md/dm-zoned-target.c
92
set_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
drivers/md/dm-zoned-target.c
93
dmz_deactivate_zone(zone);
drivers/md/dm-zoned.h
205
sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone);
drivers/md/dm-zoned.h
206
sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone);
drivers/md/dm-zoned.h
219
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
drivers/md/dm-zoned.h
221
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
drivers/md/dm-zoned.h
223
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
drivers/md/dm-zoned.h
238
static inline void dmz_activate_zone(struct dm_zone *zone)
drivers/md/dm-zoned.h
240
atomic_inc(&zone->refcount);
drivers/md/dm-zoned.h
243
int dmz_lock_zone_reclaim(struct dm_zone *zone);
drivers/md/dm-zoned.h
244
void dmz_unlock_zone_reclaim(struct dm_zone *zone);
drivers/md/dm-zoned.h
250
void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *zone);
drivers/md/dm-zoned.h
254
int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
drivers/md/dm-zoned.h
256
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
drivers/md/dm-zoned.h
258
int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
drivers/md/dm-zoned.h
260
int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
drivers/md/dm-zoned.h
287
static inline void dmz_deactivate_zone(struct dm_zone *zone)
drivers/md/dm-zoned.h
289
dmz_reclaim_bio_acc(zone->dev->reclaim);
drivers/md/dm-zoned.h
290
atomic_dec(&zone->refcount);
drivers/md/dm-zoned.h
296
static inline bool dmz_is_active(struct dm_zone *zone)
drivers/md/dm-zoned.h
298
return atomic_read(&zone->refcount);
drivers/md/raid0.c
159
zone = &conf->strip_zone[0];
drivers/md/raid0.c
207
zone->nb_dev = cnt;
drivers/md/raid0.c
208
zone->zone_end = smallest->sectors * cnt;
drivers/md/raid0.c
210
curr_zone_end = zone->zone_end;
drivers/md/raid0.c
217
zone = conf->strip_zone + i;
drivers/md/raid0.c
221
zone->dev_start = smallest->sectors;
drivers/md/raid0.c
227
if (rdev->sectors <= zone->dev_start) {
drivers/md/raid0.c
247
zone->nb_dev = c;
drivers/md/raid0.c
248
sectors = (smallest->sectors - zone->dev_start) * c;
drivers/md/raid0.c
251
zone->nb_dev, (unsigned long long)sectors);
drivers/md/raid0.c
254
zone->zone_end = curr_zone_end;
drivers/md/raid0.c
282
zone = conf->strip_zone + i;
drivers/md/raid0.c
284
zone->disk_shift = sector_div(first_sector,
drivers/md/raid0.c
285
zone->nb_dev);
drivers/md/raid0.c
324
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
drivers/md/raid0.c
341
sector_div(chunk, zone->nb_dev << chunksect_bits);
drivers/md/raid0.c
345
sector_div(chunk, chunk_sects * zone->nb_dev);
drivers/md/raid0.c
353
return conf->devlist[(zone - conf->strip_zone)*raid_disks
drivers/md/raid0.c
354
+ sector_div(sector, zone->nb_dev)];
drivers/md/raid0.c
457
struct strip_zone *zone;
drivers/md/raid0.c
470
zone = find_zone(conf, &start);
drivers/md/raid0.c
472
if (bio_end_sector(bio) > zone->zone_end) {
drivers/md/raid0.c
474
zone->zone_end - bio->bi_iter.bi_sector,
drivers/md/raid0.c
479
end = zone->zone_end;
drivers/md/raid0.c
485
if (zone != conf->strip_zone)
drivers/md/raid0.c
486
end = end - zone[-1].zone_end;
drivers/md/raid0.c
489
stripe_size = zone->nb_dev * mddev->chunk_sectors;
drivers/md/raid0.c
497
if ((conf->layout == RAID0_ORIG_LAYOUT) && (zone != conf->strip_zone)) {
drivers/md/raid0.c
499
start_disk_index = sector_div(orig_start, zone->nb_dev);
drivers/md/raid0.c
501
zone->nb_dev,
drivers/md/raid0.c
502
zone->disk_shift);
drivers/md/raid0.c
504
end_disk_index = sector_div(orig_end, zone->nb_dev);
drivers/md/raid0.c
506
zone->nb_dev, zone->disk_shift);
drivers/md/raid0.c
520
for (disk = 0; disk < zone->nb_dev; disk++) {
drivers/md/raid0.c
525
compare_disk = map_disk_shift(disk, zone->nb_dev,
drivers/md/raid0.c
526
zone->disk_shift);
drivers/md/raid0.c
546
rdev = conf->devlist[(zone - conf->strip_zone) *
drivers/md/raid0.c
549
dev_start + zone->dev_start + rdev->data_offset,
drivers/md/raid0.c
558
struct strip_zone *zone;
drivers/md/raid0.c
565
zone = find_zone(mddev->private, §or);
drivers/md/raid0.c
568
tmp_dev = map_sector(mddev, zone, bio_sector, §or);
drivers/md/raid0.c
571
tmp_dev = map_sector(mddev, zone, sector, §or);
drivers/md/raid0.c
586
bio->bi_iter.bi_sector = sector + zone->dev_start +
drivers/md/raid0.c
70
struct strip_zone *zone;
drivers/memstick/core/ms_block.c
1080
static u16 msb_get_free_block(struct msb_data *msb, int zone)
drivers/memstick/core/ms_block.c
1083
int pba = zone * MS_BLOCKS_IN_ZONE;
drivers/memstick/core/ms_block.c
1088
if (!msb->free_block_count[zone]) {
drivers/memstick/core/ms_block.c
1089
pr_err("NO free blocks in the zone %d, to use for a write, (media is WORN out) switching to RO mode", zone);
drivers/memstick/core/ms_block.c
1094
pos %= msb->free_block_count[zone];
drivers/memstick/core/ms_block.c
1097
msb->free_block_count[zone], pos);
drivers/memstick/core/ms_block.c
1107
if (pba == msb->block_count || (msb_get_zone_from_pba(pba)) != zone) {
drivers/memstick/core/ms_block.c
170
int zone = msb_get_zone_from_pba(pba);
drivers/memstick/core/ms_block.c
184
msb->free_block_count[zone]--;
drivers/memstick/core/ms_block.c
190
int zone = msb_get_zone_from_pba(pba);
drivers/memstick/core/ms_block.c
203
msb->free_block_count[zone]++;
drivers/mtd/mtdpstore.c
431
cxt->dev.zone.read = mtdpstore_read;
drivers/mtd/mtdpstore.c
432
cxt->dev.zone.write = mtdpstore_write;
drivers/mtd/mtdpstore.c
433
cxt->dev.zone.erase = mtdpstore_erase;
drivers/mtd/mtdpstore.c
434
cxt->dev.zone.panic_write = mtdpstore_panic_write;
drivers/mtd/mtdpstore.c
435
cxt->dev.zone.total_size = mtd->size;
drivers/mtd/sm_ftl.c
1015
struct ftl_zone *zone;
drivers/mtd/sm_ftl.c
1023
zone = sm_get_zone(ftl, zone_num);
drivers/mtd/sm_ftl.c
1024
if (IS_ERR(zone)) {
drivers/mtd/sm_ftl.c
1025
error = PTR_ERR(zone);
drivers/mtd/sm_ftl.c
1037
block = zone->lba_to_phys_table[block];
drivers/mtd/sm_ftl.c
1061
struct ftl_zone *zone;
drivers/mtd/sm_ftl.c
1071
zone = sm_get_zone(ftl, zone_num);
drivers/mtd/sm_ftl.c
1072
if (IS_ERR(zone)) {
drivers/mtd/sm_ftl.c
1073
error = PTR_ERR(zone);
drivers/mtd/sm_ftl.c
190
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
drivers/mtd/sm_ftl.c
193
WARN_ON(zone < 0 || zone >= ftl->zone_count);
drivers/mtd/sm_ftl.c
200
return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
drivers/mtd/sm_ftl.c
205
int *zone, int *block, int *boffset)
drivers/mtd/sm_ftl.c
210
*zone = offset >= ftl->zone_count ? -1 : offset;
drivers/mtd/sm_ftl.c
236
int zone, int block, int boffset,
drivers/mtd/sm_ftl.c
268
if (zone == 0 && block == ftl->cis_block && boffset ==
drivers/mtd/sm_ftl.c
280
ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
drivers/mtd/sm_ftl.c
285
block, zone, ret);
drivers/mtd/sm_ftl.c
303
" as bad" , block, zone);
drivers/mtd/sm_ftl.c
312
block, zone);
drivers/mtd/sm_ftl.c
321
int zone, int block, int boffset,
drivers/mtd/sm_ftl.c
330
if (zone == 0 && (block == ftl->cis_block || block == 0)) {
drivers/mtd/sm_ftl.c
345
ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
drivers/mtd/sm_ftl.c
351
block, zone, ret);
drivers/mtd/sm_ftl.c
368
int zone, int block, int lba,
drivers/mtd/sm_ftl.c
392
boffset / SM_SECTOR_SIZE, lba, zone);
drivers/mtd/sm_ftl.c
406
if (!sm_write_sector(ftl, zone, block, boffset,
drivers/mtd/sm_ftl.c
419
if (sm_erase_block(ftl, zone, block, 0))
drivers/mtd/sm_ftl.c
425
sm_mark_block_bad(ftl, zone, block);
drivers/mtd/sm_ftl.c
434
static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
drivers/mtd/sm_ftl.c
448
sm_printk("marking block %d of zone %d as bad", block, zone);
drivers/mtd/sm_ftl.c
455
sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
drivers/mtd/sm_ftl.c
465
struct ftl_zone *zone = &ftl->zones[zone_num];
drivers/mtd/sm_ftl.c
489
kfifo_in(&zone->free_sectors,
drivers/mtd/sm_ftl.c
499
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
drivers/mtd/sm_ftl.c
516
if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
drivers/mtd/sm_ftl.c
531
sm_erase_block(ftl, zone, block, 1);
drivers/mtd/sm_ftl.c
752
struct ftl_zone *zone = &ftl->zones[zone_num];
drivers/mtd/sm_ftl.c
762
zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);
drivers/mtd/sm_ftl.c
764
if (!zone->lba_to_phys_table)
drivers/mtd/sm_ftl.c
766
memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);
drivers/mtd/sm_ftl.c
770
if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
drivers/mtd/sm_ftl.c
771
kfree(zone->lba_to_phys_table);
drivers/mtd/sm_ftl.c
784
kfifo_free(&zone->free_sectors);
drivers/mtd/sm_ftl.c
785
kfree(zone->lba_to_phys_table);
drivers/mtd/sm_ftl.c
793
kfifo_in(&zone->free_sectors,
drivers/mtd/sm_ftl.c
824
if (zone->lba_to_phys_table[lba] < 0) {
drivers/mtd/sm_ftl.c
826
zone->lba_to_phys_table[lba] = block;
drivers/mtd/sm_ftl.c
832
lba, zone->lba_to_phys_table[lba], block, zone_num);
drivers/mtd/sm_ftl.c
840
zone->lba_to_phys_table[lba])) {
drivers/mtd/sm_ftl.c
841
zone->lba_to_phys_table[lba] = block;
drivers/mtd/sm_ftl.c
854
zone->initialized = 1;
drivers/mtd/sm_ftl.c
859
if (!kfifo_len(&zone->free_sectors)) {
drivers/mtd/sm_ftl.c
866
i %= (kfifo_len(&zone->free_sectors) / 2);
drivers/mtd/sm_ftl.c
869
len = kfifo_out(&zone->free_sectors,
drivers/mtd/sm_ftl.c
872
kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
drivers/mtd/sm_ftl.c
880
struct ftl_zone *zone;
drivers/mtd/sm_ftl.c
884
zone = &ftl->zones[zone_num];
drivers/mtd/sm_ftl.c
886
if (!zone->initialized) {
drivers/mtd/sm_ftl.c
892
return zone;
drivers/mtd/sm_ftl.c
930
struct ftl_zone *zone;
drivers/mtd/sm_ftl.c
944
zone = &ftl->zones[zone_num];
drivers/mtd/sm_ftl.c
945
block_num = zone->lba_to_phys_table[ftl->cache_block];
drivers/mtd/sm_ftl.c
968
if (kfifo_out(&zone->free_sectors,
drivers/mtd/sm_ftl.c
980
zone->lba_to_phys_table[ftl->cache_block] = write_sector;
drivers/net/ethernet/mellanox/mlx4/alloc.c
250
struct mlx4_zone_entry *zone = kmalloc_obj(*zone);
drivers/net/ethernet/mellanox/mlx4/alloc.c
252
if (NULL == zone)
drivers/net/ethernet/mellanox/mlx4/alloc.c
255
zone->flags = flags;
drivers/net/ethernet/mellanox/mlx4/alloc.c
256
zone->bitmap = bitmap;
drivers/net/ethernet/mellanox/mlx4/alloc.c
257
zone->use_rr = (flags & MLX4_ZONE_USE_RR) ? MLX4_USE_RR : 0;
drivers/net/ethernet/mellanox/mlx4/alloc.c
258
zone->priority = priority;
drivers/net/ethernet/mellanox/mlx4/alloc.c
259
zone->offset = offset;
drivers/net/ethernet/mellanox/mlx4/alloc.c
263
zone->uid = zone_alloc->last_uid++;
drivers/net/ethernet/mellanox/mlx4/alloc.c
264
zone->allocator = zone_alloc;
drivers/net/ethernet/mellanox/mlx4/alloc.c
274
list_add_tail(&zone->prio_list, &it->prio_list);
drivers/net/ethernet/mellanox/mlx4/alloc.c
275
list_add_tail(&zone->list, &it->list);
drivers/net/ethernet/mellanox/mlx4/alloc.c
279
*puid = zone->uid;
drivers/net/ethernet/mellanox/mlx4/alloc.c
321
struct mlx4_zone_entry *zone, *tmp;
drivers/net/ethernet/mellanox/mlx4/alloc.c
325
list_for_each_entry_safe(zone, tmp, &zone_alloc->entries, list) {
drivers/net/ethernet/mellanox/mlx4/alloc.c
326
list_del(&zone->list);
drivers/net/ethernet/mellanox/mlx4/alloc.c
327
list_del(&zone->prio_list);
drivers/net/ethernet/mellanox/mlx4/alloc.c
328
kfree(zone);
drivers/net/ethernet/mellanox/mlx4/alloc.c
336
static u32 __mlx4_alloc_from_zone(struct mlx4_zone_entry *zone, int count,
drivers/net/ethernet/mellanox/mlx4/alloc.c
341
struct mlx4_zone_allocator *zone_alloc = zone->allocator;
drivers/net/ethernet/mellanox/mlx4/alloc.c
344
res = mlx4_bitmap_alloc_range(zone->bitmap, count,
drivers/net/ethernet/mellanox/mlx4/alloc.c
348
res += zone->offset;
drivers/net/ethernet/mellanox/mlx4/alloc.c
349
uid = zone->uid;
drivers/net/ethernet/mellanox/mlx4/alloc.c
354
if (unlikely(curr_node->priority == zone->priority))
drivers/net/ethernet/mellanox/mlx4/alloc.c
358
if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO) {
drivers/net/ethernet/mellanox/mlx4/alloc.c
372
if (zone->flags & MLX4_ZONE_ALLOW_ALLOC_FROM_EQ_PRIO) {
drivers/net/ethernet/mellanox/mlx4/alloc.c
376
if (unlikely(it == zone))
drivers/net/ethernet/mellanox/mlx4/alloc.c
392
if (zone->flags & MLX4_ZONE_FALLBACK_TO_HIGHER_PRIO) {
drivers/net/ethernet/mellanox/mlx4/alloc.c
418
static void __mlx4_free_from_zone(struct mlx4_zone_entry *zone, u32 obj,
drivers/net/ethernet/mellanox/mlx4/alloc.c
421
mlx4_bitmap_free_range(zone->bitmap, obj - zone->offset, count, zone->use_rr);
drivers/net/ethernet/mellanox/mlx4/alloc.c
428
struct mlx4_zone_entry *zone;
drivers/net/ethernet/mellanox/mlx4/alloc.c
430
list_for_each_entry(zone, &zones->entries, list) {
drivers/net/ethernet/mellanox/mlx4/alloc.c
431
if (zone->uid == uid)
drivers/net/ethernet/mellanox/mlx4/alloc.c
432
return zone;
drivers/net/ethernet/mellanox/mlx4/alloc.c
440
struct mlx4_zone_entry *zone;
drivers/net/ethernet/mellanox/mlx4/alloc.c
445
zone = __mlx4_find_zone_by_uid(zones, uid);
drivers/net/ethernet/mellanox/mlx4/alloc.c
447
bitmap = zone == NULL ? NULL : zone->bitmap;
drivers/net/ethernet/mellanox/mlx4/alloc.c
456
struct mlx4_zone_entry *zone;
drivers/net/ethernet/mellanox/mlx4/alloc.c
461
zone = __mlx4_find_zone_by_uid(zones, uid);
drivers/net/ethernet/mellanox/mlx4/alloc.c
463
if (NULL == zone) {
drivers/net/ethernet/mellanox/mlx4/alloc.c
468
__mlx4_zone_remove_one_entry(zone);
drivers/net/ethernet/mellanox/mlx4/alloc.c
472
kfree(zone);
drivers/net/ethernet/mellanox/mlx4/alloc.c
481
struct mlx4_zone_entry *zone, *zone_candidate = NULL;
drivers/net/ethernet/mellanox/mlx4/alloc.c
490
list_for_each_entry(zone, &zones->entries, list) {
drivers/net/ethernet/mellanox/mlx4/alloc.c
491
if (obj >= zone->offset) {
drivers/net/ethernet/mellanox/mlx4/alloc.c
492
u32 mobj = (obj - zone->offset) & zones->mask;
drivers/net/ethernet/mellanox/mlx4/alloc.c
494
if (mobj < zone->bitmap->max) {
drivers/net/ethernet/mellanox/mlx4/alloc.c
495
u32 curr_dist = zone->bitmap->effective_len;
drivers/net/ethernet/mellanox/mlx4/alloc.c
499
zone_candidate = zone;
drivers/net/ethernet/mellanox/mlx4/alloc.c
511
struct mlx4_zone_entry *zone;
drivers/net/ethernet/mellanox/mlx4/alloc.c
516
zone = __mlx4_find_zone_by_uid(zones, uid);
drivers/net/ethernet/mellanox/mlx4/alloc.c
518
if (NULL == zone)
drivers/net/ethernet/mellanox/mlx4/alloc.c
521
res = __mlx4_alloc_from_zone(zone, count, align, skip_mask, puid);
drivers/net/ethernet/mellanox/mlx4/alloc.c
531
struct mlx4_zone_entry *zone;
drivers/net/ethernet/mellanox/mlx4/alloc.c
539
zone = __mlx4_find_zone_by_uid_unique(zones, obj);
drivers/net/ethernet/mellanox/mlx4/alloc.c
541
if (NULL == zone) {
drivers/net/ethernet/mellanox/mlx4/alloc.c
546
__mlx4_free_from_zone(zone, obj, count);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
106
u16 zone;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1226
entry->tuple.zone = ft->zone;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
135
u16 zone;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1410
u16 zone)
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1417
tuple->zone = zone;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1598
attr->ct_attr.zone = act->ct.zone;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1620
u16 zone;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1627
zone = ct_ft->zone & MLX5_CT_ZONE_MASK;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1629
ZONE_TO_REG, zone);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1654
zone, MLX5_CT_ZONE_MASK);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1664
ct_dbg("Failed to add pre ct flow rule zone %d", zone);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1674
ct_dbg("Failed to add pre ct miss rule zone %d", zone);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
183
.key_offset = offsetof(struct mlx5_ct_ft, zone),
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
184
.key_len = sizeof(((struct mlx5_ct_ft *)0)->zone),
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1847
mlx5_tc_ct_add_ft_cb(struct mlx5_tc_ct_priv *ct_priv, u16 zone,
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1853
ft = rhashtable_lookup_fast(&ct_priv->zone_ht, &zone, zone_params);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1863
err = mapping_add(ct_priv->zone_mapping, &zone, &ft->zone_restore_id);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1867
ft->zone = zone;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1980
u16 zone;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
1983
ft = mlx5_tc_ct_add_ft_cb(ct_priv, attr->ct_attr.zone,
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
2010
zone = ft->zone & MLX5_CT_ZONE_MASK;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
2012
ct_priv->ns_type, ZONE_TO_REG, zone);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
2434
u16 zone;
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
2439
if (mapping_find(ct_priv->zone_mapping, zone_restore_id, &zone))
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
2442
if (!mlx5_tc_ct_skb_to_tuple(skb, &tuple, zone))
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
527
ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
852
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
864
ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
909
mlx5e_tc_match_to_reg_match(spec, ZONE_TO_REG, entry->tuple.zone, MLX5_CT_ZONE_MASK);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
923
ct_dbg("Updated ct entry rule in zone %d", entry->tuple.zone);
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
26
u16 zone;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1342
u16 zone, bool wildcarded)
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1351
zt = get_hashentry(&priv->ct_zone_table, &zone,
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1363
zt->zone = zone;
drivers/net/ethernet/netronome/nfp/flower/conntrack.c
1830
zt = get_nfp_zone_entry(priv, ct_act->ct.zone, false);
drivers/net/ethernet/netronome/nfp/flower/conntrack.h
60
u16 zone;
drivers/net/ethernet/netronome/nfp/flower/metadata.c
494
.key_offset = offsetof(struct nfp_fl_ct_zone_entry, zone),
drivers/net/ethernet/sfc/mae.c
1709
MAE_OUTER_RULE_INSERT_IN_DO_CT, !!act->zone,
drivers/net/ethernet/sfc/mae.c
1712
act->zone ? act->zone->zone : 0,
drivers/net/ethernet/sfc/mae.c
1753
MAE_ACTION_RULE_RESPONSE_DO_CT, !!act->zone,
drivers/net/ethernet/sfc/mae.c
1755
act->rid && !act->zone,
drivers/net/ethernet/sfc/mae.c
1761
act->zone ? act->zone->zone : 0);
drivers/net/ethernet/sfc/mae.c
1957
return TABLE_POPULATE_KEY(key, ct, zone, cpu_to_be16(conn->zone->zone));
drivers/net/ethernet/sfc/mae.c
364
rc = TABLE_HOOK_KEY(meta_ct, zone, DOMAIN);
drivers/net/ethernet/sfc/tc.c
1026
if (act->zone) {
drivers/net/ethernet/sfc/tc.c
1050
ct_zone = efx_tc_ct_register_zone(efx, fa->ct.zone,
drivers/net/ethernet/sfc/tc.c
1056
act->zone = ct_zone;
drivers/net/ethernet/sfc/tc.c
1077
if (act->zone)
drivers/net/ethernet/sfc/tc.c
1078
efx_tc_ct_unregister_zone(efx, act->zone);
drivers/net/ethernet/sfc/tc.c
2952
if (rule->lhs_act.zone)
drivers/net/ethernet/sfc/tc.c
2953
efx_tc_ct_unregister_zone(efx, rule->lhs_act.zone);
drivers/net/ethernet/sfc/tc.h
194
struct efx_tc_ct_zone *zone;
drivers/net/ethernet/sfc/tc_conntrack.c
19
.key_len = sizeof_field(struct efx_tc_ct_zone, zone),
drivers/net/ethernet/sfc/tc_conntrack.c
32
struct efx_tc_ct_zone *zone = ptr;
drivers/net/ethernet/sfc/tc_conntrack.c
33
struct efx_nic *efx = zone->efx;
drivers/net/ethernet/sfc/tc_conntrack.c
37
zone->zone);
drivers/net/ethernet/sfc/tc_conntrack.c
386
conn->zone = ct_zone;
drivers/net/ethernet/sfc/tc_conntrack.c
39
nf_flow_table_offload_del_cb(zone->nf_ft, efx_tc_flow_block, zone);
drivers/net/ethernet/sfc/tc_conntrack.c
40
kfree(zone);
drivers/net/ethernet/sfc/tc_conntrack.c
559
struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone,
drivers/net/ethernet/sfc/tc_conntrack.c
568
ct_zone->zone = zone;
drivers/net/ethernet/sfc/tc_conntrack.c
582
"Found existing ct_zone for %u\n", zone);
drivers/net/ethernet/sfc/tc_conntrack.c
591
zone, rc);
drivers/net/ethernet/sfc/tc_conntrack.c
623
ct_zone->zone);
drivers/net/ethernet/sfc/tc_conntrack.h
20
u16 zone;
drivers/net/ethernet/sfc/tc_conntrack.h
34
struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone,
drivers/net/ethernet/sfc/tc_conntrack.h
48
struct efx_tc_ct_zone *zone;
drivers/net/fjes/fjes_hw.c
1030
hw->ep_shm_info[epidx].zone =
drivers/net/fjes/fjes_hw.c
1031
info[epidx].zone;
drivers/net/fjes/fjes_hw.c
1039
if ((info[epidx].zone !=
drivers/net/fjes/fjes_hw.c
1043
(info[epidx].zone ==
drivers/net/fjes/fjes_hw.c
1044
info[hw->my_epid].zone))
drivers/net/fjes/fjes_hw.c
1052
if ((info[epidx].zone ==
drivers/net/fjes/fjes_hw.c
1056
(info[epidx].zone !=
drivers/net/fjes/fjes_hw.c
1057
info[hw->my_epid].zone)) {
drivers/net/fjes/fjes_hw.c
1066
if ((info[epidx].zone ==
drivers/net/fjes/fjes_hw.c
1070
(info[epidx].zone !=
drivers/net/fjes/fjes_hw.c
1071
info[hw->my_epid].zone))
drivers/net/fjes/fjes_hw.c
1078
hw->ep_shm_info[epidx].zone = info[epidx].zone;
drivers/net/fjes/fjes_hw.c
737
(hw->ep_shm_info[hw->my_epid].zone ==
drivers/net/fjes/fjes_hw.c
741
return (hw->ep_shm_info[epid].zone ==
drivers/net/fjes/fjes_hw.c
742
hw->ep_shm_info[hw->my_epid].zone);
drivers/net/fjes/fjes_hw.c
991
struct my_s {u8 es_status; u8 zone; } *info;
drivers/net/fjes/fjes_hw.h
144
u8 zone;
drivers/net/fjes/fjes_hw.h
260
u8 zone;
drivers/net/fjes/fjes_main.c
130
hw->ep_shm_info[epidx].zone =
drivers/net/fjes/fjes_main.c
131
hw->hw_info.res_buf->info.info[epidx].zone;
drivers/net/fjes/fjes_trace.h
56
__dynamic_array(u8, zone, hw->max_epid)
drivers/net/fjes/fjes_trace.h
65
*((u8 *)__get_dynamic_array(zone) + x) =
drivers/net/fjes/fjes_trace.h
66
res_buf->info.info[x].zone;
drivers/net/fjes/fjes_trace.h
73
__print_array(__get_dynamic_array(zone),
drivers/net/fjes/fjes_trace.h
74
__get_dynamic_array_len(zone) / sizeof(u8),
drivers/net/wireless/rsi/rsi_91x_main.c
55
void rsi_dbg(u32 zone, const char *fmt, ...)
drivers/net/wireless/rsi/rsi_91x_main.c
65
if (zone & rsi_zone_enabled)
drivers/net/wireless/rsi/rsi_main.h
62
extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
drivers/nvme/host/zns.c
155
struct blk_zone zone = { };
drivers/nvme/host/zns.c
162
zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
drivers/nvme/host/zns.c
163
zone.cond = entry->zs >> 4;
drivers/nvme/host/zns.c
164
zone.len = head->zsze;
drivers/nvme/host/zns.c
165
zone.capacity = nvme_lba_to_sect(head, le64_to_cpu(entry->zcap));
drivers/nvme/host/zns.c
166
zone.start = nvme_lba_to_sect(head, le64_to_cpu(entry->zslba));
drivers/nvme/host/zns.c
167
if (zone.cond == BLK_ZONE_COND_FULL)
drivers/nvme/host/zns.c
168
zone.wp = zone.start + zone.len;
drivers/nvme/host/zns.c
170
zone.wp = nvme_lba_to_sect(head, le64_to_cpu(entry->wp));
drivers/nvme/host/zns.c
172
return disk_report_zone(ns->disk, &zone, idx, args);
drivers/platform/chrome/wilco_ec/telemetry.c
103
u8 zone;
drivers/platform/x86/dell/alienware-wmi-base.c
342
DEFINE_SYSFS_GROUP_VISIBLE(zone);
drivers/platform/x86/dell/alienware-wmi-base.c
355
.is_visible = SYSFS_GROUP_VISIBLE(zone),
drivers/pmdomain/bcm/bcm-pmb.c
124
int zone)
drivers/pmdomain/bcm/bcm-pmb.c
130
offset = BPCM_ZONE0 + zone * BPCM_ZONE_SIZE + BPCM_ZONE_CONTROL;
drivers/pmdomain/bcm/bcm-pmb.c
145
int zone)
drivers/pmdomain/bcm/bcm-pmb.c
151
offset = BPCM_ZONE0 + zone * BPCM_ZONE_SIZE + BPCM_ZONE_CONTROL;
drivers/powercap/arm_scmi_powercap.c
18
container_of(z, struct scmi_powercap_zone, zone)
drivers/powercap/arm_scmi_powercap.c
273
&spz->zone);
drivers/powercap/arm_scmi_powercap.c
30
struct powercap_zone zone;
drivers/powercap/arm_scmi_powercap.c
308
z = powercap_register_zone(&spz->zone, scmi_top_pcntrl, spz->info->name,
drivers/powercap/arm_scmi_powercap.c
309
parent ? &parent->zone : NULL,
drivers/powercap/dtpm.c
145
dtpm->zone.name, ret);
drivers/powercap/dtpm.c
221
dtpm->zone.name, power_limit);
drivers/powercap/dtpm.c
251
child->zone.name, power);
drivers/powercap/dtpm.c
255
ret = get_power_limit_uw(&child->zone, cid, &power);
drivers/powercap/dtpm.c
282
dtpm->zone.name, dtpm->power_limit, dtpm->power_max);
drivers/powercap/dtpm.c
338
powercap_unregister_zone(pct, &dtpm->zone);
drivers/powercap/dtpm.c
340
pr_debug("Unregistered dtpm node '%s'\n", dtpm->zone.name);
drivers/powercap/dtpm.c
390
pcz = powercap_register_zone(&dtpm->zone, pct, name,
drivers/powercap/dtpm.c
391
parent ? &parent->zone : NULL,
drivers/powercap/dtpm.c
410
dtpm->zone.name, dtpm->power_min, dtpm->power_max);
drivers/powercap/dtpm.c
94
child->weight, child->zone.name);
drivers/s390/crypto/vfio_ap_ops.c
510
aqic_gisa.zone, aqic_gisa.ir, aqic_gisa.gisc,
drivers/scsi/sd_zbc.c
49
struct blk_zone zone = { 0 };
drivers/scsi/sd_zbc.c
55
zone.type = buf[0] & 0x0f;
drivers/scsi/sd_zbc.c
56
zone.cond = (buf[1] >> 4) & 0xf;
drivers/scsi/sd_zbc.c
58
zone.reset = 1;
drivers/scsi/sd_zbc.c
60
zone.non_seq = 1;
drivers/scsi/sd_zbc.c
63
zone.start = logical_to_sectors(sdp, start_lba);
drivers/scsi/sd_zbc.c
64
zone.capacity = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
drivers/scsi/sd_zbc.c
65
zone.len = zone.capacity;
drivers/scsi/sd_zbc.c
68
if (zone.len > gran) {
drivers/scsi/sd_zbc.c
72
sectors_to_logical(sdp, zone.capacity),
drivers/scsi/sd_zbc.c
73
sectors_to_logical(sdp, zone.len),
drivers/scsi/sd_zbc.c
81
zone.len = gran;
drivers/scsi/sd_zbc.c
83
if (zone.cond == ZBC_ZONE_COND_FULL)
drivers/scsi/sd_zbc.c
84
zone.wp = zone.start + zone.len;
drivers/scsi/sd_zbc.c
86
zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
drivers/scsi/sd_zbc.c
88
return disk_report_zone(sdkp->disk, &zone, idx, args);
drivers/soc/qcom/icc-bwmon.c
128
#define BWMON_V4_ZONE_MAX(zone) (0x2e0 + 4 * (zone))
drivers/soc/qcom/icc-bwmon.c
129
#define BWMON_V5_ZONE_MAX(zone) (0x044 + 4 * (zone))
drivers/soc/qcom/icc-bwmon.c
608
int zone;
drivers/soc/qcom/icc-bwmon.c
629
zone = get_bitmask_order(status) - 1;
drivers/soc/qcom/icc-bwmon.c
635
if (regmap_field_read(bwmon->regs[F_ZONE0_MAX + zone], &max))
drivers/thermal/da9062-thermal.c
111
thermal_zone_device_update(thermal->zone,
drivers/thermal/da9062-thermal.c
199
thermal->zone = thermal_zone_device_register_with_trips(thermal->config->name,
drivers/thermal/da9062-thermal.c
203
if (IS_ERR(thermal->zone)) {
drivers/thermal/da9062-thermal.c
205
ret = PTR_ERR(thermal->zone);
drivers/thermal/da9062-thermal.c
208
ret = thermal_zone_device_enable(thermal->zone);
drivers/thermal/da9062-thermal.c
237
thermal_zone_device_unregister(thermal->zone);
drivers/thermal/da9062-thermal.c
248
thermal_zone_device_unregister(thermal->zone);
drivers/thermal/da9062-thermal.c
53
struct thermal_zone_device *zone;
drivers/thermal/da9062-thermal.c
97
thermal_zone_device_update(thermal->zone,
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
14
static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
163
int34x_zone->zone = thermal_zone_device_register_with_trips(
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
17
struct int34x_thermal_zone *d = thermal_zone_device_priv(zone);
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
172
if (IS_ERR(int34x_zone->zone)) {
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
173
ret = PTR_ERR(int34x_zone->zone);
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
176
ret = thermal_zone_device_enable(int34x_zone->zone);
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
183
thermal_zone_device_unregister(int34x_zone->zone);
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
194
thermal_zone_device_unregister(int34x_zone->zone);
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
227
thermal_zone_set_trip_temp(int34x_zone->zone, trip, temp);
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
234
thermal_zone_for_each_trip(int34x_zone->zone, int340x_update_one_trip,
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
41
static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
44
struct int34x_thermal_zone *d = thermal_zone_device_priv(zone);
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
60
static void int340x_thermal_critical(struct thermal_zone_device *zone)
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
62
dev_dbg(thermal_zone_device(zone), "%s: critical temperature reached\n",
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.c
63
thermal_zone_device_type(zone));
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
24
struct thermal_zone_device *zone;
drivers/thermal/intel/int340x_thermal/int340x_thermal_zone.h
50
thermal_zone_device_update(tzone->zone, event);
drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
177
static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
drivers/thermal/renesas/rcar_gen3_thermal.c
260
if (status && priv->tscs[i]->zone)
drivers/thermal/renesas/rcar_gen3_thermal.c
261
thermal_zone_device_update(priv->tscs[i]->zone,
drivers/thermal/renesas/rcar_gen3_thermal.c
479
struct thermal_zone_device *zone = data;
drivers/thermal/renesas/rcar_gen3_thermal.c
481
thermal_remove_hwmon_sysfs(zone);
drivers/thermal/renesas/rcar_gen3_thermal.c
517
struct thermal_zone_device *zone;
drivers/thermal/renesas/rcar_gen3_thermal.c
572
zone = devm_thermal_of_zone_register(dev, i, tsc, &priv->ops);
drivers/thermal/renesas/rcar_gen3_thermal.c
573
if (IS_ERR(zone)) {
drivers/thermal/renesas/rcar_gen3_thermal.c
575
ret = PTR_ERR(zone);
drivers/thermal/renesas/rcar_gen3_thermal.c
578
tsc->zone = zone;
drivers/thermal/renesas/rcar_gen3_thermal.c
580
ret = thermal_add_hwmon_sysfs(tsc->zone);
drivers/thermal/renesas/rcar_gen3_thermal.c
584
ret = devm_add_action_or_reset(dev, rcar_gen3_hwmon_action, zone);
drivers/thermal/renesas/rcar_gen3_thermal.c
97
struct thermal_zone_device *zone;
drivers/thermal/renesas/rcar_thermal.c
273
static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
drivers/thermal/renesas/rcar_thermal.c
275
struct rcar_thermal_priv *priv = thermal_zone_device_priv(zone);
drivers/thermal/renesas/rcar_thermal.c
322
thermal_zone_device_update(priv->zone, THERMAL_EVENT_UNSPECIFIED);
drivers/thermal/renesas/rcar_thermal.c
384
thermal_remove_hwmon_sysfs(priv->zone);
drivers/thermal/renesas/rcar_thermal.c
386
thermal_zone_device_unregister(priv->zone);
drivers/thermal/renesas/rcar_thermal.c
487
priv->zone = devm_thermal_of_zone_register(
drivers/thermal/renesas/rcar_thermal.c
491
priv->zone = thermal_zone_device_register_with_trips(
drivers/thermal/renesas/rcar_thermal.c
496
ret = thermal_zone_device_enable(priv->zone);
drivers/thermal/renesas/rcar_thermal.c
498
thermal_zone_device_unregister(priv->zone);
drivers/thermal/renesas/rcar_thermal.c
499
priv->zone = ERR_PTR(ret);
drivers/thermal/renesas/rcar_thermal.c
502
if (IS_ERR(priv->zone)) {
drivers/thermal/renesas/rcar_thermal.c
504
ret = PTR_ERR(priv->zone);
drivers/thermal/renesas/rcar_thermal.c
505
priv->zone = NULL;
drivers/thermal/renesas/rcar_thermal.c
510
ret = thermal_add_hwmon_sysfs(priv->zone);
drivers/thermal/renesas/rcar_thermal.c
92
struct thermal_zone_device *zone;
drivers/thermal/renesas/rzg2l_thermal.c
157
thermal_remove_hwmon_sysfs(priv->zone);
drivers/thermal/renesas/rzg2l_thermal.c
163
struct thermal_zone_device *zone;
drivers/thermal/renesas/rzg2l_thermal.c
208
zone = devm_thermal_of_zone_register(dev, 0, priv,
drivers/thermal/renesas/rzg2l_thermal.c
210
if (IS_ERR(zone)) {
drivers/thermal/renesas/rzg2l_thermal.c
212
ret = PTR_ERR(zone);
drivers/thermal/renesas/rzg2l_thermal.c
216
priv->zone = zone;
drivers/thermal/renesas/rzg2l_thermal.c
217
ret = thermal_add_hwmon_sysfs(priv->zone);
drivers/thermal/renesas/rzg2l_thermal.c
60
struct thermal_zone_device *zone;
drivers/thermal/renesas/rzg3e_thermal.c
101
struct thermal_zone_device *zone;
drivers/thermal/renesas/rzg3e_thermal.c
313
thermal_zone_device_update(priv->zone, THERMAL_TRIP_VIOLATED);
drivers/thermal/renesas/rzg3e_thermal.c
453
priv->zone = devm_thermal_of_zone_register(dev, 0, priv, &rzg3e_tz_ops);
drivers/thermal/renesas/rzg3e_thermal.c
454
if (IS_ERR(priv->zone)) {
drivers/thermal/renesas/rzg3e_thermal.c
455
ret = PTR_ERR(priv->zone);
drivers/thermal/renesas/rzg3e_thermal.c
470
ret = devm_thermal_add_hwmon_sysfs(dev, priv->zone);
drivers/thermal/tegra/soctherm.c
2185
struct tegra_thermctl_zone *zone =
drivers/thermal/tegra/soctherm.c
2186
devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
drivers/thermal/tegra/soctherm.c
2187
if (!zone) {
drivers/thermal/tegra/soctherm.c
2192
zone->reg = tegra->regs + soc->ttgs[i]->sensor_temp_offset;
drivers/thermal/tegra/soctherm.c
2193
zone->dev = &pdev->dev;
drivers/thermal/tegra/soctherm.c
2194
zone->sg = soc->ttgs[i];
drivers/thermal/tegra/soctherm.c
2195
zone->ts = tegra;
drivers/thermal/tegra/soctherm.c
2198
soc->ttgs[i]->id, zone,
drivers/thermal/tegra/soctherm.c
2207
zone->tz = z;
drivers/thermal/tegra/soctherm.c
433
struct tegra_thermctl_zone *zone = thermal_zone_device_priv(tz);
drivers/thermal/tegra/soctherm.c
436
val = readl(zone->reg);
drivers/thermal/tegra/soctherm.c
437
val = REG_GET_MASK(val, zone->sg->sensor_temp_mask);
drivers/thermal/tegra/soctherm.c
595
struct tegra_thermctl_zone *zone = thermal_zone_device_priv(tz);
drivers/thermal/tegra/soctherm.c
596
struct tegra_soctherm *ts = zone->ts;
drivers/thermal/tegra/soctherm.c
597
const struct tegra_tsensor_group *sg = zone->sg;
drivers/thermal/tegra/soctherm.c
598
struct device *dev = zone->dev;
drivers/thermal/tegra/soctherm.c
663
struct tegra_thermctl_zone *zone = thermal_zone_device_priv(tz);
drivers/thermal/tegra/soctherm.c
666
thermal_irq_disable(zone);
drivers/thermal/tegra/soctherm.c
668
r = readl(zone->ts->regs + zone->sg->thermctl_lvl0_offset);
drivers/thermal/tegra/soctherm.c
670
writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
drivers/thermal/tegra/soctherm.c
672
lo = enforce_temp_range(zone->dev, lo) / zone->ts->soc->thresh_grain;
drivers/thermal/tegra/soctherm.c
673
hi = enforce_temp_range(zone->dev, hi) / zone->ts->soc->thresh_grain;
drivers/thermal/tegra/soctherm.c
674
dev_dbg(zone->dev, "%s hi:%d, lo:%d\n", __func__, hi, lo);
drivers/thermal/tegra/soctherm.c
676
r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_up_thresh_mask, hi);
drivers/thermal/tegra/soctherm.c
677
r = REG_SET_MASK(r, zone->sg->thermctl_lvl0_dn_thresh_mask, lo);
drivers/thermal/tegra/soctherm.c
679
writel(r, zone->ts->regs + zone->sg->thermctl_lvl0_offset);
drivers/thermal/tegra/soctherm.c
681
thermal_irq_enable(zone);
drivers/thermal/tegra/tegra-bpmp-thermal.c
102
struct tegra_bpmp_thermal_zone *zone;
drivers/thermal/tegra/tegra-bpmp-thermal.c
104
zone = container_of(work, struct tegra_bpmp_thermal_zone,
drivers/thermal/tegra/tegra-bpmp-thermal.c
107
thermal_zone_device_update(zone->tzd, THERMAL_TRIP_VIOLATED);
drivers/thermal/tegra/tegra-bpmp-thermal.c
128
if (tegra->zones[i]->idx != req.host_trip_reached.zone)
drivers/thermal/tegra/tegra-bpmp-thermal.c
137
req.host_trip_reached.zone);
drivers/thermal/tegra/tegra-bpmp-thermal.c
253
struct tegra_bpmp_thermal_zone *zone;
drivers/thermal/tegra/tegra-bpmp-thermal.c
256
zone = devm_kzalloc(&pdev->dev, sizeof(*zone), GFP_KERNEL);
drivers/thermal/tegra/tegra-bpmp-thermal.c
257
if (!zone)
drivers/thermal/tegra/tegra-bpmp-thermal.c
260
zone->idx = i;
drivers/thermal/tegra/tegra-bpmp-thermal.c
261
zone->tegra = tegra;
drivers/thermal/tegra/tegra-bpmp-thermal.c
263
err = __tegra_bpmp_thermal_get_temp(zone, &temp);
drivers/thermal/tegra/tegra-bpmp-thermal.c
270
devm_kfree(&pdev->dev, zone);
drivers/thermal/tegra/tegra-bpmp-thermal.c
275
&pdev->dev, i, zone, thermal_ops);
drivers/thermal/tegra/tegra-bpmp-thermal.c
279
devm_kfree(&pdev->dev, zone);
drivers/thermal/tegra/tegra-bpmp-thermal.c
283
zone->tzd = tzd;
drivers/thermal/tegra/tegra-bpmp-thermal.c
284
INIT_WORK(&zone->tz_device_update_work,
drivers/thermal/tegra/tegra-bpmp-thermal.c
287
tegra->zones[tegra->num_zones++] = zone;
drivers/thermal/tegra/tegra-bpmp-thermal.c
33
static int __tegra_bpmp_thermal_get_temp(struct tegra_bpmp_thermal_zone *zone,
drivers/thermal/tegra/tegra-bpmp-thermal.c
43
req.get_temp.zone = zone->idx;
drivers/thermal/tegra/tegra-bpmp-thermal.c
52
err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
drivers/thermal/tegra/tegra-bpmp-thermal.c
67
struct tegra_bpmp_thermal_zone *zone = thermal_zone_device_priv(tz);
drivers/thermal/tegra/tegra-bpmp-thermal.c
69
return __tegra_bpmp_thermal_get_temp(zone, out_temp);
drivers/thermal/tegra/tegra-bpmp-thermal.c
74
struct tegra_bpmp_thermal_zone *zone = thermal_zone_device_priv(tz);
drivers/thermal/tegra/tegra-bpmp-thermal.c
81
req.set_trip.zone = zone->idx;
drivers/thermal/tegra/tegra-bpmp-thermal.c
91
err = tegra_bpmp_transfer(zone->tegra->bpmp, &msg);
drivers/usb/storage/alauda.c
552
unsigned int zone)
drivers/usb/storage/alauda.c
554
u16 *pba_to_lba = info->pba_to_lba[zone];
drivers/usb/storage/alauda.c
559
return (zone << info->zoneshift) + i;
drivers/usb/storage/alauda.c
568
static int alauda_read_map(struct us_data *us, unsigned int zone)
drivers/usb/storage/alauda.c
576
unsigned int zone_base_lba = zone * uzonesize;
drivers/usb/storage/alauda.c
577
unsigned int zone_base_pba = zone * zonesize;
drivers/usb/storage/alauda.c
585
usb_stor_dbg(us, "Mapping blocks for zone %d\n", zone);
drivers/usb/storage/alauda.c
676
MEDIA_INFO(us).lba_to_pba[zone] = lba_to_pba;
drivers/usb/storage/alauda.c
677
MEDIA_INFO(us).pba_to_lba[zone] = pba_to_lba;
drivers/usb/storage/alauda.c
692
static void alauda_ensure_map_for_zone(struct us_data *us, unsigned int zone)
drivers/usb/storage/alauda.c
694
if (MEDIA_INFO(us).lba_to_pba[zone] == NULL
drivers/usb/storage/alauda.c
695
|| MEDIA_INFO(us).pba_to_lba[zone] == NULL)
drivers/usb/storage/alauda.c
696
alauda_read_map(us, zone);
drivers/usb/storage/alauda.c
824
unsigned int zone = lba / uzonesize;
drivers/usb/storage/alauda.c
826
alauda_ensure_map_for_zone(us, zone);
drivers/usb/storage/alauda.c
828
pba = MEDIA_INFO(us).lba_to_pba[zone][lba_offset];
drivers/usb/storage/alauda.c
839
new_pba = alauda_find_unused_pba(&MEDIA_INFO(us), zone);
drivers/usb/storage/alauda.c
897
new_pba_offset = new_pba - (zone * zonesize);
drivers/usb/storage/alauda.c
898
MEDIA_INFO(us).pba_to_lba[zone][new_pba_offset] = lba;
drivers/usb/storage/alauda.c
899
MEDIA_INFO(us).lba_to_pba[zone][lba_offset] = new_pba;
drivers/usb/storage/alauda.c
903
unsigned int pba_offset = pba - (zone * zonesize);
drivers/usb/storage/alauda.c
907
MEDIA_INFO(us).pba_to_lba[zone][pba_offset] = UNDEF;
drivers/usb/storage/alauda.c
953
unsigned int zone = lba / uzonesize; /* integer division */
drivers/usb/storage/alauda.c
954
unsigned int lba_offset = lba - (zone * uzonesize);
drivers/usb/storage/alauda.c
957
alauda_ensure_map_for_zone(us, zone);
drivers/usb/storage/alauda.c
972
pba = MEDIA_INFO(us).lba_to_pba[zone][lba_offset];
drivers/usb/storage/sddr55.c
720
int zone = i / 1024;
drivers/usb/storage/sddr55.c
748
if (info->lba_to_pba[lba + zone * 1000] != NOT_ALLOCATED &&
drivers/usb/storage/sddr55.c
752
lba + zone * 1000);
drivers/usb/storage/sddr55.c
759
info->lba_to_pba[lba + zone * 1000] = i;
fs/adfs/map.c
159
static int scan_map(struct adfs_sb_info *asb, unsigned int zone,
fs/adfs/map.c
166
dm = asb->s_map + zone;
fs/adfs/map.c
167
zone = asb->s_map_size;
fs/adfs/map.c
168
dm_end = asb->s_map + zone;
fs/adfs/map.c
179
} while (--zone > 0);
fs/adfs/map.c
202
unsigned int zone;
fs/adfs/map.c
205
zone = asb->s_map_size;
fs/adfs/map.c
209
} while (--zone > 0);
fs/adfs/map.c
220
unsigned int zone, mapoff;
fs/adfs/map.c
228
zone = asb->s_map_size >> 1;
fs/adfs/map.c
230
zone = frag_id / asb->s_ids_per_zone;
fs/adfs/map.c
232
if (zone >= asb->s_map_size)
fs/adfs/map.c
239
result = scan_map(asb, zone, frag_id, mapoff);
fs/adfs/map.c
256
frag_id, zone, asb->s_map_size);
fs/adfs/map.c
312
unsigned int zone, zone_size;
fs/adfs/map.c
322
for (zone = 1; zone < nzones; zone++) {
fs/adfs/map.c
323
dm[zone].dm_bh = NULL;
fs/adfs/map.c
324
dm[zone].dm_startblk = zone * zone_size - ADFS_DR_SIZE_BITS;
fs/adfs/map.c
325
dm[zone].dm_startbit = 32;
fs/adfs/map.c
326
dm[zone].dm_endbit = 32 + zone_size;
fs/adfs/map.c
337
unsigned int zone;
fs/adfs/map.c
339
for (zone = 0; zone < nzones; zone++) {
fs/adfs/map.c
340
dm[zone].dm_bh = sb_bread(sb, map_addr + zone);
fs/adfs/map.c
341
if (!dm[zone].dm_bh)
fs/adfs/map.c
350
unsigned int zone;
fs/adfs/map.c
352
for (zone = 0; zone < nzones; zone++)
fs/adfs/map.c
353
brelse(dm[zone].dm_bh);
fs/btrfs/zoned.c
1003
zone->wp = zone->start + zone->len;
fs/btrfs/zoned.c
1004
zone->cond = BLK_ZONE_COND_FULL;
fs/btrfs/zoned.c
1316
struct blk_zone zone;
fs/btrfs/zoned.c
1368
ret = btrfs_get_dev_zone(device, info->physical, &zone);
fs/btrfs/zoned.c
1378
if (unlikely(zone.type == BLK_ZONE_TYPE_CONVENTIONAL)) {
fs/btrfs/zoned.c
1381
zone.start << SECTOR_SHIFT, rcu_dereference(device->name),
fs/btrfs/zoned.c
1387
info->capacity = (zone.capacity << SECTOR_SHIFT);
fs/btrfs/zoned.c
1389
switch (zone.cond) {
fs/btrfs/zoned.c
1406
info->alloc_offset = ((zone.wp - zone.start) << SECTOR_SHIFT);
fs/btrfs/zoned.c
168
u64 zone = U64_MAX;
fs/btrfs/zoned.c
172
case 0: zone = 0; break;
fs/btrfs/zoned.c
173
case 1: zone = 1ULL << (BTRFS_SB_LOG_FIRST_SHIFT - shift); break;
fs/btrfs/zoned.c
174
case 2: zone = 1ULL << (BTRFS_SB_LOG_SECOND_SHIFT - shift); break;
fs/btrfs/zoned.c
177
ASSERT(zone <= U32_MAX, "zone=%llu", zone);
fs/btrfs/zoned.c
179
return (u32)zone;
fs/btrfs/zoned.c
2292
struct blk_zone *zone)
fs/btrfs/zoned.c
2322
ret = btrfs_get_dev_zone(dev, physical, zone);
fs/btrfs/zoned.c
2343
struct blk_zone zone;
fs/btrfs/zoned.c
2351
ret = read_zone_info(fs_info, logical, &zone);
fs/btrfs/zoned.c
2355
wp = physical_start + ((zone.wp - zone.start) << SECTOR_SHIFT);
fs/btrfs/zoned.c
664
static int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos, struct blk_zone *zone)
fs/btrfs/zoned.c
669
ret = btrfs_get_dev_zones(device, pos, zone, &nr_zones);
fs/btrfs/zoned.c
73
static inline bool sb_zone_is_full(const struct blk_zone *zone)
fs/btrfs/zoned.c
75
return (zone->cond == BLK_ZONE_COND_FULL) ||
fs/btrfs/zoned.c
76
(zone->wp + SUPER_INFO_SECTORS > zone->start + zone->capacity);
fs/btrfs/zoned.c
79
static int copy_zone_info_cb(struct blk_zone *zone, unsigned int idx, void *data)
fs/btrfs/zoned.c
83
memcpy(&zones[idx], zone, sizeof(*zone));
fs/btrfs/zoned.c
962
struct blk_zone *zone;
fs/btrfs/zoned.c
968
zone = &zinfo->sb_zones[BTRFS_NR_SB_LOG_ZONES * mirror];
fs/btrfs/zoned.c
971
if (zone->cond == BLK_ZONE_COND_FULL) {
fs/btrfs/zoned.c
972
zone++;
fs/btrfs/zoned.c
976
if (zone->cond == BLK_ZONE_COND_EMPTY)
fs/btrfs/zoned.c
977
zone->cond = BLK_ZONE_COND_IMP_OPEN;
fs/btrfs/zoned.c
979
zone->wp += SUPER_INFO_SECTORS;
fs/btrfs/zoned.c
981
if (sb_zone_is_full(zone)) {
fs/btrfs/zoned.c
990
if (zone->wp != zone->start + zone->capacity) {
fs/btrfs/zoned.c
996
REQ_OP_ZONE_FINISH, zone->start,
fs/btrfs/zoned.c
997
zone->len);
fs/f2fs/f2fs.h
4818
unsigned int zone)
fs/f2fs/f2fs.h
4820
return test_bit(zone, FDEV(devi).blkz_seq);
fs/f2fs/segment.c
2873
if (CURSEG_I(sbi, i)->zone == zoneno)
fs/f2fs/segment.c
2921
curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
fs/f2fs/segment.c
5303
struct blk_zone *zone)
fs/f2fs/segment.c
5311
if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
fs/f2fs/segment.c
5314
zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
fs/f2fs/segment.c
5331
blk_zone_cond_str(zone->cond));
fs/f2fs/segment.c
5335
if ((!valid_block_cnt && zone->cond == BLK_ZONE_COND_EMPTY) ||
fs/f2fs/segment.c
5336
(valid_block_cnt && zone->cond == BLK_ZONE_COND_FULL))
fs/f2fs/segment.c
5342
blk_zone_cond_str(zone->cond));
fs/f2fs/segment.c
5344
zone->len >> log_sectors_per_block);
fs/f2fs/segment.c
5360
zone_segno, valid_block_cnt, blk_zone_cond_str(zone->cond));
fs/f2fs/segment.c
5364
zone->start, zone->len);
fs/f2fs/segment.c
5367
ret = blkdev_issue_zeroout(fdev->bdev, zone->wp,
fs/f2fs/segment.c
5368
zone->len - (zone->wp - zone->start),
fs/f2fs/segment.c
5397
static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
fs/f2fs/segment.c
5400
memcpy(data, zone, sizeof(struct blk_zone));
fs/f2fs/segment.c
5408
struct blk_zone zone;
fs/f2fs/segment.c
5426
report_one_zone_cb, &zone);
fs/f2fs/segment.c
5433
if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
fs/f2fs/segment.c
5441
wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
fs/f2fs/segment.c
5444
wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
fs/f2fs/segment.c
5468
if (check_zone_write_pointer(sbi, zbd, &zone))
fs/f2fs/segment.c
5482
report_one_zone_cb, &zone);
fs/f2fs/segment.c
5489
if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
fs/f2fs/segment.c
5492
if (zone.wp != zone.start) {
fs/f2fs/segment.c
5498
zone.len >> log_sectors_per_block);
fs/f2fs/segment.c
5527
static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
fs/f2fs/segment.c
5534
return check_zone_write_pointer(args->sbi, args->fdev, zone);
fs/f2fs/segment.h
281
unsigned int zone; /* current zone number */
fs/f2fs/super.c
4414
static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
fs/f2fs/super.c
4418
block_t unusable_blocks = (zone->len - zone->capacity) >>
fs/f2fs/super.c
4421
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
fs/minix/bitmap.c
48
unsigned long bit, zone;
fs/minix/bitmap.c
54
zone = block - sbi->s_firstdatazone + 1;
fs/minix/bitmap.c
55
bit = zone & ((1<<k) - 1);
fs/minix/bitmap.c
56
zone >>= k;
fs/minix/bitmap.c
57
if (zone >= sbi->s_zmap_blocks) {
fs/minix/bitmap.c
61
bh = sbi->s_zmap[zone];
fs/ntfs3/run.c
1113
bool ok, zone;
fs/ntfs3/run.c
1144
zone = max(wnd->zone_bit, lcn) < min(wnd->zone_end, lcn + len);
fs/ntfs3/run.c
1146
ok = !zone && wnd_is_used(wnd, lcn, len);
fs/ntfs3/run.c
1157
if (zone) {
fs/ntfs3/run.c
1167
if (zone) {
fs/pstore/blk.c
107
if (!dev->zone.total_size) {
fs/pstore/blk.c
111
if (!dev->zone.read) {
fs/pstore/blk.c
115
if (!dev->zone.write) {
fs/pstore/blk.c
133
dev->zone.max_reason = max_reason;
fs/pstore/blk.c
136
dev->zone.name = KBUILD_MODNAME;
fs/pstore/blk.c
137
dev->zone.owner = THIS_MODULE;
fs/pstore/blk.c
139
ret = register_pstore_zone(&dev->zone);
fs/pstore/blk.c
170
unregister_pstore_zone(&dev->zone);
fs/pstore/blk.c
224
dev->zone.total_size =
fs/pstore/blk.c
304
best_effort_dev->zone.read = psblk_generic_blk_read;
fs/pstore/blk.c
305
best_effort_dev->zone.write = psblk_generic_blk_write;
fs/pstore/blk.c
313
blkdev, best_effort_dev->zone.total_size);
fs/pstore/blk.c
94
dev->zone.name = _##name_; \
fs/pstore/zone.c
1002
static ssize_t psz_ftrace_read(struct pstore_zone *zone,
fs/pstore/zone.c
1009
if (!zone || !record)
fs/pstore/zone.c
1012
if (!psz_old_ok(zone))
fs/pstore/zone.c
1015
buf = (struct psz_buffer *)zone->oldbuf;
fs/pstore/zone.c
1033
static ssize_t psz_record_read(struct pstore_zone *zone,
fs/pstore/zone.c
1039
if (!zone || !record)
fs/pstore/zone.c
1042
buf = (struct psz_buffer *)zone->oldbuf;
fs/pstore/zone.c
1051
if (unlikely(psz_zone_read_oldbuf(zone, record->buf, len, 0))) {
fs/pstore/zone.c
1062
ssize_t (*readop)(struct pstore_zone *zone,
fs/pstore/zone.c
1064
struct pstore_zone *zone;
fs/pstore/zone.c
1073
zone = psz_read_next_zone(cxt);
fs/pstore/zone.c
1074
if (!zone)
fs/pstore/zone.c
1077
record->type = zone->type;
fs/pstore/zone.c
1094
ret = readop(zone, record);
fs/pstore/zone.c
1116
struct pstore_zone *zone = *pszone;
fs/pstore/zone.c
1118
if (!zone)
fs/pstore/zone.c
1121
kfree(zone->buffer);
fs/pstore/zone.c
1122
kfree(zone);
fs/pstore/zone.c
1157
struct pstore_zone *zone;
fs/pstore/zone.c
1169
zone = kzalloc_obj(struct pstore_zone);
fs/pstore/zone.c
1170
if (!zone)
fs/pstore/zone.c
1173
zone->buffer = kmalloc(size, GFP_KERNEL);
fs/pstore/zone.c
1174
if (!zone->buffer) {
fs/pstore/zone.c
1175
kfree(zone);
fs/pstore/zone.c
1178
memset(zone->buffer, 0xFF, size);
fs/pstore/zone.c
1179
zone->off = *off;
fs/pstore/zone.c
1180
zone->name = name;
fs/pstore/zone.c
1181
zone->type = type;
fs/pstore/zone.c
1182
zone->buffer_size = size - sizeof(struct psz_buffer);
fs/pstore/zone.c
1183
zone->buffer->sig = type ^ PSZ_SIG;
fs/pstore/zone.c
1184
zone->oldbuf = NULL;
fs/pstore/zone.c
1185
atomic_set(&zone->dirty, 0);
fs/pstore/zone.c
1186
atomic_set(&zone->buffer->datalen, 0);
fs/pstore/zone.c
1187
atomic_set(&zone->buffer->start, 0);
fs/pstore/zone.c
1191
pr_debug("pszone %s: off 0x%llx, %zu header, %zu data\n", zone->name,
fs/pstore/zone.c
1192
zone->off, sizeof(*zone->buffer), zone->buffer_size);
fs/pstore/zone.c
1193
return zone;
fs/pstore/zone.c
1201
struct pstore_zone **zones, *zone;
fs/pstore/zone.c
1228
zone = psz_init_zone(type, off, record_size);
fs/pstore/zone.c
1229
if (!zone || IS_ERR(zone)) {
fs/pstore/zone.c
1232
return (void *)zone;
fs/pstore/zone.c
1234
zones[i] = zone;
fs/pstore/zone.c
160
static inline int buffer_datalen(struct pstore_zone *zone)
fs/pstore/zone.c
162
return atomic_read(&zone->buffer->datalen);
fs/pstore/zone.c
165
static inline int buffer_start(struct pstore_zone *zone)
fs/pstore/zone.c
167
return atomic_read(&zone->buffer->start);
fs/pstore/zone.c
175
static ssize_t psz_zone_read_buffer(struct pstore_zone *zone, char *buf,
fs/pstore/zone.c
178
if (!buf || !zone || !zone->buffer)
fs/pstore/zone.c
180
if (off > zone->buffer_size)
fs/pstore/zone.c
182
len = min_t(size_t, len, zone->buffer_size - off);
fs/pstore/zone.c
183
memcpy(buf, zone->buffer->data + off, len);
fs/pstore/zone.c
187
static int psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf,
fs/pstore/zone.c
190
if (!buf || !zone || !zone->oldbuf)
fs/pstore/zone.c
192
if (off > zone->buffer_size)
fs/pstore/zone.c
194
len = min_t(size_t, len, zone->buffer_size - off);
fs/pstore/zone.c
195
memcpy(buf, zone->oldbuf->data + off, len);
fs/pstore/zone.c
199
static int psz_zone_write(struct pstore_zone *zone,
fs/pstore/zone.c
208
if (off > zone->buffer_size)
fs/pstore/zone.c
211
wlen = min_t(size_t, len, zone->buffer_size - off);
fs/pstore/zone.c
213
memcpy(zone->buffer->data + off, buf, wlen);
fs/pstore/zone.c
214
atomic_set(&zone->buffer->datalen, wlen + off);
fs/pstore/zone.c
231
wcnt = writeop((const char *)zone->buffer->data + off, wlen,
fs/pstore/zone.c
232
zone->off + sizeof(*zone->buffer) + off);
fs/pstore/zone.c
238
wcnt = writeop((const char *)zone->buffer, wlen, zone->off);
fs/pstore/zone.c
243
wlen = zone->buffer_size + sizeof(*zone->buffer);
fs/pstore/zone.c
244
wcnt = writeop((const char *)zone->buffer, wlen, zone->off);
fs/pstore/zone.c
255
atomic_set(&zone->dirty, true);
fs/pstore/zone.c
262
static int psz_flush_dirty_zone(struct pstore_zone *zone)
fs/pstore/zone.c
266
if (unlikely(!zone))
fs/pstore/zone.c
272
if (!atomic_xchg(&zone->dirty, false))
fs/pstore/zone.c
275
ret = psz_zone_write(zone, FLUSH_ALL, NULL, 0, 0);
fs/pstore/zone.c
277
atomic_set(&zone->dirty, true);
fs/pstore/zone.c
284
struct pstore_zone *zone;
fs/pstore/zone.c
290
zone = zones[i];
fs/pstore/zone.c
291
if (!zone)
fs/pstore/zone.c
293
ret = psz_flush_dirty_zone(zone);
fs/pstore/zone.c
335
struct pstore_zone *zone = NULL;
fs/pstore/zone.c
344
zone = cxt->kpszs[i];
fs/pstore/zone.c
345
if (unlikely(!zone))
fs/pstore/zone.c
347
if (atomic_read(&zone->dirty)) {
fs/pstore/zone.c
352
ret = psz_move_zone(zone, new);
fs/pstore/zone.c
360
if (!zone->should_recover)
fs/pstore/zone.c
362
buf = zone->buffer;
fs/pstore/zone.c
363
rcnt = info->read((char *)buf, zone->buffer_size + sizeof(*buf),
fs/pstore/zone.c
364
zone->off);
fs/pstore/zone.c
365
if (rcnt != zone->buffer_size + sizeof(*buf))
fs/pstore/zone.c
374
struct pstore_zone *zone;
fs/pstore/zone.c
392
zone = cxt->kpszs[i];
fs/pstore/zone.c
393
if (unlikely(!zone))
fs/pstore/zone.c
396
rcnt = info->read((char *)buf, len, zone->off);
fs/pstore/zone.c
399
zone->name, i);
fs/pstore/zone.c
402
pr_err("read %s with id %lu failed\n", zone->name, i);
fs/pstore/zone.c
406
if (buf->sig != zone->buffer->sig) {
fs/pstore/zone.c
411
if (zone->buffer_size < atomic_read(&buf->datalen)) {
fs/pstore/zone.c
413
zone->name, i, zone->off,
fs/pstore/zone.c
414
zone->buffer_size);
fs/pstore/zone.c
421
zone->name, i, zone->off,
fs/pstore/zone.c
422
zone->buffer_size);
fs/pstore/zone.c
444
zone->name, i, zone->off,
fs/pstore/zone.c
445
zone->buffer_size,
fs/pstore/zone.c
451
zone->should_recover = true;
fs/pstore/zone.c
453
zone->name, i, zone->off,
fs/pstore/zone.c
454
zone->buffer_size, atomic_read(&buf->datalen));
fs/pstore/zone.c
481
static int psz_recover_zone(struct psz_context *cxt, struct pstore_zone *zone)
fs/pstore/zone.c
489
if (!zone || zone->oldbuf)
fs/pstore/zone.c
494
psz_flush_dirty_zone(zone);
fs/pstore/zone.c
502
rcnt = info->read((char *)&tmpbuf, len, zone->off);
fs/pstore/zone.c
504
pr_debug("read zone %s failed\n", zone->name);
fs/pstore/zone.c
508
if (tmpbuf.sig != zone->buffer->sig) {
fs/pstore/zone.c
509
pr_debug("no valid data in zone %s\n", zone->name);
fs/pstore/zone.c
513
if (zone->buffer_size < atomic_read(&tmpbuf.datalen) ||
fs/pstore/zone.c
514
zone->buffer_size < atomic_read(&tmpbuf.start)) {
fs/pstore/zone.c
516
zone->name, zone->off, zone->buffer_size);
fs/pstore/zone.c
523
zone->name, zone->off, zone->buffer_size,
fs/pstore/zone.c
529
zone->name, zone->off, zone->buffer_size,
fs/pstore/zone.c
541
off = zone->off + sizeof(*oldbuf);
fs/pstore/zone.c
546
pr_err("read zone %s failed\n", zone->name);
fs/pstore/zone.c
554
pr_err("read zone %s failed\n", zone->name);
fs/pstore/zone.c
559
zone->oldbuf = oldbuf;
fs/pstore/zone.c
560
psz_flush_dirty_zone(zone);
fs/pstore/zone.c
573
struct pstore_zone *zone;
fs/pstore/zone.c
579
zone = zones[i];
fs/pstore/zone.c
580
if (unlikely(!zone))
fs/pstore/zone.c
582
ret = psz_recover_zone(cxt, zone);
fs/pstore/zone.c
589
pr_debug("recover %s[%u] failed\n", zone->name, i);
fs/pstore/zone.c
643
static inline bool psz_old_ok(struct pstore_zone *zone)
fs/pstore/zone.c
645
if (zone && zone->oldbuf && atomic_read(&zone->oldbuf->datalen))
fs/pstore/zone.c
650
static inline bool psz_ok(struct pstore_zone *zone)
fs/pstore/zone.c
652
if (zone && zone->buffer && buffer_datalen(zone))
fs/pstore/zone.c
658
struct pstore_zone *zone, struct pstore_record *record)
fs/pstore/zone.c
660
struct psz_buffer *buffer = zone->buffer;
fs/pstore/zone.c
665
if (unlikely(!psz_ok(zone)))
fs/pstore/zone.c
672
size = buffer_datalen(zone) + sizeof(*zone->buffer);
fs/pstore/zone.c
673
atomic_set(&zone->buffer->datalen, 0);
fs/pstore/zone.c
675
return cxt->pstore_zone_info->erase(size, zone->off);
fs/pstore/zone.c
677
return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
fs/pstore/zone.c
681
struct pstore_zone *zone)
fs/pstore/zone.c
683
if (unlikely(!psz_old_ok(zone)))
fs/pstore/zone.c
686
kfree(zone->oldbuf);
fs/pstore/zone.c
687
zone->oldbuf = NULL;
fs/pstore/zone.c
693
if (!buffer_datalen(zone))
fs/pstore/zone.c
694
return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
fs/pstore/zone.c
695
psz_flush_dirty_zone(zone);
fs/pstore/zone.c
720
static void psz_write_kmsg_hdr(struct pstore_zone *zone,
fs/pstore/zone.c
724
struct psz_buffer *buffer = zone->buffer;
fs/pstore/zone.c
749
struct pstore_zone *zone;
fs/pstore/zone.c
757
zone = cxt->kpszs[zonenum];
fs/pstore/zone.c
758
if (unlikely(!zone))
fs/pstore/zone.c
762
len = zone->buffer_size + sizeof(*zone->buffer);
fs/pstore/zone.c
763
zone->oldbuf = zone->buffer;
fs/pstore/zone.c
764
zone->buffer = kzalloc(len, GFP_ATOMIC);
fs/pstore/zone.c
765
if (!zone->buffer) {
fs/pstore/zone.c
766
zone->buffer = zone->oldbuf;
fs/pstore/zone.c
769
zone->buffer->sig = zone->oldbuf->sig;
fs/pstore/zone.c
771
pr_debug("write %s to zone id %d\n", zone->name, zonenum);
fs/pstore/zone.c
772
psz_write_kmsg_hdr(zone, record);
fs/pstore/zone.c
774
size = min_t(size_t, record->size, zone->buffer_size - hlen);
fs/pstore/zone.c
775
ret = psz_zone_write(zone, FLUSH_ALL, record->buf, size, hlen);
fs/pstore/zone.c
780
kfree(zone->oldbuf);
fs/pstore/zone.c
781
zone->oldbuf = NULL;
fs/pstore/zone.c
787
kfree(zone->buffer);
fs/pstore/zone.c
788
zone->buffer = zone->oldbuf;
fs/pstore/zone.c
789
zone->oldbuf = NULL;
fs/pstore/zone.c
823
static int notrace psz_record_write(struct pstore_zone *zone,
fs/pstore/zone.c
831
if (!zone || !record)
fs/pstore/zone.c
834
if (atomic_read(&zone->buffer->datalen) >= zone->buffer_size)
fs/pstore/zone.c
839
if (unlikely(cnt > zone->buffer_size)) {
fs/pstore/zone.c
840
buf += cnt - zone->buffer_size;
fs/pstore/zone.c
841
cnt = zone->buffer_size;
fs/pstore/zone.c
844
start = buffer_start(zone);
fs/pstore/zone.c
845
rem = zone->buffer_size - start;
fs/pstore/zone.c
847
psz_zone_write(zone, FLUSH_PART, buf, rem, start);
fs/pstore/zone.c
854
atomic_set(&zone->buffer->start, cnt + start);
fs/pstore/zone.c
855
psz_zone_write(zone, FLUSH_PART, buf, cnt, start);
fs/pstore/zone.c
866
atomic_set(&zone->buffer->datalen, zone->buffer_size);
fs/pstore/zone.c
867
psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
fs/pstore/zone.c
909
struct pstore_zone *zone = NULL;
fs/pstore/zone.c
912
zone = cxt->kpszs[cxt->kmsg_read_cnt++];
fs/pstore/zone.c
913
if (psz_ok(zone))
fs/pstore/zone.c
914
return zone;
fs/pstore/zone.c
927
zone = cxt->ppsz;
fs/pstore/zone.c
928
if (psz_old_ok(zone))
fs/pstore/zone.c
929
return zone;
fs/pstore/zone.c
934
zone = cxt->cpsz;
fs/pstore/zone.c
935
if (psz_old_ok(zone))
fs/pstore/zone.c
936
return zone;
fs/pstore/zone.c
942
static int psz_kmsg_read_hdr(struct pstore_zone *zone,
fs/pstore/zone.c
945
struct psz_buffer *buffer = zone->buffer;
fs/pstore/zone.c
959
static ssize_t psz_kmsg_read(struct pstore_zone *zone,
fs/pstore/zone.c
964
size = buffer_datalen(zone);
fs/pstore/zone.c
966
if (psz_kmsg_read_hdr(zone, record)) {
fs/pstore/zone.c
967
atomic_set(&zone->buffer->datalen, 0);
fs/pstore/zone.c
968
atomic_set(&zone->dirty, 0);
fs/pstore/zone.c
991
size = psz_zone_read_buffer(zone, record->buf + hlen, size,
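Note: the psz_record_write() lines above (fs/pstore/zone.c:839-855) implement a wrap-around ring write: an oversized record keeps only its tail, and a write that would run past buffer_size is split at the wrap point. A minimal user-space sketch of that arithmetic; "struct ring" and "ring_write" are illustrative names, not kernel API, with size/start playing the roles of the cited buffer_size and start fields:

    #include <stddef.h>
    #include <string.h>

    struct ring {
        char   *data;
        size_t  size;   /* role of zone->buffer_size */
        size_t  start;  /* role of the atomic start offset */
    };

    static void ring_write(struct ring *r, const char *buf, size_t cnt)
    {
        size_t rem;

        if (cnt > r->size) {            /* oversized: keep only the tail */
            buf += cnt - r->size;
            cnt = r->size;
        }
        rem = r->size - r->start;
        if (cnt > rem) {                /* split the write at the wrap point */
            memcpy(r->data + r->start, buf, rem);
            buf += rem;
            cnt -= rem;
            r->start = 0;
        }
        memcpy(r->data + r->start, buf, cnt);
        r->start += cnt;
    }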
fs/xfs/libxfs/xfs_zones.c
100
zone_no, XFS_BB_TO_FSB(mp, zone->len),
fs/xfs/libxfs/xfs_zones.c
105
switch (zone->type) {
fs/xfs/libxfs/xfs_zones.c
107
return xfs_validate_blk_zone_conv(mp, zone, zone_no);
fs/xfs/libxfs/xfs_zones.c
109
return xfs_validate_blk_zone_seq(mp, zone, zone_no,
fs/xfs/libxfs/xfs_zones.c
113
zone_no, zone->type);
fs/xfs/libxfs/xfs_zones.c
20
struct blk_zone *zone,
fs/xfs/libxfs/xfs_zones.c
24
switch (zone->cond) {
fs/xfs/libxfs/xfs_zones.c
32
if (zone->wp < zone->start ||
fs/xfs/libxfs/xfs_zones.c
33
zone->wp >= zone->start + zone->capacity) {
fs/xfs/libxfs/xfs_zones.c
36
zone_no, zone->wp);
fs/xfs/libxfs/xfs_zones.c
40
*write_pointer = XFS_BB_TO_FSB(mp, zone->wp - zone->start);
fs/xfs/libxfs/xfs_zones.c
43
*write_pointer = XFS_BB_TO_FSB(mp, zone->capacity);
fs/xfs/libxfs/xfs_zones.c
49
zone_no, zone->cond);
fs/xfs/libxfs/xfs_zones.c
53
zone_no, zone->cond);
fs/xfs/libxfs/xfs_zones.c
61
struct blk_zone *zone,
fs/xfs/libxfs/xfs_zones.c
64
switch (zone->cond) {
fs/xfs/libxfs/xfs_zones.c
70
zone_no, zone->cond);
fs/xfs/libxfs/xfs_zones.c
78
struct blk_zone *zone,
fs/xfs/libxfs/xfs_zones.c
89
if (XFS_BB_TO_FSB(mp, zone->capacity) != expected_capacity) {
fs/xfs/libxfs/xfs_zones.c
92
zone_no, XFS_BB_TO_FSB(mp, zone->capacity),
fs/xfs/libxfs/xfs_zones.c
97
if (XFS_BB_TO_FSB(mp, zone->len) != expected_size) {
fs/xfs/libxfs/xfs_zones.h
40
bool xfs_validate_blk_zone(struct xfs_mount *mp, struct blk_zone *zone,
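Note: the xfs_validate_blk_zone_seq() fragments above (fs/xfs/libxfs/xfs_zones.c:24-53) derive a write pointer from the zone condition: empty maps to 0, full maps to the capacity, and open/closed use wp - start after a bounds check. A condensed sketch of that switch, with illustrative enum/struct names standing in for the blk_zone types (all units 512-byte sectors):

    #include <stdbool.h>
    #include <stdint.h>

    enum zcond { ZC_EMPTY, ZC_IMP_OPEN, ZC_EXP_OPEN, ZC_CLOSED, ZC_FULL };

    struct zinfo {
        enum zcond cond;
        uint64_t   start, wp, capacity;
    };

    static bool zone_write_pointer(const struct zinfo *z, uint64_t *wp)
    {
        switch (z->cond) {
        case ZC_EMPTY:
            *wp = 0;
            return true;
        case ZC_IMP_OPEN:
        case ZC_EXP_OPEN:
        case ZC_CLOSED:
            if (z->wp < z->start || z->wp >= z->start + z->capacity)
                return false;   /* write pointer outside the zone: invalid */
            *wp = z->wp - z->start;
            return true;
        case ZC_FULL:
            *wp = z->capacity;
            return true;
        }
        return false;
    }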
fs/xfs/xfs_zone_alloc.c
1000
error = blkdev_get_zone_info(bdev, start, &zone);
fs/xfs/xfs_zone_alloc.c
1003
if (zone.start != start) {
fs/xfs/xfs_zone_alloc.c
1005
zone.start, start);
fs/xfs/xfs_zone_alloc.c
1009
if (!xfs_validate_blk_zone(mp, &zone, rtg_rgno(rtg),
fs/xfs/xfs_zone_alloc.c
1019
if (zone.cond != BLK_ZONE_COND_NOT_WP)
fs/xfs/xfs_zone_alloc.c
996
struct blk_zone zone = {};
fs/zonefs/super.c
1007
zone->capacity << SECTOR_SHIFT);
fs/zonefs/super.c
1008
z->z_wpoffset = zonefs_check_zone_condition(sb, z, zone);
fs/zonefs/super.c
1032
(zone->cond == BLK_ZONE_COND_IMP_OPEN ||
fs/zonefs/super.c
1033
zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
fs/zonefs/super.c
192
struct blk_zone *zone)
fs/zonefs/super.c
194
switch (zone->cond) {
fs/zonefs/super.c
221
return (zone->wp - zone->start) << SECTOR_SHIFT;
fs/zonefs/super.c
250
static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
fs/zonefs/super.c
255
*z = *zone;
fs/zonefs/super.c
259
static void zonefs_handle_io_error(struct inode *inode, struct blk_zone *zone,
fs/zonefs/super.c
273
data_size = zonefs_check_zone_condition(sb, z, zone);
fs/zonefs/super.c
372
struct blk_zone zone;
fs/zonefs/super.c
382
zone.start = z->z_sector;
fs/zonefs/super.c
383
zone.len = z->z_size >> SECTOR_SHIFT;
fs/zonefs/super.c
384
zone.wp = zone.start + zone.len;
fs/zonefs/super.c
385
zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
fs/zonefs/super.c
386
zone.cond = BLK_ZONE_COND_NOT_WP;
fs/zonefs/super.c
387
zone.capacity = zone.len;
fs/zonefs/super.c
401
zonefs_io_error_cb, &zone);
fs/zonefs/super.c
413
zonefs_handle_io_error(inode, &zone, write);
fs/zonefs/super.c
853
static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
fs/zonefs/super.c
873
switch (zone->type) {
fs/zonefs/super.c
878
zone->start != zd->cnv_zone_start)
fs/zonefs/super.c
880
zd->cnv_zone_start = zone->start + zone->len;
fs/zonefs/super.c
892
zone->type);
fs/zonefs/super.c
896
memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone));
fs/zonefs/super.c
941
struct blk_zone *zone, *next, *end;
fs/zonefs/super.c
960
for (zone = &zd->zones[1]; zone < end; zone = next) {
fs/zonefs/super.c
962
next = zone + 1;
fs/zonefs/super.c
963
if (zonefs_zone_type(zone) != ztype)
fs/zonefs/super.c
982
zone->len += next->len;
fs/zonefs/super.c
983
zone->capacity += next->capacity;
fs/zonefs/super.c
985
zone->cond != BLK_ZONE_COND_OFFLINE)
fs/zonefs/super.c
986
zone->cond = BLK_ZONE_COND_READONLY;
fs/zonefs/super.c
988
zone->cond = BLK_ZONE_COND_OFFLINE;
fs/zonefs/super.c
995
z->z_sector = zone->start;
fs/zonefs/super.c
996
z->z_size = zone->len << SECTOR_SHIFT;
fs/zonefs/zonefs.h
35
static inline enum zonefs_ztype zonefs_zone_type(struct blk_zone *zone)
fs/zonefs/zonefs.h
37
if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
include/linux/blkdev.h
439
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
include/linux/blkdev.h
442
int disk_report_zone(struct gendisk *disk, struct blk_zone *zone,
include/linux/blkdev.h
446
struct blk_zone *zone);
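Note: the report_zones_cb typedef above (include/linux/blkdev.h:439) is the shape shared by blkdev_copy_zone_to_user(), blk_revalidate_zone_cb() and zonefs_get_zone_info_cb() earlier in this listing: one call per reported zone, with a non-zero return aborting the iteration. A minimal callback in that shape; "struct zone_stats" is a made-up carrier passed through the void *data argument:

    struct zone_stats {
        unsigned int nr_conv;
        unsigned int nr_seq;
    };

    static int count_zone_types_cb(struct blk_zone *zone, unsigned int idx,
                                   void *data)
    {
        struct zone_stats *stats = data;

        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
            stats->nr_conv++;
        else
            stats->nr_seq++;
        return 0;       /* non-zero would stop the zone report */
    }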
include/linux/bnge/hsi.h
9846
__le16 zone;
include/linux/bnge/hsi.h
9888
__le16 zone;
include/linux/bnxt/hsi.h
9694
__le16 zone;
include/linux/compaction.h
100
extern void compaction_defer_reset(struct zone *zone, int order,
include/linux/compaction.h
115
static inline bool compaction_suitable(struct zone *zone, int order,
include/linux/compaction.h
90
extern unsigned int extfrag_for_order(struct zone *zone, unsigned int order);
include/linux/compaction.h
91
extern int fragmentation_index(struct zone *zone, unsigned int order);
include/linux/compaction.h
97
extern bool compaction_suitable(struct zone *zone, int order,
include/linux/cpuset.h
232
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
include/linux/cpuset.h
237
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
include/linux/cpuset.h
90
static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
include/linux/cpuset.h
95
static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
include/linux/dtpm.h
16
struct powercap_zone zone;
include/linux/dtpm.h
55
static inline struct dtpm *to_dtpm(struct powercap_zone *zone)
include/linux/dtpm.h
57
return container_of(zone, struct dtpm, zone);
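Note: to_dtpm() above is the standard container_of idiom: given a pointer to an embedded member, subtract the member's offset to recover the enclosing object. A self-contained toy version (the kernel macro additionally type-checks the member pointer):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct powercap_zone_like { int id; };      /* stand-in member type */

    struct dtpm_like {
        int weight;
        struct powercap_zone_like zone;         /* embedded, as in struct dtpm */
    };

    static struct dtpm_like *to_dtpm_like(struct powercap_zone_like *zone)
    {
        return container_of(zone, struct dtpm_like, zone);
    }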
include/linux/gfp.h
397
bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
include/linux/gfp.h
398
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
include/linux/gfp.h
399
void drain_all_pages(struct zone *zone);
include/linux/gfp.h
400
void drain_local_pages(struct zone *zone);
include/linux/memory.h
89
struct zone *zone;
include/linux/memory_hotplug.h
101
static inline void zone_seqlock_init(struct zone *zone)
include/linux/memory_hotplug.h
103
seqlock_init(&zone->span_seqlock);
include/linux/memory_hotplug.h
11
struct zone;
include/linux/memory_hotplug.h
110
struct zone *zone);
include/linux/memory_hotplug.h
113
struct zone *zone, struct memory_group *group);
include/linux/memory_hotplug.h
188
static inline unsigned zone_span_seqbegin(struct zone *zone)
include/linux/memory_hotplug.h
192
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
include/linux/memory_hotplug.h
196
static inline void zone_span_writelock(struct zone *zone) {}
include/linux/memory_hotplug.h
197
static inline void zone_span_writeunlock(struct zone *zone) {}
include/linux/memory_hotplug.h
198
static inline void zone_seqlock_init(struct zone *zone) {}
include/linux/memory_hotplug.h
265
struct zone *zone, struct memory_group *group);
include/linux/memory_hotplug.h
274
struct zone *zone, struct memory_group *group)
include/linux/memory_hotplug.h
299
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
include/linux/memory_hotplug.h
303
extern void remove_pfn_range_from_zone(struct zone *zone,
include/linux/memory_hotplug.h
311
extern struct zone *zone_for_pfn_range(enum mmop online_type,
include/linux/memory_hotplug.h
85
static inline unsigned zone_span_seqbegin(struct zone *zone)
include/linux/memory_hotplug.h
87
return read_seqbegin(&zone->span_seqlock);
include/linux/memory_hotplug.h
89
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
include/linux/memory_hotplug.h
91
return read_seqretry(&zone->span_seqlock, iv);
include/linux/memory_hotplug.h
93
static inline void zone_span_writelock(struct zone *zone)
include/linux/memory_hotplug.h
95
write_seqlock(&zone->span_seqlock);
include/linux/memory_hotplug.h
97
static inline void zone_span_writeunlock(struct zone *zone)
include/linux/memory_hotplug.h
99
write_sequnlock(&zone->span_seqlock);
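Note: zone_span_seqbegin()/zone_span_seqretry() above guard the zone span against concurrent hotplug resizing (writers hold zone_span_writelock()). The read side is the usual seqlock retry loop; a sketch using only the cited helpers plus zone_spans_pfn() from include/linux/mmzone.h:

    static bool pfn_in_zone_span(struct zone *zone, unsigned long pfn)
    {
        unsigned seq;
        bool ret;

        do {
            seq = zone_span_seqbegin(zone);
            ret = zone_spans_pfn(zone, pfn);
        } while (zone_span_seqretry(zone, seq));

        return ret;
    }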
include/linux/mempolicy.h
181
extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
include/linux/mm.h
2478
static inline struct zone *page_zone(const struct page *page)
include/linux/mm.h
2493
static inline struct zone *folio_zone(const struct folio *folio)
include/linux/mm.h
2715
static inline void set_page_zone(struct page *page, enum zone_type zone)
include/linux/mm.h
2718
page->flags.f |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
include/linux/mm.h
2727
static inline void set_page_links(struct page *page, enum zone_type zone,
include/linux/mm.h
2730
set_page_zone(page, zone);
include/linux/mm.h
4736
bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
include/linux/mm.h
4737
static inline bool set_page_guard(struct zone *zone, struct page *page,
include/linux/mm.h
4742
return __set_page_guard(zone, page, order);
include/linux/mm.h
4745
void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
include/linux/mm.h
4746
static inline void clear_page_guard(struct zone *zone, struct page *page,
include/linux/mm.h
4751
__clear_page_guard(zone, page, order);
include/linux/mm.h
4760
static inline bool set_page_guard(struct zone *zone, struct page *page,
include/linux/mm.h
4762
static inline void clear_page_guard(struct zone *zone, struct page *page,
include/linux/mm.h
4884
unsigned int order, struct zone *zone,
include/linux/mm_inline.h
179
int zone = folio_zonenum(folio);
include/linux/mm_inline.h
189
WRITE_ONCE(lrugen->nr_pages[old_gen][type][zone],
include/linux/mm_inline.h
190
lrugen->nr_pages[old_gen][type][zone] - delta);
include/linux/mm_inline.h
192
WRITE_ONCE(lrugen->nr_pages[new_gen][type][zone],
include/linux/mm_inline.h
193
lrugen->nr_pages[new_gen][type][zone] + delta);
include/linux/mm_inline.h
199
__update_lru_size(lruvec, lru, zone, delta);
include/linux/mm_inline.h
207
__update_lru_size(lruvec, lru, zone, -delta);
include/linux/mm_inline.h
213
__update_lru_size(lruvec, lru, zone, -delta);
include/linux/mm_inline.h
214
__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, delta);
include/linux/mm_inline.h
261
int zone = folio_zonenum(folio);
include/linux/mm_inline.h
278
list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
include/linux/mm_inline.h
280
list_add(&folio->lru, &lrugen->folios[gen][type][zone]);
include/linux/mmzone.h
1151
static inline unsigned long wmark_pages(const struct zone *z,
include/linux/mmzone.h
1157
static inline unsigned long min_wmark_pages(const struct zone *z)
include/linux/mmzone.h
1162
static inline unsigned long low_wmark_pages(const struct zone *z)
include/linux/mmzone.h
1167
static inline unsigned long high_wmark_pages(const struct zone *z)
include/linux/mmzone.h
1172
static inline unsigned long promo_wmark_pages(const struct zone *z)
include/linux/mmzone.h
1177
static inline unsigned long zone_managed_pages(const struct zone *zone)
include/linux/mmzone.h
1179
return (unsigned long)atomic_long_read(&zone->managed_pages);
include/linux/mmzone.h
1182
static inline unsigned long zone_cma_pages(struct zone *zone)
include/linux/mmzone.h
1185
return zone->cma_pages;
include/linux/mmzone.h
1191
static inline unsigned long zone_end_pfn(const struct zone *zone)
include/linux/mmzone.h
1193
return zone->zone_start_pfn + zone->spanned_pages;
include/linux/mmzone.h
1196
static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
include/linux/mmzone.h
1198
return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
include/linux/mmzone.h
1201
static inline bool zone_is_initialized(const struct zone *zone)
include/linux/mmzone.h
1203
return zone->initialized;
include/linux/mmzone.h
1206
static inline bool zone_is_empty(const struct zone *zone)
include/linux/mmzone.h
1208
return zone->spanned_pages == 0;
include/linux/mmzone.h
1303
extern void memmap_init_zone_device(struct zone *, unsigned long,
include/linux/mmzone.h
1346
static inline bool zone_intersects(const struct zone *zone,
include/linux/mmzone.h
1349
if (zone_is_empty(zone))
include/linux/mmzone.h
1351
if (start_pfn >= zone_end_pfn(zone) ||
include/linux/mmzone.h
1352
start_pfn + nr_pages <= zone->zone_start_pfn)
include/linux/mmzone.h
1385
struct zone *zone; /* Pointer to actual zone */
include/linux/mmzone.h
1461
struct zone node_zones[MAX_NR_ZONES];
include/linux/mmzone.h
1611
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
include/linux/mmzone.h
1614
bool zone_watermark_ok(struct zone *z, unsigned int order,
include/linux/mmzone.h
1625
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
include/linux/mmzone.h
1641
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
include/linux/mmzone.h
1664
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
include/linux/mmzone.h
1667
static inline bool zone_is_zone_device(const struct zone *zone)
include/linux/mmzone.h
1669
return zone_idx(zone) == ZONE_DEVICE;
include/linux/mmzone.h
1672
static inline bool zone_is_zone_device(const struct zone *zone)
include/linux/mmzone.h
1684
static inline bool managed_zone(const struct zone *zone)
include/linux/mmzone.h
1686
return zone_managed_pages(zone);
include/linux/mmzone.h
1690
static inline bool populated_zone(const struct zone *zone)
include/linux/mmzone.h
1692
return zone->present_pages;
include/linux/mmzone.h
1696
static inline int zone_to_nid(const struct zone *zone)
include/linux/mmzone.h
1698
return zone->node;
include/linux/mmzone.h
1701
static inline void zone_set_nid(struct zone *zone, int nid)
include/linux/mmzone.h
1703
zone->node = nid;
include/linux/mmzone.h
1706
static inline int zone_to_nid(const struct zone *zone)
include/linux/mmzone.h
1711
static inline void zone_set_nid(struct zone *zone, int nid) {}
include/linux/mmzone.h
1733
static inline int is_highmem(const struct zone *zone)
include/linux/mmzone.h
1735
return is_highmem_idx(zone_idx(zone));
include/linux/mmzone.h
1738
bool has_managed_zone(enum zone_type zone);
include/linux/mmzone.h
1765
extern struct zone *next_zone(struct zone *zone);
include/linux/mmzone.h
1782
#define for_each_zone(zone) \
include/linux/mmzone.h
1783
for (zone = (first_online_pgdat())->node_zones; \
include/linux/mmzone.h
1784
zone; \
include/linux/mmzone.h
1785
zone = next_zone(zone))
include/linux/mmzone.h
1787
#define for_each_populated_zone(zone) \
include/linux/mmzone.h
1788
for (zone = (first_online_pgdat())->node_zones; \
include/linux/mmzone.h
1789
zone; \
include/linux/mmzone.h
1790
zone = next_zone(zone)) \
include/linux/mmzone.h
1791
if (!populated_zone(zone)) \
include/linux/mmzone.h
1795
static inline struct zone *zonelist_zone(struct zoneref *zoneref)
include/linux/mmzone.h
1797
return zoneref->zone;
include/linux/mmzone.h
1807
return zone_to_nid(zoneref->zone);
include/linux/mmzone.h
1874
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
include/linux/mmzone.h
1875
for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z); \
include/linux/mmzone.h
1876
zone; \
include/linux/mmzone.h
1878
zone = zonelist_zone(z))
include/linux/mmzone.h
1880
#define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \
include/linux/mmzone.h
1881
for (zone = zonelist_zone(z); \
include/linux/mmzone.h
1882
zone; \
include/linux/mmzone.h
1884
zone = zonelist_zone(z))
include/linux/mmzone.h
1896
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
include/linux/mmzone.h
1897
for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
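Note: the iterator macros above chain next_zone() across every node's node_zones[] and filter with populated_zone(). A typical walk, using only helpers cited in this listing (struct zone's name field also appears below in the kmem tracepoints):

    struct zone *zone;

    for_each_populated_zone(zone)
        pr_info("%s: spans [%lu, %lu), %lu managed pages\n",
                zone->name, zone->zone_start_pfn, zone_end_pfn(zone),
                zone_managed_pages(zone));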
include/linux/page-isolation.h
60
bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page);
include/linux/page-isolation.h
61
bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
include/linux/page-isolation.h
70
bool page_is_unmovable(struct zone *zone, struct page *page,
include/linux/page_owner.h
20
pg_data_t *pgdat, struct zone *zone);
include/linux/pstore_blk.h
21
struct pstore_zone_info zone;
include/linux/skbuff.h
1641
bool post_ct, u16 zone);
include/linux/skbuff.h
328
__u16 zone;
include/linux/swap.h
179
struct zone;
include/linux/swap.h
346
extern void lru_add_drain_cpu_zone(struct zone *zone);
include/linux/swap.h
353
extern unsigned long zone_reclaimable_pages(struct zone *zone);
include/linux/vmstat.h
142
static inline void zone_numa_event_add(long x, struct zone *zone,
include/linux/vmstat.h
145
atomic_long_add(x, &zone->vm_numa_event[item]);
include/linux/vmstat.h
149
static inline unsigned long zone_numa_event_state(struct zone *zone,
include/linux/vmstat.h
152
return atomic_long_read(&zone->vm_numa_event[item]);
include/linux/vmstat.h
162
static inline void zone_page_state_add(long x, struct zone *zone,
include/linux/vmstat.h
165
atomic_long_add(x, &zone->vm_stat[item]);
include/linux/vmstat.h
204
static inline unsigned long zone_page_state(struct zone *zone,
include/linux/vmstat.h
207
long x = atomic_long_read(&zone->vm_stat[item]);
include/linux/vmstat.h
221
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
include/linux/vmstat.h
224
long x = atomic_long_read(&zone->vm_stat[item]);
include/linux/vmstat.h
229
x += per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_stat_diff[item];
include/linux/vmstat.h
240
__count_numa_event(struct zone *zone, enum numa_stat_item item)
include/linux/vmstat.h
242
struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
include/linux/vmstat.h
248
__count_numa_events(struct zone *zone, enum numa_stat_item item, long delta)
include/linux/vmstat.h
250
struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
include/linux/vmstat.h
273
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, long);
include/linux/vmstat.h
281
void mod_zone_page_state(struct zone *, enum zone_stat_item, long);
include/linux/vmstat.h
289
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
include/linux/vmstat.h
291
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
include/linux/vmstat.h
298
void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *);
include/linux/vmstat.h
300
int calculate_pressure_threshold(struct zone *zone);
include/linux/vmstat.h
301
int calculate_normal_threshold(struct zone *zone);
include/linux/vmstat.h
303
int (*calculate_pressure)(struct zone *));
include/linux/vmstat.h
311
static inline void __mod_zone_page_state(struct zone *zone,
include/linux/vmstat.h
314
zone_page_state_add(delta, zone, item);
include/linux/vmstat.h
334
static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
include/linux/vmstat.h
336
atomic_long_inc(&zone->vm_stat[item]);
include/linux/vmstat.h
346
static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
include/linux/vmstat.h
348
atomic_long_dec(&zone->vm_stat[item]);
include/linux/vmstat.h
403
static inline void drain_zonestat(struct zone *zone,
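Note: the two readers above trade accuracy for cost: zone_page_state() reads only the global atomic, so it may lag by whatever sits in the per-CPU vm_stat_diff arrays, while zone_page_state_snapshot() folds those diffs in. A hedged usage sketch:

    /* Fast path (e.g. watermark checks): the global counter is close enough. */
    unsigned long approx_free = zone_page_state(zone, NR_FREE_PAGES);

    /*
     * Accuracy-sensitive path (e.g. deciding whether compaction can
     * succeed): pay the per-CPU walk for a tighter value.
     */
    unsigned long exact_free = zone_page_state_snapshot(zone, NR_FREE_PAGES);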
include/net/flow_offload.h
296
u16 zone;
include/net/netfilter/nf_conntrack.h
253
const struct nf_conntrack_zone *zone,
include/net/netfilter/nf_conntrack.h
353
const struct nf_conntrack_zone *zone,
include/net/netfilter/nf_conntrack.h
375
u16 zone, u8 family, u8 *proto, u16 *mru);
include/net/netfilter/nf_conntrack.h
92
struct nf_conntrack_zone zone;
include/net/netfilter/nf_conntrack_core.h
49
const struct nf_conntrack_zone *zone,
include/net/netfilter/nf_conntrack_expect.h
108
const struct nf_conntrack_zone *zone,
include/net/netfilter/nf_conntrack_expect.h
113
const struct nf_conntrack_zone *zone,
include/net/netfilter/nf_conntrack_expect.h
118
const struct nf_conntrack_zone *zone,
include/net/netfilter/nf_conntrack_expect.h
33
struct nf_conntrack_zone zone;
include/net/netfilter/nf_conntrack_expect.h
78
return a->zone.id == b->id;
include/net/netfilter/nf_conntrack_zones.h
12
return &ct->zone;
include/net/netfilter/nf_conntrack_zones.h
19
nf_ct_zone_init(struct nf_conntrack_zone *zone, u16 id, u8 dir, u8 flags)
include/net/netfilter/nf_conntrack_zones.h
21
zone->id = id;
include/net/netfilter/nf_conntrack_zones.h
22
zone->flags = flags;
include/net/netfilter/nf_conntrack_zones.h
23
zone->dir = dir;
include/net/netfilter/nf_conntrack_zones.h
25
return zone;
include/net/netfilter/nf_conntrack_zones.h
36
if (tmpl->zone.flags & NF_CT_FLAG_MARK)
include/net/netfilter/nf_conntrack_zones.h
37
return nf_ct_zone_init(tmp, skb->mark, tmpl->zone.dir, 0);
include/net/netfilter/nf_conntrack_zones.h
43
const struct nf_conntrack_zone *zone)
include/net/netfilter/nf_conntrack_zones.h
46
ct->zone = *zone;
include/net/netfilter/nf_conntrack_zones.h
50
static inline bool nf_ct_zone_matches_dir(const struct nf_conntrack_zone *zone,
include/net/netfilter/nf_conntrack_zones.h
53
return zone->dir & (1 << dir);
include/net/netfilter/nf_conntrack_zones.h
56
static inline u16 nf_ct_zone_id(const struct nf_conntrack_zone *zone,
include/net/netfilter/nf_conntrack_zones.h
60
return nf_ct_zone_matches_dir(zone, dir) ?
include/net/netfilter/nf_conntrack_zones.h
61
zone->id : NF_CT_DEFAULT_ZONE_ID;
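Note: nf_ct_zone_init() above fills id/dir/flags and returns its argument so it can be used inline, and nf_ct_zone_id() resolves the per-direction id, falling back to NF_CT_DEFAULT_ZONE_ID when the zone does not cover the packet's direction. A sketch, assuming the NF_CT_ZONE_DIR_* and IP_CT_DIR_* constants from the conntrack headers:

    struct nf_conntrack_zone zone;

    /* Zone 42, applied to the original direction only. */
    nf_ct_zone_init(&zone, 42, NF_CT_ZONE_DIR_ORIG, 0);

    u16 orig  = nf_ct_zone_id(&zone, IP_CT_DIR_ORIGINAL);  /* 42 */
    u16 reply = nf_ct_zone_id(&zone, IP_CT_DIR_REPLY);     /* default id */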
include/net/sch_generic.h
1138
u16 zone; /* Only valid if qdisc_skb_cb(skb)->post_ct = true */
include/net/tc_act/tc_connmark.h
9
u16 zone;
include/net/tc_act/tc_ct.h
15
u16 zone;
include/net/tc_act/tc_ct.h
48
return to_ct_params(a)->zone;
include/net/tc_act/tc_ctinfo.h
14
u16 zone;
include/soc/tegra/bpmp-abi.h
1793
uint32_t zone;
include/soc/tegra/bpmp-abi.h
1818
uint32_t zone;
include/soc/tegra/bpmp-abi.h
1830
uint32_t zone;
include/soc/tegra/bpmp-abi.h
1849
uint32_t zone;
include/sound/emux_synth.h
168
struct snd_sf_zone *zone; /* Zone assigned to this note */
include/sound/sof/debug.h
28
uint32_t zone; /**< see sof_ipc_dbg_mem_zone */
include/trace/events/compaction.h
194
TP_PROTO(struct zone *zone,
include/trace/events/compaction.h
198
TP_ARGS(zone, order, ret),
include/trace/events/compaction.h
208
__entry->nid = zone_to_nid(zone);
include/trace/events/compaction.h
209
__entry->idx = zone_idx(zone);
include/trace/events/compaction.h
223
TP_PROTO(struct zone *zone,
include/trace/events/compaction.h
227
TP_ARGS(zone, order, ret)
include/trace/events/compaction.h
232
TP_PROTO(struct zone *zone,
include/trace/events/compaction.h
236
TP_ARGS(zone, order, ret)
include/trace/events/compaction.h
241
TP_PROTO(struct zone *zone, int order),
include/trace/events/compaction.h
243
TP_ARGS(zone, order),
include/trace/events/compaction.h
255
__entry->nid = zone_to_nid(zone);
include/trace/events/compaction.h
256
__entry->idx = zone_idx(zone);
include/trace/events/compaction.h
258
__entry->considered = zone->compact_considered;
include/trace/events/compaction.h
259
__entry->defer_shift = zone->compact_defer_shift;
include/trace/events/compaction.h
260
__entry->order_failed = zone->compact_order_failed;
include/trace/events/compaction.h
274
TP_PROTO(struct zone *zone, int order),
include/trace/events/compaction.h
276
TP_ARGS(zone, order)
include/trace/events/compaction.h
281
TP_PROTO(struct zone *zone, int order),
include/trace/events/compaction.h
283
TP_ARGS(zone, order)
include/trace/events/compaction.h
288
TP_PROTO(struct zone *zone, int order),
include/trace/events/compaction.h
290
TP_ARGS(zone, order)
include/trace/events/kmem.h
312
TP_PROTO(struct zone *zone),
include/trace/events/kmem.h
314
TP_ARGS(zone),
include/trace/events/kmem.h
318
__string(name, zone->name)
include/trace/events/kmem.h
326
__entry->node_id = zone->zone_pgdat->node_id;
include/trace/events/kmem.h
328
__entry->watermark_min = zone->_watermark[WMARK_MIN];
include/trace/events/kmem.h
329
__entry->watermark_low = zone->_watermark[WMARK_LOW];
include/trace/events/kmem.h
330
__entry->watermark_high = zone->_watermark[WMARK_HIGH];
include/trace/events/kmem.h
331
__entry->watermark_promo = zone->_watermark[WMARK_PROMO];
include/trace/events/kmem.h
345
TP_PROTO(struct zone *zone, struct zone *upper_zone, long lowmem_reserve),
include/trace/events/kmem.h
347
TP_ARGS(zone, upper_zone, lowmem_reserve),
include/trace/events/kmem.h
351
__string(name, zone->name)
include/trace/events/kmem.h
357
__entry->node_id = zone->zone_pgdat->node_id;
include/uapi/linux/netfilter/xt_CT.h
21
__u16 zone;
include/uapi/linux/netfilter/xt_CT.h
32
__u16 zone;
include/uapi/linux/tc_act/tc_connmark.h
10
__u16 zone;
include/uapi/linux/tipc.h
291
static inline __u32 tipc_addr(unsigned int zone,
include/uapi/linux/tipc.h
295
return (zone << TIPC_ZONE_OFFSET) |
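Note: tipc_addr() above packs the zone.cluster.node triple into one __u32; in mainline's tipc.h the node takes bits 0-11, the cluster bits 12-23 and the zone bits 24-31. For example:

    /* address 1.1.10: (1 << 24) | (1 << 12) | 10 == 0x0100100a */
    __u32 addr = tipc_addr(1, 1, 10);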
kernel/power/power.h
161
extern unsigned int snapshot_additional_pages(struct zone *zone);
kernel/power/snapshot.c
1224
unsigned int snapshot_additional_pages(struct zone *zone)
kernel/power/snapshot.c
1228
rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
kernel/power/snapshot.c
1244
static void mark_free_pages(struct zone *zone)
kernel/power/snapshot.c
1251
if (zone_is_empty(zone))
kernel/power/snapshot.c
1254
spin_lock_irqsave(&zone->lock, flags);
kernel/power/snapshot.c
1256
max_zone_pfn = zone_end_pfn(zone);
kernel/power/snapshot.c
1257
for_each_valid_pfn(pfn, zone->zone_start_pfn, max_zone_pfn) {
kernel/power/snapshot.c
1265
if (page_zone(page) != zone)
kernel/power/snapshot.c
1274
&zone->free_area[order].free_list[t], buddy_list) {
kernel/power/snapshot.c
1287
spin_unlock_irqrestore(&zone->lock, flags);
kernel/power/snapshot.c
1298
struct zone *zone;
kernel/power/snapshot.c
1301
for_each_populated_zone(zone)
kernel/power/snapshot.c
1302
if (is_highmem(zone))
kernel/power/snapshot.c
1303
cnt += zone_page_state(zone, NR_FREE_PAGES);
kernel/power/snapshot.c
1316
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
kernel/power/snapshot.c
1324
if (!page || page_zone(page) != zone)
kernel/power/snapshot.c
1346
struct zone *zone;
kernel/power/snapshot.c
1349
for_each_populated_zone(zone) {
kernel/power/snapshot.c
1352
if (!is_highmem(zone))
kernel/power/snapshot.c
1355
mark_free_pages(zone);
kernel/power/snapshot.c
1356
max_zone_pfn = zone_end_pfn(zone);
kernel/power/snapshot.c
1357
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
kernel/power/snapshot.c
1358
if (saveable_highmem_page(zone, pfn))
kernel/power/snapshot.c
1375
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
kernel/power/snapshot.c
1383
if (!page || page_zone(page) != zone)
kernel/power/snapshot.c
1409
struct zone *zone;
kernel/power/snapshot.c
1413
for_each_populated_zone(zone) {
kernel/power/snapshot.c
1414
if (is_highmem(zone))
kernel/power/snapshot.c
1417
mark_free_pages(zone);
kernel/power/snapshot.c
1418
max_zone_pfn = zone_end_pfn(zone);
kernel/power/snapshot.c
1419
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
kernel/power/snapshot.c
1420
if (saveable_page(zone, pfn))
kernel/power/snapshot.c
1467
static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
kernel/power/snapshot.c
1469
return is_highmem(zone) ?
kernel/power/snapshot.c
1470
saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
kernel/power/snapshot.c
1504
#define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
kernel/power/snapshot.c
1524
struct zone *zone;
kernel/power/snapshot.c
1527
for_each_populated_zone(zone) {
kernel/power/snapshot.c
1530
mark_free_pages(zone);
kernel/power/snapshot.c
1531
max_zone_pfn = zone_end_pfn(zone);
kernel/power/snapshot.c
1532
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
kernel/power/snapshot.c
1533
if (page_is_saveable(zone, pfn))
kernel/power/snapshot.c
1826
struct zone *zone;
kernel/power/snapshot.c
1869
for_each_populated_zone(zone) {
kernel/power/snapshot.c
1870
size += snapshot_additional_pages(zone);
kernel/power/snapshot.c
1871
if (is_highmem(zone))
kernel/power/snapshot.c
1872
highmem += zone_page_state(zone, NR_FREE_PAGES);
kernel/power/snapshot.c
1874
count += zone_page_state(zone, NR_FREE_PAGES);
kernel/power/snapshot.c
2011
struct zone *zone;
kernel/power/snapshot.c
2014
for_each_populated_zone(zone)
kernel/power/snapshot.c
2015
if (!is_highmem(zone))
kernel/power/snapshot.c
2016
free += zone_page_state(zone, NR_FREE_PAGES);
kernel/power/snapshot.c
406
struct mem_zone_bm_rtree *zone;
kernel/power/snapshot.c
468
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
kernel/power/snapshot.c
475
block_nr = zone->blocks;
kernel/power/snapshot.c
485
for (i = zone->levels; i < levels_needed; i++) {
kernel/power/snapshot.c
487
&zone->nodes);
kernel/power/snapshot.c
491
node->data[0] = (unsigned long)zone->rtree;
kernel/power/snapshot.c
492
zone->rtree = node;
kernel/power/snapshot.c
493
zone->levels += 1;
kernel/power/snapshot.c
497
block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
kernel/power/snapshot.c
502
node = zone->rtree;
kernel/power/snapshot.c
503
dst = &zone->rtree;
kernel/power/snapshot.c
504
block_nr = zone->blocks;
kernel/power/snapshot.c
505
for (i = zone->levels; i > 0; i--) {
kernel/power/snapshot.c
510
&zone->nodes);
kernel/power/snapshot.c
522
zone->blocks += 1;
kernel/power/snapshot.c
528
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
kernel/power/snapshot.c
544
struct mem_zone_bm_rtree *zone;
kernel/power/snapshot.c
549
zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
kernel/power/snapshot.c
550
if (!zone)
kernel/power/snapshot.c
553
INIT_LIST_HEAD(&zone->nodes);
kernel/power/snapshot.c
554
INIT_LIST_HEAD(&zone->leaves);
kernel/power/snapshot.c
555
zone->start_pfn = start;
kernel/power/snapshot.c
556
zone->end_pfn = end;
kernel/power/snapshot.c
560
if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
kernel/power/snapshot.c
561
free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
kernel/power/snapshot.c
566
return zone;
kernel/power/snapshot.c
576
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
kernel/power/snapshot.c
581
list_for_each_entry(node, &zone->nodes, list)
kernel/power/snapshot.c
584
list_for_each_entry(node, &zone->leaves, list)
kernel/power/snapshot.c
590
bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
kernel/power/snapshot.c
592
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
kernel/power/snapshot.c
630
struct zone *zone;
kernel/power/snapshot.c
634
for_each_populated_zone(zone) {
kernel/power/snapshot.c
638
zone_start = zone->zone_start_pfn;
kernel/power/snapshot.c
639
zone_end = zone_end_pfn(zone);
kernel/power/snapshot.c
700
struct mem_zone_bm_rtree *zone;
kernel/power/snapshot.c
702
zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
kernel/power/snapshot.c
704
if (!zone) {
kernel/power/snapshot.c
708
list_add_tail(&zone->list, &bm->zones);
kernel/power/snapshot.c
729
struct mem_zone_bm_rtree *zone;
kernel/power/snapshot.c
731
list_for_each_entry(zone, &bm->zones, list)
kernel/power/snapshot.c
732
free_zone_bm_rtree(zone, clear_nosave_free);
kernel/power/snapshot.c
751
struct mem_zone_bm_rtree *curr, *zone;
kernel/power/snapshot.c
755
zone = bm->cur.zone;
kernel/power/snapshot.c
757
if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
kernel/power/snapshot.c
760
zone = NULL;
kernel/power/snapshot.c
765
zone = curr;
kernel/power/snapshot.c
770
if (!zone)
kernel/power/snapshot.c
785
if (zone == bm->cur.zone &&
kernel/power/snapshot.c
786
((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
kernel/power/snapshot.c
789
node = zone->rtree;
kernel/power/snapshot.c
790
block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
kernel/power/snapshot.c
792
for (i = zone->levels; i > 0; i--) {
kernel/power/snapshot.c
803
bm->cur.zone = zone;
kernel/power/snapshot.c
805
bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
kernel/power/snapshot.c
810
*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
kernel/power/snapshot.c
894
if (!list_is_last(&bm->cur.node->list, &bm->cur.zone->leaves)) {
kernel/power/snapshot.c
904
if (!list_is_last(&bm->cur.zone->list, &bm->zones)) {
kernel/power/snapshot.c
905
bm->cur.zone = list_entry(bm->cur.zone->list.next,
kernel/power/snapshot.c
907
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
kernel/power/snapshot.c
935
pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
kernel/power/snapshot.c
940
pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
kernel/power/snapshot.c
963
static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
kernel/power/snapshot.c
967
list_for_each_entry(node, &zone->nodes, list)
kernel/power/snapshot.c
970
list_for_each_entry(node, &zone->leaves, list)
kernel/power/snapshot.c
976
struct mem_zone_bm_rtree *zone;
kernel/power/snapshot.c
979
list_for_each_entry(zone, &bm->zones, list)
kernel/power/snapshot.c
980
recycle_zone_bm_rtree(zone);
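Note: the memory_bm lookup above (kernel/power/snapshot.c:789-810) finds a pfn's bit by descending zone->levels radix levels, consuming a fixed slice of the block number per level. A paraphrase of that descent; the identifiers and BM_* constants here are the ones defined in snapshot.c, not new API:

    block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
    node = zone->rtree;
    for (i = zone->levels; i > 0; i--) {
        int index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);

        index &= BM_RTREE_LEVEL_MASK;
        node = (struct rtree_node *)node->data[index];  /* next level down */
    }
    *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;  /* bit inside the leaf */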
kernel/sched/fair.c
1987
struct zone *zone = pgdat->node_zones + z;
kernel/sched/fair.c
1989
if (!populated_zone(zone))
kernel/sched/fair.c
1992
if (zone_watermark_ok(zone, 0,
kernel/sched/fair.c
1993
promo_wmark_pages(zone) + enough_wmark,
kernel/vmcore_info.c
191
VMCOREINFO_STRUCT_SIZE(zone);
kernel/vmcore_info.c
210
VMCOREINFO_OFFSET(zone, free_area);
kernel/vmcore_info.c
211
VMCOREINFO_OFFSET(zone, vm_stat);
kernel/vmcore_info.c
212
VMCOREINFO_OFFSET(zone, spanned_pages);
kernel/vmcore_info.c
216
VMCOREINFO_LENGTH(zone.free_area, NR_PAGE_ORDERS);
lib/tests/printf_kunit.c
602
page_flags_test(struct kunit *kunittest, int section, int node, int zone,
lib/tests/printf_kunit.c
606
unsigned long values[] = {section, node, zone, last_cpupid, kasan_tag};
mm/compaction.c
126
static void defer_compaction(struct zone *zone, int order)
mm/compaction.c
128
zone->compact_considered = 0;
mm/compaction.c
129
zone->compact_defer_shift++;
mm/compaction.c
131
if (order < zone->compact_order_failed)
mm/compaction.c
132
zone->compact_order_failed = order;
mm/compaction.c
1329
if (block_start_pfn < cc->zone->zone_start_pfn)
mm/compaction.c
1330
block_start_pfn = cc->zone->zone_start_pfn;
mm/compaction.c
134
if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
mm/compaction.c
1340
block_end_pfn, cc->zone))
mm/compaction.c
135
zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
mm/compaction.c
137
trace_mm_compaction_defer_compaction(zone, order);
mm/compaction.c
141
static bool compaction_deferred(struct zone *zone, int order)
mm/compaction.c
143
unsigned long defer_limit = 1UL << zone->compact_defer_shift;
mm/compaction.c
145
if (order < zone->compact_order_failed)
mm/compaction.c
1472
start_pfn = max(pageblock_start_pfn(pfn), cc->zone->zone_start_pfn);
mm/compaction.c
1473
end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone));
mm/compaction.c
1475
page = pageblock_pfn_to_page(start_pfn, end_pfn, cc->zone);
mm/compaction.c
149
if (++zone->compact_considered >= defer_limit) {
mm/compaction.c
150
zone->compact_considered = defer_limit;
mm/compaction.c
1523
if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
mm/compaction.c
154
trace_mm_compaction_deferred(zone, order);
mm/compaction.c
1548
struct free_area *area = &cc->zone->free_area[order];
mm/compaction.c
1558
spin_lock_irqsave(&cc->zone->lock, flags);
mm/compaction.c
1569
cc->zone->zone_start_pfn);
mm/compaction.c
1617
spin_unlock_irqrestore(&cc->zone->lock, flags);
mm/compaction.c
164
void compaction_defer_reset(struct zone *zone, int order,
mm/compaction.c
1649
zone_end_pfn(cc->zone)),
mm/compaction.c
1650
cc->zone);
mm/compaction.c
1660
if (highest && highest >= cc->zone->compact_cached_free_pfn) {
mm/compaction.c
1662
cc->zone->compact_cached_free_pfn = highest;
mm/compaction.c
1679
struct zone *zone = cc->zone;
mm/compaction.c
168
zone->compact_considered = 0;
mm/compaction.c
169
zone->compact_defer_shift = 0;
mm/compaction.c
1706
zone_end_pfn(zone));
mm/compaction.c
171
if (order >= zone->compact_order_failed)
mm/compaction.c
172
zone->compact_order_failed = order + 1;
mm/compaction.c
1729
zone);
mm/compaction.c
174
trace_mm_compaction_defer_reset(zone, order);
mm/compaction.c
178
static bool compaction_restarting(struct zone *zone, int order)
mm/compaction.c
180
if (order < zone->compact_order_failed)
mm/compaction.c
183
return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
mm/compaction.c
184
zone->compact_considered >= 1UL << zone->compact_defer_shift;
mm/compaction.c
1949
if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
mm/compaction.c
197
static void reset_cached_positions(struct zone *zone)
mm/compaction.c
1976
if (cc->migrate_pfn != cc->zone->zone_start_pfn)
mm/compaction.c
1983
struct free_area *area = &cc->zone->free_area[order];
mm/compaction.c
199
zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
mm/compaction.c
1991
spin_lock_irqsave(&cc->zone->lock, flags);
mm/compaction.c
200
zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
mm/compaction.c
201
zone->compact_cached_free_pfn =
mm/compaction.c
2017
if (pfn < cc->zone->zone_start_pfn)
mm/compaction.c
2018
pfn = cc->zone->zone_start_pfn;
mm/compaction.c
202
pageblock_start_pfn(zone_end_pfn(zone) - 1);
mm/compaction.c
2024
spin_unlock_irqrestore(&cc->zone->lock, flags);
mm/compaction.c
2063
if (block_start_pfn < cc->zone->zone_start_pfn)
mm/compaction.c
2064
block_start_pfn = cc->zone->zone_start_pfn;
mm/compaction.c
2095
block_end_pfn, cc->zone);
mm/compaction.c
2113
low_pfn == cc->zone->zone_start_pfn) &&
mm/compaction.c
2167
static unsigned int fragmentation_score_zone(struct zone *zone)
mm/compaction.c
2169
return extfrag_for_order(zone, COMPACTION_HPAGE_ORDER);
mm/compaction.c
2182
static unsigned int fragmentation_score_zone_weighted(struct zone *zone)
mm/compaction.c
2186
score = zone->present_pages * fragmentation_score_zone(zone);
mm/compaction.c
2187
return div64_ul(score, zone->zone_pgdat->node_present_pages + 1);
mm/compaction.c
2203
struct zone *zone;
mm/compaction.c
2205
zone = &pgdat->node_zones[zoneid];
mm/compaction.c
2206
if (!populated_zone(zone))
mm/compaction.c
2208
score += fragmentation_score_zone_weighted(zone);
mm/compaction.c
2243
reset_cached_positions(cc->zone);
mm/compaction.c
2252
cc->zone->compact_blockskip_flush = true;
mm/compaction.c
2264
pgdat = cc->zone->zone_pgdat;
mm/compaction.c
2268
score = fragmentation_score_zone(cc->zone);
mm/compaction.c
2297
if (__zone_watermark_ok(cc->zone, cc->order,
mm/compaction.c
2298
high_wmark_pages(cc->zone),
mm/compaction.c
2300
zone_page_state(cc->zone,
mm/compaction.c
2310
struct free_area *area = &cc->zone->free_area[order];
mm/compaction.c
2350
trace_mm_compaction_finished(cc->zone, cc->order, ret);
mm/compaction.c
2357
static bool __compaction_suitable(struct zone *zone, int order,
mm/compaction.c
2379
watermark += low_wmark_pages(zone) - min_wmark_pages(zone);
mm/compaction.c
2380
return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx,
mm/compaction.c
2387
bool compaction_suitable(struct zone *zone, int order, unsigned long watermark,
mm/compaction.c
2393
suitable = __compaction_suitable(zone, order, watermark, highest_zoneidx,
mm/compaction.c
2394
zone_page_state(zone, NR_FREE_PAGES));
mm/compaction.c
2414
int fragindex = fragmentation_index(zone, order);
mm/compaction.c
2426
trace_mm_compaction_suitable(zone, order, compact_result);
mm/compaction.c
2435
struct zone *zone;
mm/compaction.c
2442
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
mm/compaction.c
2452
available = zone_reclaimable_pages(zone) / order;
mm/compaction.c
2453
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
mm/compaction.c
2454
if (__compaction_suitable(zone, order, min_wmark_pages(zone),
mm/compaction.c
2470
compaction_suit_allocation_order(struct zone *zone, unsigned int order,
mm/compaction.c
2478
free_pages = zone_page_state(zone, NR_FREE_PAGES_BLOCKS);
mm/compaction.c
2480
free_pages = zone_page_state(zone, NR_FREE_PAGES);
mm/compaction.c
2482
watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
mm/compaction.c
2483
if (__zone_watermark_ok(zone, order, watermark, highest_zoneidx,
mm/compaction.c
2498
if (!__zone_watermark_ok(zone, 0, watermark + compact_gap(order),
mm/compaction.c
2500
zone_page_state(zone, NR_FREE_PAGES)))
mm/compaction.c
2504
if (!compaction_suitable(zone, order, watermark, highest_zoneidx))
mm/compaction.c
2514
unsigned long start_pfn = cc->zone->zone_start_pfn;
mm/compaction.c
2515
unsigned long end_pfn = zone_end_pfn(cc->zone);
mm/compaction.c
2537
ret = compaction_suit_allocation_order(cc->zone, cc->order,
mm/compaction.c
2550
if (compaction_restarting(cc->zone, cc->order))
mm/compaction.c
2551
__reset_isolation_suitable(cc->zone);
mm/compaction.c
2564
cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
mm/compaction.c
2565
cc->free_pfn = cc->zone->compact_cached_free_pfn;
mm/compaction.c
2568
cc->zone->compact_cached_free_pfn = cc->free_pfn;
mm/compaction.c
2572
cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
mm/compaction.c
2573
cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
mm/compaction.c
2576
if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
mm/compaction.c
2591
cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];
mm/compaction.c
2625
cc->zone->compact_cached_migrate_pfn[1] =
mm/compaction.c
2626
cc->zone->compact_cached_migrate_pfn[0];
mm/compaction.c
2637
last_migrated_pfn = max(cc->zone->zone_start_pfn,
mm/compaction.c
2712
lru_add_drain_cpu_zone(cc->zone);
mm/compaction.c
2735
if (free_pfn > cc->zone->compact_cached_free_pfn)
mm/compaction.c
2736
cc->zone->compact_cached_free_pfn = free_pfn;
mm/compaction.c
2749
static enum compact_result compact_zone_order(struct zone *zone, int order,
mm/compaction.c
2759
.zone = zone,
mm/compaction.c
276
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
mm/compaction.c
2819
struct zone *zone;
mm/compaction.c
2828
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
mm/compaction.c
2834
!__cpuset_zone_allowed(zone, gfp_mask))
mm/compaction.c
2838
&& compaction_deferred(zone, order)) {
mm/compaction.c
2843
status = compact_zone_order(zone, order, gfp_mask, prio,
mm/compaction.c
2855
compaction_defer_reset(zone, order, false);
mm/compaction.c
286
if (zone != page_zone(page))
mm/compaction.c
2867
defer_compaction(zone, order);
mm/compaction.c
2896
struct zone *zone;
mm/compaction.c
2907
zone = &pgdat->node_zones[zoneid];
mm/compaction.c
2908
if (!populated_zone(zone))
mm/compaction.c
2914
cc.zone = zone;
mm/compaction.c
3032
struct zone *zone;
mm/compaction.c
3039
zone = &pgdat->node_zones[zoneid];
mm/compaction.c
3041
if (!populated_zone(zone))
mm/compaction.c
3044
ret = compaction_suit_allocation_order(zone,
mm/compaction.c
3062
struct zone *zone;
mm/compaction.c
308
block_pfn = max(block_pfn, zone->zone_start_pfn);
mm/compaction.c
3081
zone = &pgdat->node_zones[zoneid];
mm/compaction.c
3082
if (!populated_zone(zone))
mm/compaction.c
3085
if (compaction_deferred(zone, cc.order))
mm/compaction.c
3088
ret = compaction_suit_allocation_order(zone,
mm/compaction.c
3097
cc.zone = zone;
mm/compaction.c
3101
compaction_defer_reset(zone, cc.order, false);
mm/compaction.c
3109
drain_all_pages(zone);
mm/compaction.c
3115
defer_compaction(zone, cc.order);
mm/compaction.c
317
block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
mm/compaction.c
349
static void __reset_isolation_suitable(struct zone *zone)
mm/compaction.c
351
unsigned long migrate_pfn = zone->zone_start_pfn;
mm/compaction.c
352
unsigned long free_pfn = zone_end_pfn(zone) - 1;
mm/compaction.c
359
if (!zone->compact_blockskip_flush)
mm/compaction.c
362
zone->compact_blockskip_flush = false;
mm/compaction.c
375
if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
mm/compaction.c
379
zone->compact_init_migrate_pfn = reset_migrate;
mm/compaction.c
380
zone->compact_cached_migrate_pfn[0] = reset_migrate;
mm/compaction.c
381
zone->compact_cached_migrate_pfn[1] = reset_migrate;
mm/compaction.c
385
if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
mm/compaction.c
389
zone->compact_init_free_pfn = reset_free;
mm/compaction.c
390
zone->compact_cached_free_pfn = reset_free;
mm/compaction.c
396
zone->compact_cached_migrate_pfn[0] = migrate_pfn;
mm/compaction.c
397
zone->compact_cached_migrate_pfn[1] = migrate_pfn;
mm/compaction.c
398
zone->compact_cached_free_pfn = free_pfn;
mm/compaction.c
407
struct zone *zone = &pgdat->node_zones[zoneid];
mm/compaction.c
408
if (!populated_zone(zone))
mm/compaction.c
411
__reset_isolation_suitable(zone);
mm/compaction.c
436
struct zone *zone = cc->zone;
mm/compaction.c
445
if (pfn > zone->compact_cached_migrate_pfn[0])
mm/compaction.c
446
zone->compact_cached_migrate_pfn[0] = pfn;
mm/compaction.c
448
pfn > zone->compact_cached_migrate_pfn[1])
mm/compaction.c
449
zone->compact_cached_migrate_pfn[1] = pfn;
mm/compaction.c
459
struct zone *zone = cc->zone;
mm/compaction.c
466
if (pfn < zone->compact_cached_free_pfn)
mm/compaction.c
467
zone->compact_cached_free_pfn = pfn;
mm/compaction.c
586
&& compact_unlock_should_abort(&cc->zone->lock, flags,
mm/compaction.c
616
locked = compact_lock_irqsave(&cc->zone->lock,
mm/compaction.c
652
spin_unlock_irqrestore(&cc->zone->lock, flags);
mm/compaction.c
706
if (block_start_pfn < cc->zone->zone_start_pfn)
mm/compaction.c
707
block_start_pfn = cc->zone->zone_start_pfn;
mm/compaction.c
729
block_end_pfn, cc->zone))
mm/compaction.c
763
pg_data_t *pgdat = cc->zone->zone_pgdat;
mm/compaction.c
840
pg_data_t *pgdat = cc->zone->zone_pgdat;
mm/compaction.c
941
low_pfn == cc->zone->zone_start_pfn)) {
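Note: defer_compaction()/compaction_deferred() above implement per-zone exponential backoff: each failure bumps compact_defer_shift (capped at COMPACT_MAX_DEFER_SHIFT, 6 in mainline, i.e. at most 64 skipped attempts), and requests at or above compact_order_failed are deferred until compact_considered reaches 1 << shift. Condensed, using only the cited fields:

    /* after a failed compaction at this order */
    zone->compact_considered = 0;
    if (++zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
        zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

    /* before the next attempt */
    defer_limit = 1UL << zone->compact_defer_shift;
    deferred = (order >= zone->compact_order_failed) &&
               (++zone->compact_considered < defer_limit);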
mm/debug_page_alloc.c
35
bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order)
mm/debug_page_alloc.c
47
void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order)
mm/highmem.c
117
struct zone *zone;
mm/highmem.c
119
for_each_populated_zone(zone) {
mm/highmem.c
120
if (is_highmem(zone))
mm/highmem.c
121
pages += zone_page_state(zone, NR_FREE_PAGES);
mm/highmem.c
130
struct zone *zone;
mm/highmem.c
132
for_each_populated_zone(zone) {
mm/highmem.c
133
if (is_highmem(zone))
mm/highmem.c
134
pages += zone_managed_pages(zone);
mm/huge_memory.c
4620
struct zone *zone;
mm/huge_memory.c
4627
for_each_zone(zone) {
mm/huge_memory.c
4628
if (!managed_zone(zone))
mm/huge_memory.c
4630
max_zone_pfn = zone_end_pfn(zone);
mm/huge_memory.c
4631
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
mm/huge_memory.c
4644
if (zone != folio_zone(folio))
mm/hugetlb.c
1297
struct zone *zone;
mm/hugetlb.c
1309
for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
mm/hugetlb.c
1312
if (!cpuset_zone_allowed(zone, gfp_mask))
mm/hugetlb.c
1318
if (zone_to_nid(zone) == node)
mm/hugetlb.c
1320
node = zone_to_nid(zone);
mm/hugetlb.c
3139
enum zone_type zone = folio_zonenum(folio);
mm/hugetlb.c
3151
__init_single_page(page, pfn, zone, nid);
mm/hugetlb_vmemmap.c
496
static struct page *vmemmap_get_tail(unsigned int order, struct zone *zone)
mm/hugetlb_vmemmap.c
500
int node = zone_to_nid(zone);
mm/hugetlb_vmemmap.c
502
tail = READ_ONCE(zone->vmemmap_tails[idx]);
mm/hugetlb_vmemmap.c
512
init_compound_tail(p + i, NULL, order, zone);
mm/hugetlb_vmemmap.c
514
if (cmpxchg(&zone->vmemmap_tails[idx], NULL, tail)) {
mm/hugetlb_vmemmap.c
516
tail = READ_ONCE(zone->vmemmap_tails[idx]);
mm/hugetlb_vmemmap.c
787
static struct zone *pfn_to_zone(unsigned nid, unsigned long pfn)
mm/hugetlb_vmemmap.c
789
struct zone *zone;
mm/hugetlb_vmemmap.c
793
zone = &NODE_DATA(nid)->node_zones[zone_type];
mm/hugetlb_vmemmap.c
794
if (zone_spans_pfn(zone, pfn))
mm/hugetlb_vmemmap.c
795
return zone;
mm/hugetlb_vmemmap.c
806
struct zone *zone = NULL;
mm/hugetlb_vmemmap.c
840
if (!zone || !zone_spans_pfn(zone, pfn))
mm/hugetlb_vmemmap.c
841
zone = pfn_to_zone(nid, pfn);
mm/hugetlb_vmemmap.c
842
if (WARN_ON_ONCE(!zone))
mm/hugetlb_vmemmap.c
845
if (vmemmap_populate_hvo(start, end, huge_page_order(h), zone,
mm/hugetlb_vmemmap.c
873
struct zone *zone;
mm/hugetlb_vmemmap.c
878
for_each_zone(zone) {
mm/hugetlb_vmemmap.c
883
tail = zone->vmemmap_tails[i];
mm/hugetlb_vmemmap.c
890
init_compound_tail(p + j, NULL, order, zone);
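Note: pfn_to_zone() above (mm/hugetlb_vmemmap.c:787-795) resolves the zone spanning a pfn by probing each of a node's zones with zone_spans_pfn(). A near-verbatim sketch; the function name here is illustrative:

    static struct zone *pfn_to_zone_sketch(int nid, unsigned long pfn)
    {
        enum zone_type zt;

        for (zt = 0; zt < MAX_NR_ZONES; zt++) {
            struct zone *zone = &NODE_DATA(nid)->node_zones[zt];

            if (zone_spans_pfn(zone, pfn))
                return zone;
        }
        return NULL;    /* pfn not spanned by any zone on this node */
    }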
mm/internal.h
1041
struct zone *zone;
mm/internal.h
1325
bool __init deferred_grow_zone(struct zone *zone, unsigned int order);
mm/internal.h
1433
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
mm/internal.h
1506
void setup_zone_pageset(struct zone *zone);
mm/internal.h
1749
unsigned long zone, int nid);
mm/internal.h
813
unsigned long end_pfn, struct zone *zone);
mm/internal.h
816
unsigned long end_pfn, struct zone *zone)
mm/internal.h
818
if (zone->contiguous)
mm/internal.h
821
return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
mm/internal.h
824
void set_zone_contiguous(struct zone *zone);
mm/internal.h
828
static inline void clear_zone_contiguous(struct zone *zone)
mm/internal.h
830
zone->contiguous = false;
mm/internal.h
912
const struct page *head, unsigned int order, struct zone *zone)
mm/internal.h
915
set_page_node(tail, zone_to_nid(zone));
mm/internal.h
916
set_page_zone(tail, zone_idx(zone));
mm/internal.h
949
extern void zone_pcp_reset(struct zone *zone);
mm/internal.h
950
extern void zone_pcp_disable(struct zone *zone);
mm/internal.h
951
extern void zone_pcp_enable(struct zone *zone);
mm/internal.h
952
extern void zone_pcp_init(struct zone *zone);
mm/khugepaged.c
2702
struct zone *zone;
mm/khugepaged.c
2711
for_each_populated_zone(zone) {
mm/khugepaged.c
2716
if (zone_idx(zone) > gfp_zone(GFP_USER))
mm/kmemleak.c
1695
struct zone *zone;
mm/kmemleak.c
1750
for_each_populated_zone(zone) {
mm/kmemleak.c
1751
unsigned long start_pfn = zone->zone_start_pfn;
mm/kmemleak.c
1752
unsigned long end_pfn = zone_end_pfn(zone);
mm/kmemleak.c
1765
if (page_zone(page) != zone)
mm/memblock.c
2318
struct zone *z;
mm/memory_hotplug.c
1017
static inline struct zone *default_zone_for_pfn(int nid, unsigned long start_pfn,
mm/memory_hotplug.c
1020
struct zone *kernel_zone = default_kernel_zone_for_pfn(nid, start_pfn,
mm/memory_hotplug.c
1022
struct zone *movable_zone = &NODE_DATA(nid)->node_zones[ZONE_MOVABLE];
mm/memory_hotplug.c
1041
struct zone *zone_for_pfn_range(enum mmop online_type, int nid,
mm/memory_hotplug.c
1064
struct zone *zone = page_zone(page);
mm/memory_hotplug.c
1065
const bool movable = zone_idx(zone) == ZONE_MOVABLE;
mm/memory_hotplug.c
1072
zone->present_early_pages += nr_pages;
mm/memory_hotplug.c
1073
zone->present_pages += nr_pages;
mm/memory_hotplug.c
1074
zone->zone_pgdat->node_present_pages += nr_pages;
mm/memory_hotplug.c
1083
struct zone *zone)
mm/memory_hotplug.c
1092
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_UNMOVABLE,
mm/memory_hotplug.c
1137
struct zone *zone, struct memory_group *group)
mm/memory_hotplug.c
1146
const int nid = zone_to_nid(zone);
mm/memory_hotplug.c
1164
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_MOVABLE,
mm/memory_hotplug.c
1185
spin_lock_irqsave(&zone->lock, flags);
mm/memory_hotplug.c
1186
zone->nr_isolate_pageblock += nr_pages / pageblock_nr_pages;
mm/memory_hotplug.c
1187
spin_unlock_irqrestore(&zone->lock, flags);
mm/memory_hotplug.c
1194
if (!populated_zone(zone)) {
mm/memory_hotplug.c
1196
setup_zone_pageset(zone);
mm/memory_hotplug.c
1208
if (!node_state(nid, N_NORMAL_MEMORY) && zone_idx(zone) <= ZONE_NORMAL)
mm/memory_hotplug.c
1223
shuffle_zone(zone);
mm/memory_hotplug.c
1247
remove_pfn_range_from_zone(zone, pfn, nr_pages);
mm/memory_hotplug.c
1899
struct zone *zone, struct memory_group *group)
mm/memory_hotplug.c
1903
struct pglist_data *pgdat = zone->zone_pgdat;
mm/memory_hotplug.c
1904
const int node = zone_to_nid(zone);
mm/memory_hotplug.c
1950
if (WARN_ON_ONCE(page_zone(pfn_to_page(start_pfn)) != zone ||
mm/memory_hotplug.c
1951
page_zone(pfn_to_page(end_pfn - 1)) != zone)) {
mm/memory_hotplug.c
1961
zone_pcp_disable(zone);
mm/memory_hotplug.c
2050
spin_lock_irqsave(&zone->lock, flags);
mm/memory_hotplug.c
2051
zone->nr_isolate_pageblock -= nr_pages / pageblock_nr_pages;
mm/memory_hotplug.c
2052
spin_unlock_irqrestore(&zone->lock, flags);
mm/memory_hotplug.c
2055
zone_pcp_enable(zone);
mm/memory_hotplug.c
2069
if (zone_idx(zone) <= ZONE_NORMAL) {
mm/memory_hotplug.c
2081
if (!populated_zone(zone)) {
mm/memory_hotplug.c
2082
zone_pcp_reset(zone);
mm/memory_hotplug.c
2096
remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
mm/memory_hotplug.c
2107
zone_pcp_enable(zone);
mm/memory_hotplug.c
426
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
mm/memory_hotplug.c
437
if (zone != page_zone(pfn_to_page(start_pfn)))
mm/memory_hotplug.c
447
static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
mm/memory_hotplug.c
462
if (zone != page_zone(pfn_to_page(pfn)))
mm/memory_hotplug.c
471
static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
mm/memory_hotplug.c
475
int nid = zone_to_nid(zone);
mm/memory_hotplug.c
477
if (zone->zone_start_pfn == start_pfn) {
mm/memory_hotplug.c
484
pfn = find_smallest_section_pfn(nid, zone, end_pfn,
mm/memory_hotplug.c
485
zone_end_pfn(zone));
mm/memory_hotplug.c
487
zone->spanned_pages = zone_end_pfn(zone) - pfn;
mm/memory_hotplug.c
488
zone->zone_start_pfn = pfn;
mm/memory_hotplug.c
490
zone->zone_start_pfn = 0;
mm/memory_hotplug.c
491
zone->spanned_pages = 0;
mm/memory_hotplug.c
493
} else if (zone_end_pfn(zone) == end_pfn) {
mm/memory_hotplug.c
500
pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
mm/memory_hotplug.c
503
zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
mm/memory_hotplug.c
505
zone->zone_start_pfn = 0;
mm/memory_hotplug.c
506
zone->spanned_pages = 0;
mm/memory_hotplug.c
514
struct zone *zone;
mm/memory_hotplug.c
516
for (zone = pgdat->node_zones;
mm/memory_hotplug.c
517
zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
mm/memory_hotplug.c
518
unsigned long end_pfn = zone_end_pfn(zone);
mm/memory_hotplug.c
521
if (!zone->spanned_pages)
mm/memory_hotplug.c
524
node_start_pfn = zone->zone_start_pfn;
mm/memory_hotplug.c
531
if (zone->zone_start_pfn < node_start_pfn)
mm/memory_hotplug.c
532
node_start_pfn = zone->zone_start_pfn;
mm/memory_hotplug.c
539
void remove_pfn_range_from_zone(struct zone *zone,
mm/memory_hotplug.c
544
struct pglist_data *pgdat = zone->zone_pgdat;
mm/memory_hotplug.c
563
if (zone_is_zone_device(zone))
mm/memory_hotplug.c
566
clear_zone_contiguous(zone);
mm/memory_hotplug.c
568
shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
mm/memory_hotplug.c
571
set_zone_contiguous(zone);
mm/memory_hotplug.c
695
static void __meminit resize_zone_range(struct zone *zone, unsigned long start_pfn,
mm/memory_hotplug.c
698
unsigned long old_end_pfn = zone_end_pfn(zone);
mm/memory_hotplug.c
700
if (zone_is_empty(zone) || start_pfn < zone->zone_start_pfn)
mm/memory_hotplug.c
701
zone->zone_start_pfn = start_pfn;
mm/memory_hotplug.c
703
zone->spanned_pages = max(start_pfn + nr_pages, old_end_pfn) - zone->zone_start_pfn;
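A worked example of the resize arithmetic above, with hypothetical numbers:

	/* zone spans [1000, 2000); pfns [500, 700) are hot-added */
	zone->zone_start_pfn = 500;				/* 500 < 1000 */
	zone->spanned_pages  = max(500 + 200, 2000) - 500;	/* = 1500 */

The span stretches to cover both ranges, so spanned_pages counts the hole at [700, 1000) too.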
mm/memory_hotplug.c
740
void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
mm/memory_hotplug.c
745
struct pglist_data *pgdat = zone->zone_pgdat;
mm/memory_hotplug.c
748
clear_zone_contiguous(zone);
mm/memory_hotplug.c
750
if (zone_is_empty(zone))
mm/memory_hotplug.c
751
init_currently_empty_zone(zone, start_pfn, nr_pages);
mm/memory_hotplug.c
752
resize_zone_range(zone, start_pfn, nr_pages);
mm/memory_hotplug.c
761
if (zone_is_zone_device(zone)) {
mm/memory_hotplug.c
774
memmap_init_range(nr_pages, nid, zone_idx(zone), start_pfn, 0,
mm/memory_hotplug.c
778
set_zone_contiguous(zone);
mm/memory_hotplug.c
787
struct zone *zone)
mm/memory_hotplug.c
789
if (zone_idx(zone) == ZONE_MOVABLE) {
mm/memory_hotplug.c
790
stats->movable_pages += zone->present_pages;
mm/memory_hotplug.c
792
stats->kernel_early_pages += zone->present_early_pages;
mm/memory_hotplug.c
798
stats->movable_pages += zone->cma_pages;
mm/memory_hotplug.c
799
stats->kernel_early_pages -= zone->cma_pages;
mm/memory_hotplug.c
841
struct zone *zone;
mm/memory_hotplug.c
847
for_each_populated_zone(zone)
mm/memory_hotplug.c
848
auto_movable_stats_account_zone(&stats, zone);
mm/memory_hotplug.c
853
zone = pgdat->node_zones + i;
mm/memory_hotplug.c
854
if (populated_zone(zone))
mm/memory_hotplug.c
855
auto_movable_stats_account_zone(&stats, zone);
mm/memory_hotplug.c
890
static struct zone *default_kernel_zone_for_pfn(int nid, unsigned long start_pfn,
mm/memory_hotplug.c
897
struct zone *zone = &pgdat->node_zones[zid];
mm/memory_hotplug.c
899
if (zone_intersects(zone, start_pfn, nr_pages))
mm/memory_hotplug.c
900
return zone;
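zone_intersects(), tested above to pick the default kernel zone for a hot-added range, is a plain overlap check (include/linux/mmzone.h, paraphrased):

static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}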
mm/memory_hotplug.c
956
static struct zone *auto_movable_zone_for_pfn(int nid,
mm/mempolicy.c
2080
bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
mm/mempolicy.c
2097
return zone >= dynamic_policy_zone;
mm/memremap.c
226
struct zone *zone;
mm/memremap.c
228
zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
mm/memremap.c
229
move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
mm/migrate.c
2637
struct zone *zone = pgdat->node_zones + z;
mm/migrate.c
2639
if (!managed_zone(zone))
mm/migrate.c
2643
if (!zone_watermark_ok(zone, 0,
mm/migrate.c
2644
high_wmark_pages(zone) +
mm/migrate.c
576
struct zone *oldzone, *newzone;
mm/mm_init.c
101
for_each_zone_zonelist(zone, z, zonelist, zoneid)
mm/mm_init.c
102
pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
mm/mm_init.c
1119
void __ref memmap_init_zone_device(struct zone *zone,
mm/mm_init.c
1125
struct pglist_data *pgdat = zone->zone_pgdat;
mm/mm_init.c
1128
unsigned long zone_idx = zone_idx(zone);
mm/mm_init.c
1307
struct zone *z;
mm/mm_init.c
1355
struct zone *zone = pgdat->node_zones + i;
mm/mm_init.c
1372
zone->zone_start_pfn = zone_start_pfn;
mm/mm_init.c
1374
zone->zone_start_pfn = 0;
mm/mm_init.c
1375
zone->spanned_pages = spanned;
mm/mm_init.c
1376
zone->present_pages = real_size;
mm/mm_init.c
1378
zone->present_early_pages = real_size;
mm/mm_init.c
1432
static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
mm/mm_init.c
1435
atomic_long_set(&zone->managed_pages, remaining_pages);
mm/mm_init.c
1436
zone_set_nid(zone, nid);
mm/mm_init.c
1437
zone->name = zone_names[idx];
mm/mm_init.c
1438
zone->zone_pgdat = NODE_DATA(nid);
mm/mm_init.c
1439
spin_lock_init(&zone->lock);
mm/mm_init.c
1440
zone_seqlock_init(zone);
mm/mm_init.c
1441
zone_pcp_init(zone);
mm/mm_init.c
1444
static void __meminit zone_init_free_lists(struct zone *zone)
mm/mm_init.c
1448
INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
mm/mm_init.c
1449
zone->free_area[order].nr_free = 0;
mm/mm_init.c
1453
INIT_LIST_HEAD(&zone->unaccepted_pages);
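The free_area array initialized above holds one free list per migratetype at each order (include/linux/mmzone.h, paraphrased):

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

so zone_init_free_lists() is a nested loop over orders and migratetypes, plus, where unaccepted memory is supported, the unaccepted_pages list head seen at line 1453.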
mm/mm_init.c
1457
void __meminit init_currently_empty_zone(struct zone *zone,
mm/mm_init.c
1461
struct pglist_data *pgdat = zone->zone_pgdat;
mm/mm_init.c
1462
int zone_idx = zone_idx(zone) + 1;
mm/mm_init.c
1467
zone->zone_start_pfn = zone_start_pfn;
mm/mm_init.c
1472
(unsigned long)zone_idx(zone),
mm/mm_init.c
1475
zone_init_free_lists(zone);
mm/mm_init.c
1476
zone->initialized = 1;
mm/mm_init.c
1500
static void __ref setup_usemap(struct zone *zone)
mm/mm_init.c
1502
unsigned long usemapsize = usemap_size(zone->zone_start_pfn,
mm/mm_init.c
1503
zone->spanned_pages);
mm/mm_init.c
1504
zone->pageblock_flags = NULL;
mm/mm_init.c
1506
zone->pageblock_flags =
mm/mm_init.c
1508
zone_to_nid(zone));
mm/mm_init.c
1509
if (!zone->pageblock_flags)
mm/mm_init.c
1511
usemapsize, zone->name, zone_to_nid(zone));
mm/mm_init.c
1515
static inline void setup_usemap(struct zone *zone) {}
mm/mm_init.c
1596
struct zone *zone = pgdat->node_zones + z;
mm/mm_init.c
1598
zone->present_pages = 0;
mm/mm_init.c
1599
zone_init_internals(zone, z, nid, 0);
mm/mm_init.c
1613
struct zone *zone = pgdat->node_zones + j;
mm/mm_init.c
1614
unsigned long size = zone->spanned_pages;
mm/mm_init.c
1620
zone_init_internals(zone, j, nid, zone->present_pages);
mm/mm_init.c
1625
setup_usemap(zone);
mm/mm_init.c
1626
init_currently_empty_zone(zone, zone->zone_start_pfn, size);
mm/mm_init.c
1765
struct zone *zone = &pgdat->node_zones[zone_type];
mm/mm_init.c
1766
if (populated_zone(zone)) {
mm/mm_init.c
1835
int i, nid, zone;
mm/mm_init.c
1846
zone = MAX_NR_ZONES - i - 1;
mm/mm_init.c
1848
zone = i;
mm/mm_init.c
1850
if (zone == ZONE_MOVABLE)
mm/mm_init.c
1853
end_pfn = max(max_zone_pfn[zone], start_pfn);
mm/mm_init.c
1854
arch_zone_lowest_possible_pfn[zone] = start_pfn;
mm/mm_init.c
1855
arch_zone_highest_possible_pfn[zone] = end_pfn;
mm/mm_init.c
2043
static unsigned long __init deferred_init_pages(struct zone *zone,
mm/mm_init.c
2046
int nid = zone_to_nid(zone);
mm/mm_init.c
2048
int zid = zone_idx(zone);
mm/mm_init.c
2071
struct zone *zone, bool can_resched)
mm/mm_init.c
2073
int nid = zone_to_nid(zone);
mm/mm_init.c
2092
nr_pages += deferred_init_pages(zone, spfn, chunk_end);
mm/mm_init.c
2111
struct zone *zone = arg;
mm/mm_init.c
2113
deferred_init_memmap_chunk(start_pfn, end_pfn, zone, true);
mm/mm_init.c
2130
struct zone *zone;
mm/mm_init.c
2157
zone = pgdat->node_zones + pgdat->nr_zones - 1;
mm/mm_init.c
2158
last_pfn = SECTION_ALIGN_UP(zone_end_pfn(zone));
mm/mm_init.c
2162
.fn_arg = zone,
mm/mm_init.c
2174
WARN_ON(pgdat->nr_zones < MAX_NR_ZONES && populated_zone(++zone));
mm/mm_init.c
2194
bool __init deferred_grow_zone(struct zone *zone, unsigned int order)
mm/mm_init.c
2197
pg_data_t *pgdat = zone->zone_pgdat;
mm/mm_init.c
2203
if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
mm/mm_init.c
2226
nr_pages < nr_pages_needed && spfn < zone_end_pfn(zone);
mm/mm_init.c
2228
nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone, false);
mm/mm_init.c
2275
void set_zone_contiguous(struct zone *zone)
mm/mm_init.c
2277
unsigned long block_start_pfn = zone->zone_start_pfn;
mm/mm_init.c
2281
for (; block_start_pfn < zone_end_pfn(zone);
mm/mm_init.c
2285
block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
mm/mm_init.c
2288
block_end_pfn, zone))
mm/mm_init.c
2294
zone->contiguous = true;
mm/mm_init.c
2305
struct zone *zone, *izone = NULL;
mm/mm_init.c
2307
for_each_zone(zone) {
mm/mm_init.c
2308
if (nid != NUMA_NO_NODE && zone_to_nid(zone) != nid)
mm/mm_init.c
2311
if (zone_intersects(zone, start_pfn, nr_pages)) {
mm/mm_init.c
2314
izone = zone;
mm/mm_init.c
2325
struct zone *zone;
mm/mm_init.c
2359
for_each_populated_zone(zone)
mm/mm_init.c
2360
set_zone_contiguous(zone);
mm/mm_init.c
593
unsigned long zone, int nid)
mm/mm_init.c
596
set_page_links(page, zone, nid, pfn);
mm/mm_init.c
605
if (!is_highmem_idx(zone))
mm/mm_init.c
688
struct zone *zone = &pgdat->node_zones[zid];
mm/mm_init.c
690
if (zone_spans_pfn(zone, pfn))
mm/mm_init.c
80
struct zone *zone;
mm/mm_init.c
813
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
mm/mm_init.c
817
if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
mm/mm_init.c
858
int zone, int node)
mm/mm_init.c
864
__init_single_page(pfn_to_page(pfn), pfn, zone, node);
mm/mm_init.c
871
node, zone_names[zone], pgcnt);
mm/mm_init.c
883
void __meminit memmap_init_range(unsigned long size, int nid, unsigned long zone,
mm/mm_init.c
903
if (zone == ZONE_DEVICE) {
mm/mm_init.c
91
zone = &pgdat->node_zones[zoneid];
mm/mm_init.c
919
if (overlap_memmap_init(zone, &pfn))
mm/mm_init.c
92
if (!populated_zone(zone))
mm/mm_init.c
928
__init_single_page(page, pfn, zone, nid);
mm/mm_init.c
931
if (zone == ZONE_DEVICE)
mm/mm_init.c
952
static void __init memmap_init_zone_range(struct zone *zone,
mm/mm_init.c
957
unsigned long zone_start_pfn = zone->zone_start_pfn;
mm/mm_init.c
958
unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
mm/mm_init.c
959
int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
mm/mm_init.c
98
zone->name);
mm/mm_init.c
987
struct zone *zone = node->node_zones + j;
mm/mm_init.c
989
if (!populated_zone(zone))
mm/mm_init.c
992
memmap_init_zone_range(zone, start_pfn, end_pfn,
mm/mmzone.c
30
struct zone *next_zone(struct zone *zone)
mm/mmzone.c
32
pg_data_t *pgdat = zone->zone_pgdat;
mm/mmzone.c
34
if (zone < pgdat->node_zones + MAX_NR_ZONES - 1)
mm/mmzone.c
35
zone++;
mm/mmzone.c
39
zone = pgdat->node_zones;
mm/mmzone.c
41
zone = NULL;
mm/mmzone.c
43
return zone;
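next_zone() above is the backbone of the zone iterators that recur throughout this index; from include/linux/mmzone.h (paraphrased from memory):

#define for_each_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)			\
	for (zone = (first_online_pgdat())->node_zones;	\
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else

That is, a walk across every zone of every online node, optionally skipping zones with no present pages.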
mm/oom_kill.c
251
struct zone *zone;
mm/oom_kill.c
292
for_each_zone_zonelist_nodemask(zone, z, oc->zonelist,
mm/oom_kill.c
294
if (!cpuset_zone_allowed(zone, oc->gfp_mask))
mm/page-writeback.c
250
struct zone *zone = pgdat->node_zones + z;
mm/page-writeback.c
252
if (!populated_zone(zone))
mm/page-writeback.c
255
nr_pages += zone_page_state(zone, NR_FREE_PAGES);
mm/page-writeback.c
280
struct zone *z;
mm/page_alloc.c
1025
__add_to_free_list(page, zone, order, migratetype, to_tail);
mm/page_alloc.c
1457
static void free_pcppages_bulk(struct zone *zone, int count,
mm/page_alloc.c
1474
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
1502
__free_one_page(page, pfn, zone, order, mt, FPI_NONE);
mm/page_alloc.c
1507
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
1511
static void split_large_buddy(struct zone *zone, struct page *page,
mm/page_alloc.c
1526
__free_one_page(page, pfn, zone, order, mt, fpi);
mm/page_alloc.c
1534
static void add_page_to_zone_llist(struct zone *zone, struct page *page,
mm/page_alloc.c
1540
llist_add(&page->pcp_llist, &zone->trylock_free_pages);
mm/page_alloc.c
1543
static void free_one_page(struct zone *zone, struct page *page,
mm/page_alloc.c
1551
if (!spin_trylock_irqsave(&zone->lock, flags)) {
mm/page_alloc.c
1552
add_page_to_zone_llist(zone, page, order);
mm/page_alloc.c
1556
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
1560
llhead = &zone->trylock_free_pages;
mm/page_alloc.c
1569
split_large_buddy(zone, p, page_to_pfn(p), p_order, fpi_flags);
mm/page_alloc.c
1573
split_large_buddy(zone, page, pfn, order, fpi_flags);
mm/page_alloc.c
1574
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
1583
struct zone *zone = page_zone(page);
mm/page_alloc.c
1586
free_one_page(zone, page, pfn, order, fpi_flags);
mm/page_alloc.c
1662
unsigned long end_pfn, struct zone *zone)
mm/page_alloc.c
1677
if (page_zone(start_page) != zone)
mm/page_alloc.c
1703
static inline unsigned int expand(struct zone *zone, struct page *page, int low,
mm/page_alloc.c
1712
VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
mm/page_alloc.c
1720
if (set_page_guard(zone, &page[size], high))
mm/page_alloc.c
1723
__add_to_free_list(&page[size], zone, high, migratetype, false);
mm/page_alloc.c
1731
static __always_inline void page_del_and_expand(struct zone *zone,
mm/page_alloc.c
1737
__del_page_from_free_list(page, zone, high, migratetype);
mm/page_alloc.c
1738
nr_pages -= expand(zone, page, low, high, migratetype);
mm/page_alloc.c
1739
account_freepages(zone, -nr_pages, migratetype);
mm/page_alloc.c
1890
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
mm/page_alloc.c
1899
area = &(zone->free_area[current_order]);
mm/page_alloc.c
1904
page_del_and_expand(zone, page, order, current_order,
mm/page_alloc.c
1929
static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
mm/page_alloc.c
1932
return __rmqueue_smallest(zone, order, MIGRATE_CMA);
mm/page_alloc.c
1935
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
mm/page_alloc.c
1943
static int __move_freepages_block(struct zone *zone, unsigned long start_pfn,
mm/page_alloc.c
1962
VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
mm/page_alloc.c
1963
VM_BUG_ON_PAGE(page_zone(page) != zone, page);
mm/page_alloc.c
1967
move_to_free_list(page, zone, order, old_mt, new_mt);
mm/page_alloc.c
1976
static bool prep_move_freepages_block(struct zone *zone, struct page *page,
mm/page_alloc.c
1993
if (!zone_spans_pfn(zone, start))
mm/page_alloc.c
1995
if (!zone_spans_pfn(zone, end - 1))
mm/page_alloc.c
2026
static int move_freepages_block(struct zone *zone, struct page *page,
mm/page_alloc.c
2032
if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
mm/page_alloc.c
2035
res = __move_freepages_block(zone, start_pfn, old_mt, new_mt);
mm/page_alloc.c
2100
static bool __move_freepages_block_isolate(struct zone *zone,
mm/page_alloc.c
211
struct zone *zone);
mm/page_alloc.c
2114
if (!prep_move_freepages_block(zone, page, &start_pfn, NULL, NULL))
mm/page_alloc.c
2127
del_page_from_free_list(buddy, zone, order,
mm/page_alloc.c
2130
split_large_buddy(zone, buddy, buddy_pfn, order, FPI_NONE);
mm/page_alloc.c
2146
__move_freepages_block(zone, start_pfn, from_mt, to_mt);
mm/page_alloc.c
2152
bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page)
mm/page_alloc.c
2154
return __move_freepages_block_isolate(zone, page, true);
mm/page_alloc.c
2157
bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page)
mm/page_alloc.c
2159
return __move_freepages_block_isolate(zone, page, false);
mm/page_alloc.c
2164
static inline bool boost_watermark(struct zone *zone)
mm/page_alloc.c
2176
if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
mm/page_alloc.c
2179
max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
mm/page_alloc.c
2195
zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
mm/page_alloc.c
2283
try_to_claim_block(struct zone *zone, struct page *page,
mm/page_alloc.c
2294
del_page_from_free_list(page, zone, current_order, block_type);
mm/page_alloc.c
2296
nr_added = expand(zone, page, order, current_order, start_type);
mm/page_alloc.c
2297
account_freepages(zone, nr_added, start_type);
mm/page_alloc.c
2306
if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
mm/page_alloc.c
2307
set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
mm/page_alloc.c
2310
if (!prep_move_freepages_block(zone, page, &start_pfn, &free_pages,
mm/page_alloc.c
2341
__move_freepages_block(zone, start_pfn, block_type, start_type);
mm/page_alloc.c
2343
return __rmqueue_smallest(zone, order, start_type);
mm/page_alloc.c
2358
__rmqueue_claim(struct zone *zone, int order, int start_migratetype,
mm/page_alloc.c
2382
area = &(zone->free_area[current_order]);
mm/page_alloc.c
2395
page = try_to_claim_block(zone, page, current_order, order,
mm/page_alloc.c
2413
__rmqueue_steal(struct zone *zone, int order, int start_migratetype)
mm/page_alloc.c
2421
area = &(zone->free_area[current_order]);
mm/page_alloc.c
2428
page_del_and_expand(zone, page, order, current_order, fallback_mt);
mm/page_alloc.c
2449
__rmqueue(struct zone *zone, unsigned int order, int migratetype,
mm/page_alloc.c
2461
zone_page_state(zone, NR_FREE_CMA_PAGES) >
mm/page_alloc.c
2462
zone_page_state(zone, NR_FREE_PAGES) / 2) {
mm/page_alloc.c
2463
page = __rmqueue_cma_fallback(zone, order);
mm/page_alloc.c
2480
page = __rmqueue_smallest(zone, order, migratetype);
mm/page_alloc.c
2486
page = __rmqueue_cma_fallback(zone, order);
mm/page_alloc.c
2494
page = __rmqueue_claim(zone, order, migratetype, alloc_flags);
mm/page_alloc.c
2503
page = __rmqueue_steal(zone, order, migratetype);
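Read together, the __rmqueue() fragments above (lines 2449-2503) encode the allocation fallback order. A condensed sketch, not the exact upstream body (the NR_FREE_CMA_PAGES heuristic at lines 2461-2463 and the surrounding retry logic are simplified away):

	/* 1. exact migratetype from the smallest sufficient order */
	page = __rmqueue_smallest(zone, order, migratetype);
	if (!page) {
		/* 2. fall back to CMA pageblocks where permitted */
		page = __rmqueue_cma_fallback(zone, order);
		/* 3. try to claim a whole foreign pageblock */
		if (!page)
			page = __rmqueue_claim(zone, order, migratetype,
					       alloc_flags);
		/* 4. last resort: steal pages without claiming the block */
		if (!page)
			page = __rmqueue_steal(zone, order, migratetype);
	}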
mm/page_alloc.c
2518
static int rmqueue_bulk(struct zone *zone, unsigned int order,
mm/page_alloc.c
2527
if (!spin_trylock_irqsave(&zone->lock, flags))
mm/page_alloc.c
2530
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
2533
struct page *page = __rmqueue(zone, order, migratetype,
mm/page_alloc.c
2550
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
2559
bool decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp)
mm/page_alloc.c
2582
free_pcppages_bulk(zone, to_drain_batched, pcp, 0);
mm/page_alloc.c
2598
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
mm/page_alloc.c
2606
free_pcppages_bulk(zone, to_drain, pcp, 0);
mm/page_alloc.c
2615
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
mm/page_alloc.c
2617
struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
mm/page_alloc.c
2627
free_pcppages_bulk(zone, to_drain, pcp, 0);
mm/page_alloc.c
2639
struct zone *zone;
mm/page_alloc.c
2641
for_each_populated_zone(zone) {
mm/page_alloc.c
2642
drain_pages_zone(cpu, zone);
mm/page_alloc.c
2649
void drain_local_pages(struct zone *zone)
mm/page_alloc.c
2653
if (zone)
mm/page_alloc.c
2654
drain_pages_zone(cpu, zone);
mm/page_alloc.c
2669
static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
mm/page_alloc.c
2685
if (!zone)
mm/page_alloc.c
2698
struct zone *z;
mm/page_alloc.c
2707
} else if (zone) {
mm/page_alloc.c
2708
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
mm/page_alloc.c
2728
if (zone)
mm/page_alloc.c
2729
drain_pages_zone(cpu, zone);
mm/page_alloc.c
2742
void drain_all_pages(struct zone *zone)
mm/page_alloc.c
2744
__drain_all_pages(zone, false);
mm/page_alloc.c
2772
static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone,
mm/page_alloc.c
2794
if (test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) {
mm/page_alloc.c
2804
if (test_bit(ZONE_BELOW_HIGH, &zone->flags)) {
mm/page_alloc.c
2827
static bool free_frozen_page_commit(struct zone *zone,
mm/page_alloc.c
286
static bool cond_accept_memory(struct zone *zone, unsigned int order,
mm/page_alloc.c
2876
high = nr_pcp_high(pcp, zone, batch, free_high);
mm/page_alloc.c
2883
free_pcppages_bulk(zone, to_free_batched, pcp, pindex);
mm/page_alloc.c
2891
pcp = pcp_spin_trylock(zone->per_cpu_pageset);
mm/page_alloc.c
2909
if (test_bit(ZONE_BELOW_HIGH, &zone->flags) &&
mm/page_alloc.c
2910
zone_watermark_ok(zone, 0, high_wmark_pages(zone),
mm/page_alloc.c
2912
struct pglist_data *pgdat = zone->zone_pgdat;
mm/page_alloc.c
2913
clear_bit(ZONE_BELOW_HIGH, &zone->flags);
mm/page_alloc.c
2936
struct zone *zone;
mm/page_alloc.c
2955
zone = page_zone(page);
mm/page_alloc.c
2959
free_one_page(zone, page, pfn, order, fpi_flags);
mm/page_alloc.c
2967
add_page_to_zone_llist(zone, page, order);
mm/page_alloc.c
2970
pcp = pcp_spin_trylock(zone->per_cpu_pageset);
mm/page_alloc.c
2972
if (!free_frozen_page_commit(zone, pcp, page, migratetype,
mm/page_alloc.c
2977
free_one_page(zone, page, pfn, order, fpi_flags);
mm/page_alloc.c
2997
struct zone *locked_zone = NULL;
mm/page_alloc.c
3026
struct zone *zone = folio_zone(folio);
mm/page_alloc.c
3035
if (zone != locked_zone ||
mm/page_alloc.c
3048
free_one_page(zone, &folio->page, pfn,
mm/page_alloc.c
3057
pcp = pcp_spin_trylock(zone->per_cpu_pageset);
mm/page_alloc.c
3059
free_one_page(zone, &folio->page, pfn,
mm/page_alloc.c
3063
locked_zone = zone;
mm/page_alloc.c
3074
if (!free_frozen_page_commit(zone, pcp, &folio->page,
mm/page_alloc.c
3118
struct zone *zone = page_zone(page);
mm/page_alloc.c
312
_deferred_grow_zone(struct zone *zone, unsigned int order)
mm/page_alloc.c
3129
watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
mm/page_alloc.c
3130
if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
mm/page_alloc.c
3134
del_page_from_free_list(page, zone, order, mt);
mm/page_alloc.c
314
return deferred_grow_zone(zone, order);
mm/page_alloc.c
3149
move_freepages_block(zone, page, mt,
mm/page_alloc.c
3168
struct zone *zone = page_zone(page);
mm/page_alloc.c
3171
lockdep_assert_held(&zone->lock);
mm/page_alloc.c
3174
__free_one_page(page, page_to_pfn(page), zone, order, mt,
mm/page_alloc.c
3181
static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
mm/page_alloc.c
3205
struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
mm/page_alloc.c
3215
if (!spin_trylock_irqsave(&zone->lock, flags))
mm/page_alloc.c
3218
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
322
static inline bool _deferred_grow_zone(struct zone *zone, unsigned int order)
mm/page_alloc.c
3221
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
mm/page_alloc.c
3225
page = __rmqueue(zone, order, migratetype, alloc_flags, &rmqm);
mm/page_alloc.c
3234
page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
mm/page_alloc.c
3237
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
3241
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
3249
reserve_highatomic_pageblock(page, order, zone);
mm/page_alloc.c
3252
zone_statistics(preferred_zone, zone, 1);
mm/page_alloc.c
3257
static int nr_pcp_alloc(struct per_cpu_pages *pcp, struct zone *zone, int order)
mm/page_alloc.c
3280
if (high_min != high_max && !test_bit(ZONE_BELOW_HIGH, &zone->flags))
mm/page_alloc.c
3309
struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order,
mm/page_alloc.c
3319
int batch = nr_pcp_alloc(pcp, zone, order);
mm/page_alloc.c
3336
alloced = rmqueue_bulk(zone, order,
mm/page_alloc.c
3354
static struct page *rmqueue_pcplist(struct zone *preferred_zone,
mm/page_alloc.c
3355
struct zone *zone, unsigned int order,
mm/page_alloc.c
3363
pcp = pcp_spin_trylock(zone->per_cpu_pageset);
mm/page_alloc.c
3374
page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list);
mm/page_alloc.c
3378
zone_statistics(preferred_zone, zone, 1);
mm/page_alloc.c
3396
struct page *rmqueue(struct zone *preferred_zone,
mm/page_alloc.c
3397
struct zone *zone, unsigned int order,
mm/page_alloc.c
3404
page = rmqueue_pcplist(preferred_zone, zone, order,
mm/page_alloc.c
3410
page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
mm/page_alloc.c
3416
unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) {
mm/page_alloc.c
3417
clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
mm/page_alloc.c
3418
wakeup_kswapd(zone, 0, 0, zone_idx(zone));
mm/page_alloc.c
3421
VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
mm/page_alloc.c
3431
struct zone *zone)
mm/page_alloc.c
3442
if ((zone_managed_pages(zone) / 100) < pageblock_nr_pages)
mm/page_alloc.c
3444
max_managed = ALIGN((zone_managed_pages(zone) / 100), pageblock_nr_pages);
mm/page_alloc.c
3445
if (zone->nr_reserved_highatomic >= max_managed)
mm/page_alloc.c
3448
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
3451
if (zone->nr_reserved_highatomic >= max_managed)
mm/page_alloc.c
3461
if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) == -1)
mm/page_alloc.c
3463
zone->nr_reserved_highatomic += pageblock_nr_pages;
mm/page_alloc.c
3466
zone->nr_reserved_highatomic += 1 << order;
mm/page_alloc.c
3470
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
3488
struct zone *zone;
mm/page_alloc.c
3493
for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
mm/page_alloc.c
3499
if (!force && zone->nr_reserved_highatomic <=
mm/page_alloc.c
3503
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
3505
struct free_area *area = &(zone->free_area[order]);
mm/page_alloc.c
3520
if (WARN_ON_ONCE(size > zone->nr_reserved_highatomic))
mm/page_alloc.c
3521
size = zone->nr_reserved_highatomic;
mm/page_alloc.c
3522
zone->nr_reserved_highatomic -= size;
mm/page_alloc.c
3534
ret = move_freepages_block(zone, page,
mm/page_alloc.c
3538
move_to_free_list(page, zone, order,
mm/page_alloc.c
3551
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
3555
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
3561
static inline long __zone_watermark_unusable_free(struct zone *z,
mm/page_alloc.c
3588
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
mm/page_alloc.c
3666
bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
mm/page_alloc.c
3673
static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
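At its core, the check wrapped by zone_watermark_ok() and zone_watermark_fast() has the following shape (a simplification of __zone_watermark_ok(); the real function also walks zone->free_area[] to prove that a block of the requested order actually exists):

	long free = zone_page_state(z, NR_FREE_PAGES);

	free -= __zone_watermark_unusable_free(z, order, alloc_flags);
	if (free <= mark + z->lowmem_reserve[highest_zoneidx])
		return false;	/* allocation would dip below the mark */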
mm/page_alloc.c
3721
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
mm/page_alloc.c
3723
return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
mm/page_alloc.c
3727
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
mm/page_alloc.c
3742
alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
mm/page_alloc.c
3758
if (!zone)
mm/page_alloc.c
3761
if (zone_idx(zone) != ZONE_NORMAL)
mm/page_alloc.c
3770
if (nr_online_nodes > 1 && !populated_zone(--zone))
mm/page_alloc.c
3798
struct zone *zone;
mm/page_alloc.c
3812
for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
mm/page_alloc.c
3819
!__cpuset_zone_allowed(zone, gfp_mask))
mm/page_alloc.c
3841
if (last_pgdat != zone->zone_pgdat) {
mm/page_alloc.c
3842
last_pgdat = zone->zone_pgdat;
mm/page_alloc.c
3843
last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat);
mm/page_alloc.c
3851
zone != zonelist_zone(ac->preferred_zoneref)) {
mm/page_alloc.c
3860
if (zone_to_nid(zone) != local_nid) {
mm/page_alloc.c
3874
!waitqueue_active(&zone->zone_pgdat->kswapd_wait)) {
mm/page_alloc.c
3879
cond_accept_memory(zone, order, alloc_flags);
mm/page_alloc.c
3888
if (test_bit(ZONE_BELOW_HIGH, &zone->flags))
mm/page_alloc.c
3891
mark = high_wmark_pages(zone);
mm/page_alloc.c
3892
if (zone_watermark_fast(zone, order, mark,
mm/page_alloc.c
3897
set_bit(ZONE_BELOW_HIGH, &zone->flags);
mm/page_alloc.c
3900
mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
mm/page_alloc.c
3901
if (!zone_watermark_fast(zone, order, mark,
mm/page_alloc.c
3906
if (cond_accept_memory(zone, order, alloc_flags))
mm/page_alloc.c
3914
if (_deferred_grow_zone(zone, order))
mm/page_alloc.c
3923
!zone_allows_reclaim(zonelist_zone(ac->preferred_zoneref), zone))
mm/page_alloc.c
3926
ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
mm/page_alloc.c
3936
if (zone_watermark_ok(zone, order, mark,
mm/page_alloc.c
3945
page = rmqueue(zonelist_zone(ac->preferred_zoneref), zone, order,
mm/page_alloc.c
3952
if (cond_accept_memory(zone, order, alloc_flags))
mm/page_alloc.c
3957
if (_deferred_grow_zone(zone, order))
mm/page_alloc.c
4183
struct zone *zone = page_zone(page);
mm/page_alloc.c
4185
zone->compact_blockskip_flush = false;
mm/page_alloc.c
4186
compaction_defer_reset(zone, order, true);
mm/page_alloc.c
4283
struct zone *zone;
mm/page_alloc.c
4295
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
mm/page_alloc.c
4297
if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
mm/page_alloc.c
4453
struct zone *zone;
mm/page_alloc.c
4463
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
mm/page_alloc.c
4465
if (!managed_zone(zone))
mm/page_alloc.c
4467
if (last_pgdat == zone->zone_pgdat)
mm/page_alloc.c
4469
wakeup_kswapd(zone, gfp_mask, reclaim_order, highest_zoneidx);
mm/page_alloc.c
4470
last_pgdat = zone->zone_pgdat;
mm/page_alloc.c
4583
struct zone *zone;
mm/page_alloc.c
4607
for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
mm/page_alloc.c
4611
unsigned long min_wmark = min_wmark_pages(zone);
mm/page_alloc.c
4616
!__cpuset_zone_allowed(zone, gfp_mask))
mm/page_alloc.c
4619
available = reclaimable = zone_reclaimable_pages(zone);
mm/page_alloc.c
4620
available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
mm/page_alloc.c
4626
wmark = __zone_watermark_ok(zone, order, min_wmark,
mm/page_alloc.c
5049
struct zone *zone;
mm/page_alloc.c
5102
for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
mm/page_alloc.c
5106
!__cpuset_zone_allowed(zone, gfp)) {
mm/page_alloc.c
5110
if (nr_online_nodes > 1 && zone != zonelist_zone(ac.preferred_zoneref) &&
mm/page_alloc.c
5111
zone_to_nid(zone) != zonelist_node_idx(ac.preferred_zoneref)) {
mm/page_alloc.c
5115
cond_accept_memory(zone, 0, alloc_flags);
mm/page_alloc.c
5117
mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK) + nr_pages - nr_populated;
mm/page_alloc.c
5118
if (zone_watermark_fast(zone, 0, mark,
mm/page_alloc.c
5124
if (cond_accept_memory(zone, 0, alloc_flags))
mm/page_alloc.c
5129
if (_deferred_grow_zone(zone, 0))
mm/page_alloc.c
5138
if (unlikely(!zone))
mm/page_alloc.c
5142
pcp = pcp_spin_trylock(zone->per_cpu_pageset);
mm/page_alloc.c
5156
page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
mm/page_alloc.c
5175
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
mm/page_alloc.c
5176
zone_statistics(zonelist_zone(ac.preferred_zoneref), zone, nr_account);
mm/page_alloc.c
5486
struct zone *zone;
mm/page_alloc.c
5493
for_each_zone_zonelist(zone, z, zonelist, offset) {
mm/page_alloc.c
5494
unsigned long size = zone_managed_pages(zone);
mm/page_alloc.c
5495
unsigned long high = high_wmark_pages(zone);
mm/page_alloc.c
5518
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
mm/page_alloc.c
5520
zoneref->zone = zone;
mm/page_alloc.c
5521
zoneref->zone_idx = zone_idx(zone);
mm/page_alloc.c
5531
struct zone *zone;
mm/page_alloc.c
5537
zone = pgdat->node_zones + zone_type;
mm/page_alloc.c
5538
if (populated_zone(zone)) {
mm/page_alloc.c
5539
zoneref_set_zone(zone, &zonerefs[nr_zones++]);
mm/page_alloc.c
5663
zonerefs->zone = NULL;
mm/page_alloc.c
5678
zonerefs->zone = NULL;
mm/page_alloc.c
570
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
mm/page_alloc.c
5747
zonerefs->zone = NULL;
mm/page_alloc.c
578
seq = zone_span_seqbegin(zone);
mm/page_alloc.c
579
start_pfn = zone->zone_start_pfn;
mm/page_alloc.c
580
sp = zone->spanned_pages;
mm/page_alloc.c
581
ret = !zone_spans_pfn(zone, pfn);
mm/page_alloc.c
582
} while (zone_span_seqretry(zone, seq));
mm/page_alloc.c
586
pfn, zone_to_nid(zone), zone->name,
mm/page_alloc.c
5900
static int zone_batchsize(struct zone *zone)
mm/page_alloc.c
5911
batch = min(zone_managed_pages(zone) >> 12, SZ_256K / PAGE_SIZE);
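In that formula, SZ_256K / PAGE_SIZE is 64 with 4 KiB pages, so the per-cpu batch is roughly 1/4096th of the zone's managed pages, capped at 256 KiB worth:

	/* assuming PAGE_SIZE == 4 KiB */
	/* 128 MiB zone:  32768 managed pages >> 12 ->  8 pages per batch */
	/*   1 GiB zone: 262144 managed pages >> 12 -> 64 pages (the cap) */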
mm/page_alloc.c
5948
static int zone_highsize(struct zone *zone, int batch, int cpu_online,
mm/page_alloc.c
595
static bool __maybe_unused bad_range(struct zone *zone, struct page *page)
mm/page_alloc.c
5962
total_pages = low_wmark_pages(zone);
mm/page_alloc.c
5969
total_pages = zone_managed_pages(zone) / high_fraction;
mm/page_alloc.c
597
if (page_outside_zone_boundaries(zone, page))
mm/page_alloc.c
5980
nr_split_cpus = cpumask_weight(cpumask_of_node(zone_to_nid(zone))) + cpu_online;
mm/page_alloc.c
599
if (zone != page_zone(page))
mm/page_alloc.c
6044
static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high_min,
mm/page_alloc.c
605
static inline bool __maybe_unused bad_range(struct zone *zone, struct page *page)
mm/page_alloc.c
6051
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
mm/page_alloc.c
6060
static void zone_set_pageset_high_and_batch(struct zone *zone, int cpu_online)
mm/page_alloc.c
6064
new_batch = zone_batchsize(zone);
mm/page_alloc.c
6066
new_high_min = zone_highsize(zone, new_batch, cpu_online,
mm/page_alloc.c
6074
new_high_min = zone_highsize(zone, new_batch, cpu_online, 0);
mm/page_alloc.c
6075
new_high_max = zone_highsize(zone, new_batch, cpu_online,
mm/page_alloc.c
6079
if (zone->pageset_high_min == new_high_min &&
mm/page_alloc.c
6080
zone->pageset_high_max == new_high_max &&
mm/page_alloc.c
6081
zone->pageset_batch == new_batch)
mm/page_alloc.c
6084
zone->pageset_high_min = new_high_min;
mm/page_alloc.c
6085
zone->pageset_high_max = new_high_max;
mm/page_alloc.c
6086
zone->pageset_batch = new_batch;
mm/page_alloc.c
6088
__zone_set_pageset_high_and_batch(zone, new_high_min, new_high_max,
mm/page_alloc.c
6092
void __meminit setup_zone_pageset(struct zone *zone)
mm/page_alloc.c
6098
zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat);
mm/page_alloc.c
6100
zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages);
mm/page_alloc.c
6105
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
mm/page_alloc.c
6106
pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
mm/page_alloc.c
6110
zone_set_pageset_high_and_batch(zone, 0);
mm/page_alloc.c
6117
static void zone_pcp_update(struct zone *zone, int cpu_online)
mm/page_alloc.c
6120
zone_set_pageset_high_and_batch(zone, cpu_online);
mm/page_alloc.c
6124
static void zone_pcp_update_cacheinfo(struct zone *zone, unsigned int cpu)
mm/page_alloc.c
6129
pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
mm/page_alloc.c
6148
struct zone *zone;
mm/page_alloc.c
6150
for_each_populated_zone(zone)
mm/page_alloc.c
6151
zone_pcp_update_cacheinfo(zone, cpu);
mm/page_alloc.c
6161
struct zone *zone;
mm/page_alloc.c
6164
for_each_populated_zone(zone)
mm/page_alloc.c
6165
setup_zone_pageset(zone);
mm/page_alloc.c
6186
__meminit void zone_pcp_init(struct zone *zone)
mm/page_alloc.c
6193
zone->per_cpu_pageset = &boot_pageset;
mm/page_alloc.c
6194
zone->per_cpu_zonestats = &boot_zonestats;
mm/page_alloc.c
6195
zone->pageset_high_min = BOOT_PAGESET_HIGH;
mm/page_alloc.c
6196
zone->pageset_high_max = BOOT_PAGESET_HIGH;
mm/page_alloc.c
6197
zone->pageset_batch = BOOT_PAGESET_BATCH;
mm/page_alloc.c
6199
if (populated_zone(zone))
mm/page_alloc.c
6200
pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name,
mm/page_alloc.c
6201
zone->present_pages, zone_batchsize(zone));
mm/page_alloc.c
6262
struct zone *zone;
mm/page_alloc.c
6285
for_each_populated_zone(zone)
mm/page_alloc.c
6286
zone_pcp_update(zone, 0);
mm/page_alloc.c
6293
struct zone *zone;
mm/page_alloc.c
6295
for_each_populated_zone(zone)
mm/page_alloc.c
6296
zone_pcp_update(zone, 1);
mm/page_alloc.c
6326
struct zone *zone = pgdat->node_zones + i;
mm/page_alloc.c
6328
unsigned long managed_pages = zone_managed_pages(zone);
mm/page_alloc.c
6339
if (!zone->lowmem_reserve[j])
mm/page_alloc.c
6342
max = zone->lowmem_reserve[j];
mm/page_alloc.c
6346
max += high_wmark_pages(zone);
mm/page_alloc.c
6386
struct zone *zone = &pgdat->node_zones[i];
mm/page_alloc.c
6388
bool clear = !ratio || !zone_managed_pages(zone);
mm/page_alloc.c
6392
struct zone *upper_zone = &pgdat->node_zones[j];
mm/page_alloc.c
6397
zone->lowmem_reserve[j] = 0;
mm/page_alloc.c
6399
zone->lowmem_reserve[j] = managed_pages / ratio;
mm/page_alloc.c
6400
trace_mm_setup_per_zone_lowmem_reserve(zone, upper_zone,
mm/page_alloc.c
6401
zone->lowmem_reserve[j]);
mm/page_alloc.c
6414
struct zone *zone;
mm/page_alloc.c
6418
for_each_zone(zone) {
mm/page_alloc.c
6419
if (!is_highmem(zone) && zone_idx(zone) != ZONE_MOVABLE)
mm/page_alloc.c
6420
lowmem_pages += zone_managed_pages(zone);
mm/page_alloc.c
6423
for_each_zone(zone) {
mm/page_alloc.c
6426
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
6427
tmp = (u64)pages_min * zone_managed_pages(zone);
mm/page_alloc.c
6429
if (is_highmem(zone) || zone_idx(zone) == ZONE_MOVABLE) {
mm/page_alloc.c
6441
min_pages = zone_managed_pages(zone) / 1024;
mm/page_alloc.c
6443
zone->_watermark[WMARK_MIN] = min_pages;
mm/page_alloc.c
6449
zone->_watermark[WMARK_MIN] = tmp;
mm/page_alloc.c
6458
mult_frac(zone_managed_pages(zone),
mm/page_alloc.c
6461
zone->watermark_boost = 0;
mm/page_alloc.c
6462
zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
mm/page_alloc.c
6463
zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp;
mm/page_alloc.c
6464
zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp;
mm/page_alloc.c
6465
trace_mm_setup_per_zone_wmarks(zone);
mm/page_alloc.c
6467
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
6483
struct zone *zone;
mm/page_alloc.c
6494
for_each_zone(zone)
mm/page_alloc.c
6495
zone_pcp_update(zone, 0);
mm/page_alloc.c
6596
struct zone *zone;
mm/page_alloc.c
6601
for_each_zone(zone)
mm/page_alloc.c
6602
zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
mm/page_alloc.c
6624
struct zone *zone;
mm/page_alloc.c
6629
for_each_zone(zone)
mm/page_alloc.c
6630
zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
mm/page_alloc.c
6682
struct zone *zone;
mm/page_alloc.c
6713
for_each_populated_zone(zone)
mm/page_alloc.c
6714
zone_set_pageset_high_and_batch(zone, 0);
mm/page_alloc.c
6829
.nid = zone_to_nid(cc->zone),
mm/page_alloc.c
6854
nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
mm/page_alloc.c
6978
.zone = page_zone(pfn_to_page(start)),
mm/page_alloc.c
7026
drain_all_pages(cc.zone);
mm/page_alloc.c
7141
static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
mm/page_alloc.c
7190
static bool zone_spans_last_pfn(const struct zone *zone,
mm/page_alloc.c
7195
return zone_spans_pfn(zone, last_pfn);
mm/page_alloc.c
7229
struct zone *zone;
mm/page_alloc.c
7236
for_each_zone_zonelist_nodemask(zone, z, zonelist,
mm/page_alloc.c
7238
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
7240
pfn = ALIGN(zone->zone_start_pfn, nr_pages);
mm/page_alloc.c
7241
while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
mm/page_alloc.c
7242
if (pfn_range_valid_contig(zone, pfn, nr_pages,
mm/page_alloc.c
725
static inline struct capture_control *task_capc(struct zone *zone)
mm/page_alloc.c
7252
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
7259
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
7263
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
732
capc->cc->zone == zone ? capc : NULL;
mm/page_alloc.c
7364
void zone_pcp_disable(struct zone *zone)
mm/page_alloc.c
7367
__zone_set_pageset_high_and_batch(zone, 0, 0, 1);
mm/page_alloc.c
7368
__drain_all_pages(zone, true);
mm/page_alloc.c
7371
void zone_pcp_enable(struct zone *zone)
mm/page_alloc.c
7373
__zone_set_pageset_high_and_batch(zone, zone->pageset_high_min,
mm/page_alloc.c
7374
zone->pageset_high_max, zone->pageset_batch);
mm/page_alloc.c
7378
void zone_pcp_reset(struct zone *zone)
mm/page_alloc.c
7383
if (zone->per_cpu_pageset != &boot_pageset) {
mm/page_alloc.c
7385
pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
mm/page_alloc.c
7386
drain_zonestat(zone, pzstats);
mm/page_alloc.c
7388
free_percpu(zone->per_cpu_pageset);
mm/page_alloc.c
7389
zone->per_cpu_pageset = &boot_pageset;
mm/page_alloc.c
7390
if (zone->per_cpu_zonestats != &boot_zonestats) {
mm/page_alloc.c
7391
free_percpu(zone->per_cpu_zonestats);
mm/page_alloc.c
7392
zone->per_cpu_zonestats = &boot_zonestats;
mm/page_alloc.c
7412
struct zone *zone;
mm/page_alloc.c
7416
zone = page_zone(pfn_to_page(pfn));
mm/page_alloc.c
7417
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
7444
del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE);
mm/page_alloc.c
7447
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
7474
static inline void add_to_free_list(struct page *page, struct zone *zone,
mm/page_alloc.c
7478
__add_to_free_list(page, zone, order, migratetype, tail);
mm/page_alloc.c
7479
account_freepages(zone, 1 << order, migratetype);
mm/page_alloc.c
7486
static void break_down_buddy_pages(struct zone *zone, struct page *page,
mm/page_alloc.c
7504
if (set_page_guard(zone, current_buddy, high))
mm/page_alloc.c
7507
add_to_free_list(current_buddy, zone, high, migratetype, false);
mm/page_alloc.c
7517
struct zone *zone = page_zone(page);
mm/page_alloc.c
7523
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
7533
del_page_from_free_list(page_head, zone, page_order,
mm/page_alloc.c
7535
break_down_buddy_pages(zone, page_head, page, 0,
mm/page_alloc.c
7544
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
7553
struct zone *zone = page_zone(page);
mm/page_alloc.c
7557
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
7563
__free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE);
mm/page_alloc.c
7568
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
7574
bool has_managed_zone(enum zone_type zone)
mm/page_alloc.c
7579
if (managed_zone(&pgdat->node_zones[zone]))
mm/page_alloc.c
7610
static void __accept_page(struct zone *zone, unsigned long *flags,
mm/page_alloc.c
7614
account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
mm/page_alloc.c
7615
__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
mm/page_alloc.c
7617
spin_unlock_irqrestore(&zone->lock, *flags);
mm/page_alloc.c
7626
struct zone *zone = page_zone(page);
mm/page_alloc.c
7629
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
7631
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
7636
__accept_page(zone, &flags, page);
mm/page_alloc.c
7639
static bool try_to_accept_memory_one(struct zone *zone)
mm/page_alloc.c
7644
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
7645
page = list_first_entry_or_null(&zone->unaccepted_pages,
mm/page_alloc.c
7648
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
7653
__accept_page(zone, &flags, page);
mm/page_alloc.c
7658
static bool cond_accept_memory(struct zone *zone, unsigned int order,
mm/page_alloc.c
7664
if (list_empty(&zone->unaccepted_pages))
mm/page_alloc.c
767
static inline struct capture_control *task_capc(struct zone *zone)
mm/page_alloc.c
7671
wmark = promo_wmark_pages(zone);
mm/page_alloc.c
7679
return try_to_accept_memory_one(zone);
mm/page_alloc.c
7683
(zone_page_state(zone, NR_FREE_PAGES) -
mm/page_alloc.c
7684
__zone_watermark_unusable_free(zone, order, 0) -
mm/page_alloc.c
7685
zone_page_state(zone, NR_UNACCEPTED));
mm/page_alloc.c
7688
if (!try_to_accept_memory_one(zone))
mm/page_alloc.c
7699
struct zone *zone = page_zone(page);
mm/page_alloc.c
7705
spin_lock_irqsave(&zone->lock, flags);
mm/page_alloc.c
7706
list_add_tail(&page->lru, &zone->unaccepted_pages);
mm/page_alloc.c
7707
account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
mm/page_alloc.c
7708
__mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES);
mm/page_alloc.c
7710
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_alloc.c
7722
static bool cond_accept_memory(struct zone *zone, unsigned int order,
mm/page_alloc.c
780
static inline void account_freepages(struct zone *zone, int nr_pages,
mm/page_alloc.c
783
lockdep_assert_held(&zone->lock);
mm/page_alloc.c
788
__mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages);
mm/page_alloc.c
791
__mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages);
mm/page_alloc.c
793
WRITE_ONCE(zone->nr_free_highatomic,
mm/page_alloc.c
794
zone->nr_free_highatomic + nr_pages);
mm/page_alloc.c
798
static inline void __add_to_free_list(struct page *page, struct zone *zone,
mm/page_alloc.c
802
struct free_area *area = &zone->free_area[order];
mm/page_alloc.c
816
__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
mm/page_alloc.c
824
static inline void move_to_free_list(struct page *page, struct zone *zone,
mm/page_alloc.c
827
struct free_area *area = &zone->free_area[order];
mm/page_alloc.c
837
account_freepages(zone, -nr_pages, old_mt);
mm/page_alloc.c
838
account_freepages(zone, nr_pages, new_mt);
mm/page_alloc.c
844
__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, nr_pages);
mm/page_alloc.c
848
static inline void __del_page_from_free_list(struct page *page, struct zone *zone,
mm/page_alloc.c
864
zone->free_area[order].nr_free--;
mm/page_alloc.c
867
__mod_zone_page_state(zone, NR_FREE_PAGES_BLOCKS, -nr_pages);
mm/page_alloc.c
870
static inline void del_page_from_free_list(struct page *page, struct zone *zone,
mm/page_alloc.c
873
__del_page_from_free_list(page, zone, order, migratetype);
mm/page_alloc.c
874
account_freepages(zone, -(1 << order), migratetype);
mm/page_alloc.c
946
struct zone *zone, unsigned int order,
mm/page_alloc.c
949
struct capture_control *capc = task_capc(zone);
mm/page_alloc.c
955
VM_BUG_ON(!zone_is_initialized(zone));
mm/page_alloc.c
960
VM_BUG_ON_PAGE(bad_range(zone, page), page);
mm/page_alloc.c
962
account_freepages(zone, 1 << order, migratetype);
mm/page_alloc.c
968
account_freepages(zone, -(1 << order), migratetype);
mm/page_alloc.c
996
clear_page_guard(zone, buddy, order);
mm/page_alloc.c
998
__del_page_from_free_list(buddy, zone, order, buddy_mt);
mm/page_isolation.c
131
struct zone *zone = page_zone(page);
mm/page_isolation.c
152
if (page_is_unmovable(zone, page, mode, &step))
mm/page_isolation.c
168
struct zone *zone = page_zone(page);
mm/page_isolation.c
176
spin_lock_irqsave(&zone->lock, flags);
mm/page_isolation.c
18
bool page_is_unmovable(struct zone *zone, struct page *page,
mm/page_isolation.c
184
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_isolation.c
202
if (!pageblock_isolate_and_move_free_pages(zone, page)) {
mm/page_isolation.c
203
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_isolation.c
206
zone->nr_isolate_pageblock++;
mm/page_isolation.c
207
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_isolation.c
211
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_isolation.c
225
struct zone *zone;
mm/page_isolation.c
231
zone = page_zone(page);
mm/page_isolation.c
232
spin_lock_irqsave(&zone->lock, flags);
mm/page_isolation.c
276
WARN_ON_ONCE(!pageblock_unisolate_and_move_free_pages(zone, page));
mm/page_isolation.c
281
zone->nr_isolate_pageblock--;
mm/page_isolation.c
283
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_isolation.c
331
struct zone *zone;
mm/page_isolation.c
347
zone = page_zone(pfn_to_page(isolate_pageblock));
mm/page_isolation.c
349
zone->zone_start_pfn);
mm/page_isolation.c
35
if (zone_idx(zone) == ZONE_MOVABLE)
mm/page_isolation.c
613
struct zone *zone;
mm/page_isolation.c
643
zone = page_zone(page);
mm/page_isolation.c
644
spin_lock_irqsave(&zone->lock, flags);
mm/page_isolation.c
646
spin_unlock_irqrestore(&zone->lock, flags);
mm/page_owner.c
426
pg_data_t *pgdat, struct zone *zone)
mm/page_owner.c
432
unsigned long end_pfn = zone_end_pfn(zone);
mm/page_owner.c
438
pfn = zone->zone_start_pfn;
mm/page_owner.c
461
if (page_zone(page) != zone)
mm/page_owner.c
502
seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
mm/page_owner.c
772
static void init_pages_in_zone(struct zone *zone)
mm/page_owner.c
774
unsigned long pfn = zone->zone_start_pfn;
mm/page_owner.c
775
unsigned long end_pfn = zone_end_pfn(zone);
mm/page_owner.c
798
if (page_zone(page) != zone)
mm/page_owner.c
839
zone->zone_pgdat->node_id, zone->name, count);
mm/page_owner.c
844
struct zone *zone;
mm/page_owner.c
846
for_each_populated_zone(zone)
mm/page_owner.c
847
init_pages_in_zone(zone);
mm/page_reporting.c
146
page_reporting_cycle(struct page_reporting_dev_info *prdev, struct zone *zone,
mm/page_reporting.c
150
struct free_area *area = &zone->free_area[order];
mm/page_reporting.c
164
spin_lock_irq(&zone->lock);
mm/page_reporting.c
222
spin_unlock_irq(&zone->lock);
mm/page_reporting.c
234
spin_lock_irq(&zone->lock);
mm/page_reporting.c
254
spin_unlock_irq(&zone->lock);
mm/page_reporting.c
261
struct scatterlist *sgl, struct zone *zone)
mm/page_reporting.c
268
watermark = low_wmark_pages(zone) +
mm/page_reporting.c
275
if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
mm/page_reporting.c
285
err = page_reporting_cycle(prdev, zone, order, mt,
mm/page_reporting.c
299
spin_lock_irq(&zone->lock);
mm/page_reporting.c
301
spin_unlock_irq(&zone->lock);
mm/page_reporting.c
314
struct zone *zone;
mm/page_reporting.c
331
for_each_zone(zone) {
mm/page_reporting.c
332
err = page_reporting_process_zone(prdev, sgl, zone);
mm/show_mem.c
100
if (is_highmem(zone)) {
mm/show_mem.c
101
managed_highpages += zone_managed_pages(zone);
mm/show_mem.c
102
free_highpages += zone_page_state(zone, NR_FREE_PAGES);
mm/show_mem.c
184
struct zone *zone;
mm/show_mem.c
187
for_each_populated_zone(zone) {
mm/show_mem.c
188
if (zone_idx(zone) > max_zone_idx)
mm/show_mem.c
190
if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
mm/show_mem.c
194
free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
mm/show_mem.c
26
static inline void show_node(struct zone *zone)
mm/show_mem.c
289
for_each_populated_zone(zone) {
mm/show_mem.c
29
printk("Node %d ", zone_to_nid(zone));
mm/show_mem.c
292
if (zone_idx(zone) > max_zone_idx)
mm/show_mem.c
294
if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
mm/show_mem.c
299
free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count;
mm/show_mem.c
301
show_node(zone);
mm/show_mem.c
326
zone->name,
mm/show_mem.c
327
K(zone_page_state(zone, NR_FREE_PAGES)),
mm/show_mem.c
328
K(zone->watermark_boost),
mm/show_mem.c
329
K(min_wmark_pages(zone)),
mm/show_mem.c
330
K(low_wmark_pages(zone)),
mm/show_mem.c
331
K(high_wmark_pages(zone)),
mm/show_mem.c
332
K(zone->nr_reserved_highatomic),
mm/show_mem.c
333
K(zone->nr_free_highatomic),
mm/show_mem.c
334
K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
mm/show_mem.c
335
K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
mm/show_mem.c
336
K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
mm/show_mem.c
337
K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
mm/show_mem.c
338
K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
mm/show_mem.c
339
K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
mm/show_mem.c
341
K(zone_page_state(zone, NR_ZSPAGES)),
mm/show_mem.c
345
K(zone->present_pages),
mm/show_mem.c
346
K(zone_managed_pages(zone)),
mm/show_mem.c
347
K(zone_page_state(zone, NR_MLOCK)),
mm/show_mem.c
350
K(this_cpu_read(zone->per_cpu_pageset->count)),
mm/show_mem.c
351
K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
mm/show_mem.c
354
printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
mm/show_mem.c
358
for_each_populated_zone(zone) {
mm/show_mem.c
363
if (zone_idx(zone) > max_zone_idx)
mm/show_mem.c
365
if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
mm/show_mem.c
367
show_node(zone);
mm/show_mem.c
368
printk(KERN_CONT "%s: ", zone->name);
mm/show_mem.c
370
spin_lock_irqsave(&zone->lock, flags);
mm/show_mem.c
372
struct free_area *area = &zone->free_area[order];
mm/show_mem.c
38
struct zone *zone;
mm/show_mem.c
384
spin_unlock_irqrestore(&zone->lock, flags);
mm/show_mem.c
40
for_each_zone(zone)
mm/show_mem.c
408
struct zone *zone;
mm/show_mem.c
41
wmark_low += low_wmark_pages(zone);
mm/show_mem.c
413
for_each_populated_zone(zone) {
mm/show_mem.c
415
total += zone->present_pages;
mm/show_mem.c
416
reserved += zone->present_pages - zone_managed_pages(zone);
mm/show_mem.c
418
if (is_highmem(zone))
mm/show_mem.c
419
highmem += zone->present_pages;
mm/show_mem.c
98
struct zone *zone = &pgdat->node_zones[zone_type];
mm/show_mem.c
99
managed_pages += zone_managed_pages(zone);
mm/shuffle.c
155
struct zone *z;
mm/shuffle.c
36
static struct page * __meminit shuffle_valid_page(struct zone *zone,
mm/shuffle.c
51
if (page_zone(page) != zone)
mm/shuffle.c
80
void __meminit __shuffle_zone(struct zone *z)
mm/shuffle.h
20
extern void __shuffle_zone(struct zone *z);
mm/shuffle.h
21
static inline void __meminit shuffle_zone(struct zone *z)
mm/shuffle.h
44
static inline void shuffle_zone(struct zone *z)
mm/slub.c
3847
struct zone *zone;
mm/slub.c
3885
for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
mm/slub.c
3888
n = get_node(s, zone_to_nid(zone));
mm/slub.c
3890
if (n && cpuset_zone_allowed(zone, pc->flags) &&
mm/slub.c
7174
struct zone *zone;
mm/slub.c
7187
for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
mm/slub.c
7191
n = get_node(s, zone_to_nid(zone));
mm/slub.c
7193
if (!n || !cpuset_zone_allowed(zone, gfp) ||
mm/sparse-vmemmap.c
329
static __meminit struct page *vmemmap_get_tail(unsigned int order, struct zone *zone)
mm/sparse-vmemmap.c
333
int node = zone_to_nid(zone);
mm/sparse-vmemmap.c
341
tail = zone->vmemmap_tails[idx];
mm/sparse-vmemmap.c
359
zone->vmemmap_tails[idx] = tail;
mm/sparse-vmemmap.c
365
unsigned int order, struct zone *zone,
mm/sparse-vmemmap.c
371
int node = zone_to_nid(zone);
mm/sparse-vmemmap.c
373
tail = vmemmap_get_tail(order, zone);
mm/swap.c
757
void lru_add_drain_cpu_zone(struct zone *zone)
mm/swap.c
761
drain_local_pages(zone);
mm/vmscan.c
1615
unsigned int reclaim_clean_pages_from_list(struct zone *zone,
mm/vmscan.c
1646
nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc,
mm/vmscan.c
1651
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
mm/vmscan.c
1659
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON,
mm/vmscan.c
1661
mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE,
mm/vmscan.c
2386
struct zone *zone;
mm/vmscan.c
2392
for_each_managed_zone_pgdat(zone, pgdat, z, MAX_NR_ZONES - 1) {
mm/vmscan.c
2393
total_high_wmark += high_wmark_pages(zone);
mm/vmscan.c
2689
#define for_each_gen_type_zone(gen, type, zone) \
mm/vmscan.c
2692
for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++)
mm/vmscan.c
283
#define for_each_managed_zone_pgdat(zone, pgdat, idx, highidx) \
mm/vmscan.c
284
for ((idx) = 0, (zone) = (pgdat)->node_zones; \
mm/vmscan.c
286
(idx)++, (zone)++) \
mm/vmscan.c
287
if (!managed_zone(zone)) \
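The loop condition of this macro (mm/vmscan.c line 285) contains no occurrence of "zone" and so is elided above; the full definition plausibly reads:

#define for_each_managed_zone_pgdat(zone, pgdat, idx, highidx)	\
	for ((idx) = 0, (zone) = (pgdat)->node_zones;		\
	     (idx) <= (highidx);				\
	     (idx)++, (zone)++)					\
		if (!managed_zone(zone))			\
			continue;				\
		else

which matches its uses at lines 419 and 2392 above.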
mm/vmscan.c
3278
int zone = folio_zonenum(folio);
mm/vmscan.c
3286
walk->nr_pages[old_gen][type][zone] -= delta;
mm/vmscan.c
3287
walk->nr_pages[new_gen][type][zone] += delta;
mm/vmscan.c
3292
int gen, type, zone;
mm/vmscan.c
3298
for_each_gen_type_zone(gen, type, zone) {
mm/vmscan.c
3300
int delta = walk->nr_pages[gen][type][zone];
mm/vmscan.c
3305
walk->nr_pages[gen][type][zone] = 0;
mm/vmscan.c
3306
WRITE_ONCE(lrugen->nr_pages[gen][type][zone],
mm/vmscan.c
3307
lrugen->nr_pages[gen][type][zone] + delta);
mm/vmscan.c
3311
__update_lru_size(lruvec, lru, zone, delta);
mm/vmscan.c
3850
int zone;
mm/vmscan.c
3865
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
mm/vmscan.c
3866
struct list_head *head = &lrugen->folios[old_gen][type][zone];
mm/vmscan.c
3876
VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
mm/vmscan.c
3879
list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]);
mm/vmscan.c
3903
int gen, type, zone;
mm/vmscan.c
3916
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
mm/vmscan.c
3917
if (!list_empty(&lrugen->folios[gen][type][zone]))
mm/vmscan.c
393
unsigned long zone_reclaimable_pages(struct zone *zone)
mm/vmscan.c
3962
int type, zone;
mm/vmscan.c
397
nr = zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_FILE) +
mm/vmscan.c
398
zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_FILE);
mm/vmscan.c
399
if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
mm/vmscan.c
3998
for (zone = 0; zone < MAX_NR_ZONES; zone++) {
mm/vmscan.c
400
nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
mm/vmscan.c
4000
long delta = lrugen->nr_pages[prev][type][zone] -
mm/vmscan.c
4001
lrugen->nr_pages[next][type][zone];
mm/vmscan.c
4006
__update_lru_size(lruvec, lru, zone, delta);
mm/vmscan.c
4007
__update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta);
mm/vmscan.c
401
zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
mm/vmscan.c
4109
int gen, type, zone;
mm/vmscan.c
4123
for (zone = 0; zone < MAX_NR_ZONES; zone++)
mm/vmscan.c
4124
total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
mm/vmscan.c
417
struct zone *zone;
mm/vmscan.c
419
for_each_managed_zone_pgdat(zone, lruvec_pgdat(lruvec), zid, zone_idx) {
mm/vmscan.c
423
size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
mm/vmscan.c
4453
int zone = folio_zonenum(folio);
mm/vmscan.c
4474
list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
mm/vmscan.c
4481
list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
mm/vmscan.c
4494
if (zone > sc->reclaim_idx) {
mm/vmscan.c
4496
list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
mm/vmscan.c
4511
list_move(&folio->lru, &lrugen->folios[gen][type][zone]);
mm/vmscan.c
4576
int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES;
mm/vmscan.c
4577
struct list_head *head = &lrugen->folios[gen][type][zone];
mm/vmscan.c
4586
VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
mm/vmscan.c
4606
__count_zid_vm_events(PGSCAN_SKIP, zone, skipped_zone);
mm/vmscan.c
4784
int gen, type, zone;
mm/vmscan.c
4800
for (zone = 0; zone < MAX_NR_ZONES; zone++)
mm/vmscan.c
4801
size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
mm/vmscan.c
4861
struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i;
mm/vmscan.c
4862
unsigned long size = wmark_pages(zone, mark) + MIN_LRU_BATCH;
mm/vmscan.c
4864
if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0))
mm/vmscan.c
506
struct zone *zone;
mm/vmscan.c
5100
int gen, type, zone;
mm/vmscan.c
5102
for_each_gen_type_zone(gen, type, zone) {
mm/vmscan.c
5103
if (!list_empty(&lrugen->folios[gen][type][zone]))
mm/vmscan.c
5144
int gen, type, zone;
mm/vmscan.c
5147
for_each_gen_type_zone(gen, type, zone) {
mm/vmscan.c
5148
struct list_head *head = &lruvec->lrugen.folios[gen][type][zone];
mm/vmscan.c
5157
VM_WARN_ON_ONCE_FOLIO(folio_zonenum(folio) != zone, folio);
mm/vmscan.c
519
for_each_managed_zone_pgdat(zone, pgdat, i, MAX_NR_ZONES - 1) {
mm/vmscan.c
520
reclaimable += zone_reclaimable_pages(zone);
mm/vmscan.c
521
write_pending += zone_page_state_snapshot(zone,
mm/vmscan.c
5449
int type, zone;
mm/vmscan.c
5459
for (zone = 0; zone < MAX_NR_ZONES; zone++)
mm/vmscan.c
5460
size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
mm/vmscan.c
5686
int gen, type, zone;
mm/vmscan.c
5696
for_each_gen_type_zone(gen, type, zone)
mm/vmscan.c
5697
INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]);
mm/vmscan.c
5927
struct zone *zone;
mm/vmscan.c
5947
for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) {
mm/vmscan.c
5948
unsigned long watermark = min_wmark_pages(zone);
mm/vmscan.c
5951
if (zone_watermark_ok(zone, sc->order, watermark,
mm/vmscan.c
5955
if (compaction_suitable(zone, sc->order, watermark,
mm/vmscan.c
6169
static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
mm/vmscan.c
6177
if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
mm/vmscan.c
6191
watermark = high_wmark_pages(zone);
mm/vmscan.c
6192
if (compaction_suitable(zone, sc->order, watermark, sc->reclaim_idx))
mm/vmscan.c
6239
struct zone *zone;
mm/vmscan.c
6257
for_each_zone_zonelist_nodemask(zone, z, zonelist,
mm/vmscan.c
6264
if (!cpuset_zone_allowed(zone,
mm/vmscan.c
6279
compaction_ready(zone, sc)) {
mm/vmscan.c
6290
if (zone->zone_pgdat == last_pgdat)
mm/vmscan.c
6300
nr_soft_reclaimed = memcg1_soft_limit_reclaim(zone->zone_pgdat,
mm/vmscan.c
6309
first_pgdat = zone->zone_pgdat;
mm/vmscan.c
6312
if (zone->zone_pgdat == last_pgdat)
mm/vmscan.c
6314
last_pgdat = zone->zone_pgdat;
mm/vmscan.c
6315
shrink_node(zone->zone_pgdat, sc);
mm/vmscan.c
6365
struct zone *zone;
mm/vmscan.c
6387
for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx,
mm/vmscan.c
6389
if (zone->zone_pgdat == last_pgdat)
mm/vmscan.c
6391
last_pgdat = zone->zone_pgdat;
mm/vmscan.c
6393
snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat);
mm/vmscan.c
6399
zone->zone_pgdat);
mm/vmscan.c
6458
struct zone *zone;
mm/vmscan.c
6467
for_each_managed_zone_pgdat(zone, pgdat, i, ZONE_NORMAL) {
mm/vmscan.c
6468
if (!zone_reclaimable_pages(zone) && zone_page_state_snapshot(zone, NR_FREE_PAGES))
mm/vmscan.c
6471
pfmemalloc_reserve += min_wmark_pages(zone);
mm/vmscan.c
6472
free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES);
mm/vmscan.c
6505
struct zone *zone;
mm/vmscan.c
6539
for_each_zone_zonelist_nodemask(zone, z, zonelist,
mm/vmscan.c
6541
if (zone_idx(zone) > ZONE_NORMAL)
mm/vmscan.c
6545
pgdat = zone->zone_pgdat;
mm/vmscan.c
6571
wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
mm/vmscan.c
6750
struct zone *zone;
mm/vmscan.c
6760
zone = pgdat->node_zones + i;
mm/vmscan.c
6761
if (!managed_zone(zone))
mm/vmscan.c
6764
if (zone->watermark_boost)
mm/vmscan.c
6779
struct zone *zone;
mm/vmscan.c
6785
for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
mm/vmscan.c
6790
mark = promo_wmark_pages(zone);
mm/vmscan.c
6792
mark = high_wmark_pages(zone);
mm/vmscan.c
6821
free_pages = zone_page_state(zone, item);
mm/vmscan.c
6822
if (zone->percpu_drift_mark && free_pages < zone->percpu_drift_mark)
mm/vmscan.c
6823
free_pages = zone_page_state_snapshot(zone, item);
mm/vmscan.c
6825
if (__zone_watermark_ok(zone, order, mark, highest_zoneidx,
mm/vmscan.c
6899
struct zone *zone;
mm/vmscan.c
6905
for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) {
mm/vmscan.c
6906
sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX);
mm/vmscan.c
6934
struct zone *zone;
mm/vmscan.c
6936
for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
mm/vmscan.c
6938
set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
mm/vmscan.c
6940
clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags);
mm/vmscan.c
6978
struct zone *zone;
mm/vmscan.c
6997
for_each_managed_zone_pgdat(zone, pgdat, i, highest_zoneidx) {
mm/vmscan.c
6998
nr_boost_reclaim += zone->watermark_boost;
mm/vmscan.c
6999
zone_boosts[i] = zone->watermark_boost;
mm/vmscan.c
7027
zone = pgdat->node_zones + i;
mm/vmscan.c
7028
if (!managed_zone(zone))
mm/vmscan.c
7160
zone = pgdat->node_zones + i;
mm/vmscan.c
7161
spin_lock_irqsave(&zone->lock, flags);
mm/vmscan.c
7162
zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]);
mm/vmscan.c
7163
spin_unlock_irqrestore(&zone->lock, flags);
mm/vmscan.c
7380
void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
mm/vmscan.c
7386
if (!managed_zone(zone))
mm/vmscan.c
7389
if (!cpuset_zone_allowed(zone, gfp_flags))
mm/vmscan.c
7392
pgdat = zone->zone_pgdat;
mm/vmstat.c
1063
static void fill_contig_page_info(struct zone *zone,
mm/vmstat.c
1082
blocks = data_race(zone->free_area[order].nr_free);
mm/vmstat.c
1130
unsigned int extfrag_for_order(struct zone *zone, unsigned int order)
mm/vmstat.c
1134
fill_contig_page_info(zone, order, &info);
mm/vmstat.c
1144
int fragmentation_index(struct zone *zone, unsigned int order)
mm/vmstat.c
1148
fill_contig_page_info(zone, order, &info);
mm/vmstat.c
1530
void (*print)(struct seq_file *m, pg_data_t *, struct zone *))
mm/vmstat.c
1532
struct zone *zone;
mm/vmstat.c
1533
struct zone *node_zones = pgdat->node_zones;
mm/vmstat.c
1536
for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
mm/vmstat.c
1537
if (assert_populated && !populated_zone(zone))
mm/vmstat.c
1541
spin_lock_irqsave(&zone->lock, flags);
mm/vmstat.c
1542
print(m, pgdat, zone);
mm/vmstat.c
1544
spin_unlock_irqrestore(&zone->lock, flags);
mm/vmstat.c
1551
struct zone *zone)
mm/vmstat.c
1555
seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
mm/vmstat.c
1561
seq_printf(m, "%6lu ", data_race(zone->free_area[order].nr_free));
mm/vmstat.c
1576
pg_data_t *pgdat, struct zone *zone)
mm/vmstat.c
1583
zone->name,
mm/vmstat.c
1591
area = &(zone->free_area[order]);
mm/vmstat.c
1609
spin_unlock_irq(&zone->lock);
mm/vmstat.c
1611
spin_lock_irq(&zone->lock);
mm/vmstat.c
1633
pg_data_t *pgdat, struct zone *zone)
mm/vmstat.c
1637
unsigned long start_pfn = zone->zone_start_pfn;
mm/vmstat.c
1638
unsigned long end_pfn = zone_end_pfn(zone);
mm/vmstat.c
1648
if (page_zone(page) != zone)
mm/vmstat.c
1658
seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
mm/vmstat.c
172
static void fold_vm_zone_numa_events(struct zone *zone)
mm/vmstat.c
1740
static bool is_zone_first_populated(pg_data_t *pgdat, struct zone *zone)
mm/vmstat.c
1745
struct zone *compare = &pgdat->node_zones[zid];
mm/vmstat.c
1748
return zone == compare;
mm/vmstat.c
1755
struct zone *zone)
mm/vmstat.c
1758
seq_printf(m, "Node %d, zone %8s", pgdat->node_id, zone->name);
mm/vmstat.c
1759
if (is_zone_first_populated(pgdat, zone)) {
mm/vmstat.c
1781
zone_page_state(zone, NR_FREE_PAGES),
mm/vmstat.c
1782
zone->watermark_boost,
mm/vmstat.c
1783
min_wmark_pages(zone),
mm/vmstat.c
1784
low_wmark_pages(zone),
mm/vmstat.c
1785
high_wmark_pages(zone),
mm/vmstat.c
1786
promo_wmark_pages(zone),
mm/vmstat.c
1787
zone->spanned_pages,
mm/vmstat.c
1788
zone->present_pages,
mm/vmstat.c
1789
zone_managed_pages(zone),
mm/vmstat.c
1790
zone_cma_pages(zone));
mm/vmstat.c
1794
zone->lowmem_reserve[0]);
mm/vmstat.c
1795
for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
mm/vmstat.c
1796
seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
mm/vmstat.c
1800
if (!populated_zone(zone)) {
mm/vmstat.c
1807
zone_page_state(zone, i));
mm/vmstat.c
181
pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
mm/vmstat.c
1810
fold_vm_zone_numa_events(zone);
mm/vmstat.c
1813
zone_numa_event_state(zone, i));
mm/vmstat.c
1821
pcp = per_cpu_ptr(zone->per_cpu_pageset, i);
mm/vmstat.c
1836
pzstats = per_cpu_ptr(zone->per_cpu_zonestats, i);
mm/vmstat.c
1847
zone->zone_start_pfn,
mm/vmstat.c
1848
zone->nr_reserved_highatomic,
mm/vmstat.c
1849
zone->nr_free_highatomic);
mm/vmstat.c
187
zone_numa_event_add(zone_numa_events[item], zone, item);
mm/vmstat.c
192
struct zone *zone;
mm/vmstat.c
194
for_each_populated_zone(zone)
mm/vmstat.c
195
fold_vm_zone_numa_events(zone);
mm/vmstat.c
201
int calculate_pressure_threshold(struct zone *zone)
mm/vmstat.c
2058
struct zone *zone;
mm/vmstat.c
2060
for_each_populated_zone(zone) {
mm/vmstat.c
2061
struct per_cpu_zonestat *pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
mm/vmstat.c
2070
if (last_pgdat == zone->zone_pgdat)
mm/vmstat.c
2072
last_pgdat = zone->zone_pgdat;
mm/vmstat.c
2073
n = per_cpu_ptr(zone->zone_pgdat->per_cpu_nodestats, cpu);
mm/vmstat.c
214
watermark_distance = low_wmark_pages(zone) - min_wmark_pages(zone);
mm/vmstat.c
225
int calculate_normal_threshold(struct zone *zone)
mm/vmstat.c
2327
pg_data_t *pgdat, struct zone *zone)
mm/vmstat.c
2335
zone->name);
mm/vmstat.c
2337
fill_contig_page_info(zone, order, &info);
mm/vmstat.c
2377
pg_data_t *pgdat, struct zone *zone)
mm/vmstat.c
2387
zone->name);
mm/vmstat.c
2389
fill_contig_page_info(zone, order, &info);
mm/vmstat.c
260
mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);
mm/vmstat.c
278
struct zone *zone;
mm/vmstat.c
289
for_each_populated_zone(zone) {
mm/vmstat.c
290
struct pglist_data *pgdat = zone->zone_pgdat;
mm/vmstat.c
293
threshold = calculate_normal_threshold(zone);
mm/vmstat.c
298
per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
mm/vmstat.c
312
tolerate_drift = low_wmark_pages(zone) - min_wmark_pages(zone);
mm/vmstat.c
315
zone->percpu_drift_mark = high_wmark_pages(zone) +
mm/vmstat.c
321
int (*calculate_pressure)(struct zone *))
mm/vmstat.c
323
struct zone *zone;
mm/vmstat.c
329
zone = &pgdat->node_zones[i];
mm/vmstat.c
330
if (!zone->percpu_drift_mark)
mm/vmstat.c
333
threshold = (*calculate_pressure)(zone);
mm/vmstat.c
335
per_cpu_ptr(zone->per_cpu_zonestats, cpu)->stat_threshold
mm/vmstat.c
345
void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
mm/vmstat.c
348
struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
mm/vmstat.c
367
zone_page_state_add(x, zone, item);
mm/vmstat.c
40
static void zero_zone_numa_counters(struct zone *zone)
mm/vmstat.c
435
void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
mm/vmstat.c
437
struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
mm/vmstat.c
449
zone_page_state_add(v + overstep, zone, item);
mm/vmstat.c
45
atomic_long_set(&zone->vm_numa_event[item], 0);
mm/vmstat.c
47
per_cpu_ptr(zone->per_cpu_zonestats, cpu)->vm_numa_event[item]
mm/vmstat.c
491
void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
mm/vmstat.c
493
struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
mm/vmstat.c
505
zone_page_state_add(v - overstep, zone, item);
mm/vmstat.c
56
struct zone *zone;
mm/vmstat.c
560
static inline void mod_zone_state(struct zone *zone,
mm/vmstat.c
563
struct per_cpu_zonestat __percpu *pcp = zone->per_cpu_zonestats;
mm/vmstat.c
58
for_each_populated_zone(zone)
mm/vmstat.c
59
zero_zone_numa_counters(zone);
mm/vmstat.c
596
zone_page_state_add(z, zone, item);
mm/vmstat.c
599
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
mm/vmstat.c
602
mod_zone_state(zone, item, delta, 0);
mm/vmstat.c
690
void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
mm/vmstat.c
696
__mod_zone_page_state(zone, item, delta);
mm/vmstat.c
704
struct zone *zone;
mm/vmstat.c
706
zone = page_zone(page);
mm/vmstat.c
708
__inc_zone_state(zone, item);
mm/vmstat.c
802
struct zone *zone;
mm/vmstat.c
808
for_each_populated_zone(zone) {
mm/vmstat.c
809
struct per_cpu_zonestat __percpu *pzstats = zone->per_cpu_zonestats;
mm/vmstat.c
810
struct per_cpu_pages __percpu *pcp = zone->per_cpu_pageset;
mm/vmstat.c
818
atomic_long_add(v, &zone->vm_stat[i]);
mm/vmstat.c
830
if (decay_pcp_high(zone, this_cpu_ptr(pcp)))
mm/vmstat.c
847
if (zone_to_nid(zone) == numa_node_id()) {
mm/vmstat.c
858
drain_zone_pages(zone, this_cpu_ptr(pcp));
mm/vmstat.c
892
struct zone *zone;
mm/vmstat.c
897
for_each_populated_zone(zone) {
mm/vmstat.c
900
pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu);
mm/vmstat.c
908
atomic_long_add(v, &zone->vm_stat[i]);
mm/vmstat.c
919
zone_numa_event_add(v, zone, i);
mm/vmstat.c
948
void drain_zonestat(struct zone *zone, struct per_cpu_zonestat *pzstats)
mm/vmstat.c
957
zone_page_state_add(v, zone, i);
mm/vmstat.c
966
zone_numa_event_add(v, zone, i);
mm/vmstat.c
982
struct zone *zones = NODE_DATA(node)->node_zones;
mm/vmstat.c
996
struct zone *zones = NODE_DATA(node)->node_zones;
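Note: most of the mm/vmstat.c hits orbit one pattern, walk every populated zone and read or fold a per-zone counter. A minimal sketch of that pattern (the function name and pr_info format are illustrative, not from the index):

#include <linux/mmzone.h>
#include <linux/printk.h>
#include <linux/vmstat.h>

static void dump_zone_free_pages(void)
{
	struct zone *zone;

	/* for_each_populated_zone() and zone_page_state() both appear above */
	for_each_populated_zone(zone)
		pr_info("%s: %lu free\n", zone->name,
			zone_page_state(zone, NR_FREE_PAGES));
}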
mm/zpdesc.h
165
static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc)
net/core/flow_dissector.c
323
size_t mapsize, bool post_ct, u16 zone)
net/core/flow_dissector.c
345
key->ct_zone = zone;
net/core/flow_dissector.c
352
key->ct_zone = ct->zone.id;
net/netfilter/nf_conncount.c
106
found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
net/netfilter/nf_conncount.c
131
const struct nf_conntrack_zone **zone,
net/netfilter/nf_conncount.c
141
*zone = nf_ct_zone(found_ct);
net/netfilter/nf_conncount.c
150
*zone = nf_ct_zone(found_ct);
net/netfilter/nf_conncount.c
152
h = nf_conntrack_find_get(net, *zone, tuple);
net/netfilter/nf_conncount.c
168
const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
net/netfilter/nf_conncount.c
178
if (!get_ct_or_tuple_from_skb(net, skb, l3num, &ct, &tuple, &zone, &refcounted))
net/netfilter/nf_conncount.c
211
nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
net/netfilter/nf_conncount.c
212
nf_ct_zone_id(zone, zone->dir))
net/netfilter/nf_conncount.c
223
nf_ct_zone_equal(found_ct, zone, zone->dir)) {
net/netfilter/nf_conncount.c
261
conn->zone = *zone;
net/netfilter/nf_conncount.c
404
const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
net/netfilter/nf_conncount.c
45
struct nf_conntrack_zone zone;
net/netfilter/nf_conncount.c
454
if (get_ct_or_tuple_from_skb(net, skb, l3num, &ct, &tuple, &zone, &refcounted)) {
net/netfilter/nf_conncount.c
467
conn->zone = *zone;
net/netfilter/nf_conntrack_broadcast.c
77
exp->zone = ct->zone;
net/netfilter/nf_conntrack_core.c
1083
const struct nf_conntrack_zone *zone;
net/netfilter/nf_conntrack_core.c
1088
zone = nf_ct_zone(loser_ct);
net/netfilter/nf_conntrack_core.c
1097
zone, net))
net/netfilter/nf_conntrack_core.c
1203
const struct nf_conntrack_zone *zone;
net/netfilter/nf_conntrack_core.c
1223
zone = nf_ct_zone(ct);
net/netfilter/nf_conntrack_core.c
1274
zone, net))
net/netfilter/nf_conntrack_core.c
1283
zone, net))
net/netfilter/nf_conntrack_core.c
1354
const struct nf_conntrack_zone *zone;
net/netfilter/nf_conntrack_core.c
1361
zone = nf_ct_zone(ignored_conntrack);
net/netfilter/nf_conntrack_core.c
1366
hash = __hash_conntrack(net, tuple, nf_ct_zone_id(zone, IP_CT_DIR_REPLY), hsize);
net/netfilter/nf_conntrack_core.c
1379
if (nf_ct_key_equal(h, tuple, zone, net)) {
net/netfilter/nf_conntrack_core.c
1394
nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
net/netfilter/nf_conntrack_core.c
1656
const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_core.c
1701
nf_ct_zone_add(ct, zone);
net/netfilter/nf_conntrack_core.c
1714
const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_core.c
1719
return __nf_conntrack_alloc(net, zone, orig, repl, gfp, 0);
net/netfilter/nf_conntrack_core.c
1768
const struct nf_conntrack_zone *zone;
net/netfilter/nf_conntrack_core.c
1776
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
net/netfilter/nf_conntrack_core.c
1777
ct = __nf_conntrack_alloc(net, zone, tuple, &repl_tuple, GFP_ATOMIC,
net/netfilter/nf_conntrack_core.c
1812
exp = nf_ct_find_expectation(net, zone, tuple, !tmpl || nf_ct_is_confirmed(tmpl));
net/netfilter/nf_conntrack_core.c
1867
const struct nf_conntrack_zone *zone;
net/netfilter/nf_conntrack_core.c
1881
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
net/netfilter/nf_conntrack_core.c
1883
zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
net/netfilter/nf_conntrack_core.c
1885
h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
net/netfilter/nf_conntrack_core.c
1888
rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
net/netfilter/nf_conntrack_core.c
1892
h = __nf_conntrack_find_get(state->net, zone, &tuple, tmp);
net/netfilter/nf_conntrack_core.c
521
const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_core.c
543
nf_ct_zone_add(tmpl, zone);
net/netfilter/nf_conntrack_core.c
685
const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_core.c
694
nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
net/netfilter/nf_conntrack_core.c
732
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_core.c
753
if (nf_ct_key_equal(h, tuple, zone, net))
net/netfilter/nf_conntrack_core.c
771
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_core.c
777
h = ____nf_conntrack_find(net, zone, tuple, hash);
net/netfilter/nf_conntrack_core.c
787
if (likely(nf_ct_key_equal(h, tuple, zone, net)))
net/netfilter/nf_conntrack_core.c
801
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_core.c
804
unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
net/netfilter/nf_conntrack_core.c
809
thash = __nf_conntrack_find_get(net, zone, tuple,
net/netfilter/nf_conntrack_core.c
815
rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
net/netfilter/nf_conntrack_core.c
817
thash = __nf_conntrack_find_get(net, zone, tuple,
net/netfilter/nf_conntrack_core.c
866
const struct nf_conntrack_zone *zone;
net/netfilter/nf_conntrack_core.c
876
zone = nf_ct_zone(ct);
net/netfilter/nf_conntrack_core.c
897
zone, net))
net/netfilter/nf_conntrack_core.c
908
zone, net))
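Note: the nf_conntrack_core.c hits repeat one lookup idiom, resolve the zone of an existing conntrack and then search the hash with it. A sketch assuming the caller already holds net, ct, and tuple; nf_ct_tuplehash_to_ctrack() is the standard accessor, although it does not itself appear in the index:

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_zones.h>

static struct nf_conn *lookup_in_ct_zone(struct net *net, struct nf_conn *ct,
					 const struct nf_conntrack_tuple *tuple)
{
	const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
	struct nf_conntrack_tuple_hash *h;

	h = nf_conntrack_find_get(net, zone, tuple);
	return h ? nf_ct_tuplehash_to_ctrack(h) : NULL;
}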
net/netfilter/nf_conntrack_expect.c
112
const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_expect.c
117
nf_ct_exp_zone_equal_any(i, zone);
net/netfilter/nf_conntrack_expect.c
135
const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_expect.c
147
if (nf_ct_exp_equal(tuple, i, zone, net))
net/netfilter/nf_conntrack_expect.c
157
const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_expect.c
163
i = __nf_ct_expect_find(net, zone, tuple);
net/netfilter/nf_conntrack_expect.c
176
const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_expect.c
191
nf_ct_exp_equal(tuple, i, zone, net)) {
net/netfilter/nf_conntrack_expect.c
349
exp->zone = ct->zone;
net/netfilter/nf_conntrack_netlink.c
1009
err = ctnetlink_parse_zone(cda[CTA_ZONE], &filter->zone);
net/netfilter/nf_conntrack_netlink.c
1031
&filter->zone,
net/netfilter/nf_conntrack_netlink.c
1046
&filter->zone,
net/netfilter/nf_conntrack_netlink.c
1168
!nf_ct_zone_equal_any(ct, &filter->zone))
net/netfilter/nf_conntrack_netlink.c
1409
struct nf_conntrack_zone *zone)
net/netfilter/nf_conntrack_netlink.c
1411
nf_ct_zone_init(zone, NF_CT_DEFAULT_ZONE_ID,
net/netfilter/nf_conntrack_netlink.c
1415
zone->id = ntohs(nla_get_be16(attr));
net/netfilter/nf_conntrack_netlink.c
1425
struct nf_conntrack_zone *zone)
net/netfilter/nf_conntrack_netlink.c
1429
if (zone->id != NF_CT_DEFAULT_ZONE_ID)
net/netfilter/nf_conntrack_netlink.c
1432
ret = ctnetlink_parse_zone(attr, zone);
net/netfilter/nf_conntrack_netlink.c
1437
zone->dir = NF_CT_ZONE_DIR_REPL;
net/netfilter/nf_conntrack_netlink.c
1439
zone->dir = NF_CT_ZONE_DIR_ORIG;
net/netfilter/nf_conntrack_netlink.c
1463
u_int8_t l3num, struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_netlink.c
1503
if (!zone)
net/netfilter/nf_conntrack_netlink.c
1507
type, zone);
net/netfilter/nf_conntrack_netlink.c
1524
u_int8_t l3num, struct nf_conntrack_zone *zone)
net/netfilter/nf_conntrack_netlink.c
1526
return ctnetlink_parse_tuple_filter(cda, tuple, type, l3num, zone,
net/netfilter/nf_conntrack_netlink.c
154
const struct nf_conntrack_zone *zone, int dir)
net/netfilter/nf_conntrack_netlink.c
156
if (zone->id == NF_CT_DEFAULT_ZONE_ID || zone->dir != dir)
net/netfilter/nf_conntrack_netlink.c
158
if (nla_put_be16(skb, attrtype, htons(zone->id)))
net/netfilter/nf_conntrack_netlink.c
1619
struct nf_conntrack_zone zone;
net/netfilter/nf_conntrack_netlink.c
1623
err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
net/netfilter/nf_conntrack_netlink.c
1629
family, &zone);
net/netfilter/nf_conntrack_netlink.c
1632
family, &zone);
net/netfilter/nf_conntrack_netlink.c
1644
h = nf_conntrack_find_get(info->net, &zone, &tuple);
net/netfilter/nf_conntrack_netlink.c
1672
struct nf_conntrack_zone zone;
net/netfilter/nf_conntrack_netlink.c
1688
err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
net/netfilter/nf_conntrack_netlink.c
1694
u3, &zone);
net/netfilter/nf_conntrack_netlink.c
1697
u3, &zone);
net/netfilter/nf_conntrack_netlink.c
1704
h = nf_conntrack_find_get(info->net, &zone, &tuple);
net/netfilter/nf_conntrack_netlink.c
2233
const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_netlink.c
2245
ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC);
net/netfilter/nf_conntrack_netlink.c
2359
master_h = nf_conntrack_find_get(net, zone, &master);
net/netfilter/nf_conntrack_netlink.c
2397
struct nf_conntrack_zone zone;
net/netfilter/nf_conntrack_netlink.c
2401
err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
net/netfilter/nf_conntrack_netlink.c
2407
u3, &zone);
net/netfilter/nf_conntrack_netlink.c
2414
u3, &zone);
net/netfilter/nf_conntrack_netlink.c
2420
h = nf_conntrack_find_get(info->net, &zone, &otuple);
net/netfilter/nf_conntrack_netlink.c
2422
h = nf_conntrack_find_get(info->net, &zone, &rtuple);
net/netfilter/nf_conntrack_netlink.c
2434
ct = ctnetlink_create_conntrack(info->net, &zone, cda,
net/netfilter/nf_conntrack_netlink.c
2673
const struct nf_conntrack_zone *zone;
net/netfilter/nf_conntrack_netlink.c
2676
zone = nf_ct_zone(ct);
net/netfilter/nf_conntrack_netlink.c
2683
if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
net/netfilter/nf_conntrack_netlink.c
2693
if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
net/netfilter/nf_conntrack_netlink.c
2698
if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
net/netfilter/nf_conntrack_netlink.c
3264
struct nf_conntrack_zone zone;
net/netfilter/nf_conntrack_netlink.c
3276
err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
net/netfilter/nf_conntrack_netlink.c
3280
h = nf_conntrack_find_get(net, &zone, &tuple);
net/netfilter/nf_conntrack_netlink.c
3306
struct nf_conntrack_zone zone;
net/netfilter/nf_conntrack_netlink.c
3323
err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
net/netfilter/nf_conntrack_netlink.c
3344
exp = nf_ct_expect_find_get(info->net, &zone, &tuple);
net/netfilter/nf_conntrack_netlink.c
3402
struct nf_conntrack_zone zone;
net/netfilter/nf_conntrack_netlink.c
3407
err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
net/netfilter/nf_conntrack_netlink.c
3419
exp = nf_ct_expect_find_get(info->net, &zone, &tuple);
net/netfilter/nf_conntrack_netlink.c
3568
exp->zone = ct->zone;
net/netfilter/nf_conntrack_netlink.c
3595
const struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_netlink.c
3620
h = nf_conntrack_find_get(net, zone, &master_tuple);
net/netfilter/nf_conntrack_netlink.c
3648
struct nf_conntrack_zone zone;
net/netfilter/nf_conntrack_netlink.c
3656
err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
net/netfilter/nf_conntrack_netlink.c
3666
exp = __nf_ct_expect_find(info->net, &zone, &tuple);
net/netfilter/nf_conntrack_netlink.c
3671
err = ctnetlink_create_expect(info->net, &zone, cda, u3,
net/netfilter/nf_conntrack_netlink.c
590
const struct nf_conntrack_zone *zone;
net/netfilter/nf_conntrack_netlink.c
603
zone = nf_ct_zone(ct);
net/netfilter/nf_conntrack_netlink.c
610
if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
net/netfilter/nf_conntrack_netlink.c
620
if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
net/netfilter/nf_conntrack_netlink.c
625
if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
net/netfilter/nf_conntrack_netlink.c
745
const struct nf_conntrack_zone *zone;
net/netfilter/nf_conntrack_netlink.c
782
zone = nf_ct_zone(ct);
net/netfilter/nf_conntrack_netlink.c
789
if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
net/netfilter/nf_conntrack_netlink.c
799
if (ctnetlink_dump_zone_id(skb, CTA_TUPLE_ZONE, zone,
net/netfilter/nf_conntrack_netlink.c
804
if (ctnetlink_dump_zone_id(skb, CTA_ZONE, zone,
net/netfilter/nf_conntrack_netlink.c
904
struct nf_conntrack_zone zone;
net/netfilter/nf_conntrack_netlink.c
936
struct nf_conntrack_zone *zone);
net/netfilter/nf_conntrack_netlink.c
940
struct nf_conntrack_zone *zone,
net/netfilter/nf_conntrack_ovs.c
145
u16 zone, u8 family, u8 *proto, u16 *mru)
net/netfilter/nf_conntrack_ovs.c
150
enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
net/netfilter/nf_conntrack_ovs.c
162
enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
net/netfilter/nf_conntrack_pptp.c
131
const struct nf_conntrack_zone *zone;
net/netfilter/nf_conntrack_pptp.c
138
zone = nf_ct_zone(ct);
net/netfilter/nf_conntrack_pptp.c
139
h = nf_conntrack_find_get(net, zone, t);
net/netfilter/nf_conntrack_pptp.c
149
exp = nf_ct_expect_find_get(net, zone, t);
net/netfilter/nf_conntrack_proto_icmp.c
113
const struct nf_conntrack_zone *zone;
net/netfilter/nf_conntrack_proto_icmp.c
121
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
net/netfilter/nf_conntrack_proto_icmp.c
133
h = nf_conntrack_find_get(state->net, zone, &innertuple);
net/netfilter/nf_conntrack_standalone.c
209
const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
net/netfilter/nf_conntrack_standalone.c
211
if (zone->dir != dir)
net/netfilter/nf_conntrack_standalone.c
213
switch (zone->dir) {
net/netfilter/nf_conntrack_standalone.c
215
seq_printf(s, "zone=%u ", zone->id);
net/netfilter/nf_conntrack_standalone.c
218
seq_printf(s, "zone-orig=%u ", zone->id);
net/netfilter/nf_conntrack_standalone.c
221
seq_printf(s, "zone-reply=%u ", zone->id);
net/netfilter/nf_nat_core.c
149
const struct nf_conntrack_zone *zone,
net/netfilter/nf_nat_core.c
157
u32 zone;
net/netfilter/nf_nat_core.c
170
if (zone->dir == NF_CT_DEFAULT_ZONE_DIR)
net/netfilter/nf_nat_core.c
171
combined.zone = zone->id;
net/netfilter/nf_nat_core.c
245
const struct nf_conntrack_zone *zone;
net/netfilter/nf_nat_core.c
279
zone = nf_ct_zone(ignored_ct);
net/netfilter/nf_nat_core.c
281
thash = nf_conntrack_find_get(net, zone, tuple);
net/netfilter/nf_nat_core.c
286
thash = nf_conntrack_find_get(net, zone, &reply);
net/netfilter/nf_nat_core.c
338
const struct nf_conntrack_zone *zone;
net/netfilter/nf_nat_core.c
358
zone = nf_ct_zone(ignored_conntrack);
net/netfilter/nf_nat_core.c
360
thash = nf_conntrack_find_get(net, zone, &reply);
net/netfilter/nf_nat_core.c
462
const struct nf_conntrack_zone *zone,
net/netfilter/nf_nat_core.c
467
unsigned int h = hash_by_src(net, zone, tuple);
net/netfilter/nf_nat_core.c
473
nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
net/netfilter/nf_nat_core.c
493
find_best_ips_proto(const struct nf_conntrack_zone *zone,
net/netfilter/nf_nat_core.c
534
0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);
net/netfilter/nf_nat_core.c
695
const struct nf_conntrack_zone *zone;
net/netfilter/nf_nat_core.c
698
zone = nf_ct_zone(ct);
net/netfilter/nf_nat_core.c
716
} else if (find_appropriate_src(net, zone,
net/netfilter/nf_nat_core.c
726
find_best_ips_proto(zone, tuple, range, ct, maniptype);
net/netfilter/nft_ct.c
161
const struct nf_conntrack_zone *zone = nf_ct_zone(ct);
net/netfilter/nft_ct.c
165
zoneid = nf_ct_zone_id(zone, priv->dir);
net/netfilter/nft_ct.c
167
zoneid = zone->id;
net/netfilter/nft_ct.c
229
struct nf_conntrack_zone zone = { .dir = NF_CT_DEFAULT_ZONE_DIR };
net/netfilter/nft_ct.c
241
zone.id = value;
net/netfilter/nft_ct.c
245
zone.dir = NF_CT_ZONE_DIR_ORIG;
net/netfilter/nft_ct.c
248
zone.dir = NF_CT_ZONE_DIR_REPL;
net/netfilter/nft_ct.c
258
nf_ct_zone_add(ct, &zone);
net/netfilter/nft_ct.c
264
ct = nf_ct_tmpl_alloc(nft_net(pkt), &zone, GFP_ATOMIC);
net/netfilter/nft_ct.c
362
struct nf_conntrack_zone zone = { .id = 0 };
net/netfilter/nft_ct.c
370
tmp = nf_ct_tmpl_alloc(&init_net, &zone, GFP_KERNEL);
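Note: stitched from the nft_ct.c and xt_CT.c hits, this is how a zone is initialized and bound to a conntrack template. The wrapper function is illustrative; the (id, dir, flags) argument order of nf_ct_zone_init() follows the calls visible in the index:

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_zones.h>

static struct nf_conn *make_zone_tmpl(struct net *net, u16 id)
{
	struct nf_conntrack_zone zone;

	nf_ct_zone_init(&zone, id, NF_CT_DEFAULT_ZONE_DIR, 0);
	return nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);	/* NULL on failure */
}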
net/netfilter/xt_CT.c
158
struct nf_conntrack_zone zone;
net/netfilter/xt_CT.c
169
if (info->zone || info->flags & (XT_CT_ZONE_DIR_ORIG |
net/netfilter/xt_CT.c
179
memset(&zone, 0, sizeof(zone));
net/netfilter/xt_CT.c
180
zone.id = info->zone;
net/netfilter/xt_CT.c
181
zone.dir = xt_ct_flags_to_dir(info);
net/netfilter/xt_CT.c
183
zone.flags |= NF_CT_FLAG_MARK;
net/netfilter/xt_CT.c
185
ct = nf_ct_tmpl_alloc(par->net, &zone, GFP_KERNEL);
net/netfilter/xt_CT.c
240
.zone = info->zone,
net/netfilter/xt_CT.c
305
.zone = info->zone,
net/netfilter/xt_connlimit.c
35
const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
net/netfilter/xt_connlimit.c
43
zone = nf_ct_zone(ct);
net/netfilter/xt_connlimit.c
56
key[4] = zone->id;
net/netfilter/xt_connlimit.c
63
key[1] = zone->id;
net/openvswitch/conntrack.c
1054
err = ovs_ct_handle_fragments(net, key, info->zone.id,
net/openvswitch/conntrack.c
1274
info->zone.id = nla_get_u16(a);
net/openvswitch/conntrack.c
1398
nf_ct_zone_init(&ct_info.zone, NF_CT_DEFAULT_ZONE_ID,
net/openvswitch/conntrack.c
1406
ct_info.ct = nf_ct_tmpl_alloc(net, &ct_info.zone, GFP_KERNEL);
net/openvswitch/conntrack.c
1528
nla_put_u16(skb, OVS_CT_ATTR_ZONE, ct_info->zone.id))
net/openvswitch/conntrack.c
1672
u16 zone;
net/openvswitch/conntrack.c
1684
zone_limit->zone_id, &zone))) {
net/openvswitch/conntrack.c
1693
ct_limit->zone = zone;
net/openvswitch/conntrack.c
1716
u16 zone;
net/openvswitch/conntrack.c
1728
zone_limit->zone_id, &zone))) {
net/openvswitch/conntrack.c
1732
ct_limit_del(info, zone);
net/openvswitch/conntrack.c
1783
u16 zone;
net/openvswitch/conntrack.c
1795
&zone))) {
net/openvswitch/conntrack.c
1799
limit = ct_limit_get(info, zone);
net/openvswitch/conntrack.c
1803
net, info->data, zone, limit, reply);
net/openvswitch/conntrack.c
1835
ct_limit->zone, ct_limit->limit, reply);
net/openvswitch/conntrack.c
199
const struct nf_conntrack_zone *zone,
net/openvswitch/conntrack.c
203
key->ct_zone = zone->id;
net/openvswitch/conntrack.c
247
const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
net/openvswitch/conntrack.c
271
zone = nf_ct_zone(ct);
net/openvswitch/conntrack.c
275
zone = &info->zone;
net/openvswitch/conntrack.c
277
__ovs_ct_update_key(key, state, zone, ct);
net/openvswitch/conntrack.c
445
u16 zone, int family, struct sk_buff *skb)
net/openvswitch/conntrack.c
450
err = nf_ct_handle_fragments(net, skb, zone, family, &key->ip.proto, &ovs_cb.mru);
net/openvswitch/conntrack.c
490
ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,
net/openvswitch/conntrack.c
515
h = nf_conntrack_find_get(net, zone, &tuple);
net/openvswitch/conntrack.c
549
(key->ct_zone == info->zone.id);
net/openvswitch/conntrack.c
552
ct = ovs_ct_find_existing(net, &info->zone, info->family, skb,
net/openvswitch/conntrack.c
63
struct nf_conntrack_zone zone;
net/openvswitch/conntrack.c
873
const struct ovs_ct_limit_info *info, u16 zone)
net/openvswitch/conntrack.c
875
return &info->limits[zone & (CT_LIMIT_HASH_BUCKETS - 1)];
net/openvswitch/conntrack.c
885
head = ct_limit_hash_bucket(info, new_ct_limit->zone);
net/openvswitch/conntrack.c
887
if (ct_limit->zone == new_ct_limit->zone) {
net/openvswitch/conntrack.c
899
static void ct_limit_del(const struct ovs_ct_limit_info *info, u16 zone)
net/openvswitch/conntrack.c
90
u16 zone;
net/openvswitch/conntrack.c
905
head = ct_limit_hash_bucket(info, zone);
net/openvswitch/conntrack.c
907
if (ct_limit->zone == zone) {
net/openvswitch/conntrack.c
916
static u32 ct_limit_get(const struct ovs_ct_limit_info *info, u16 zone)
net/openvswitch/conntrack.c
921
head = ct_limit_hash_bucket(info, zone);
net/openvswitch/conntrack.c
923
if (ct_limit->zone == zone)
net/openvswitch/conntrack.c
939
conncount_key = info->zone.id;
net/openvswitch/conntrack.c
941
per_zone_limit = ct_limit_get(ct_limit_info, info->zone.id);
net/openvswitch/conntrack.c
980
info->zone.id);
net/openvswitch/flow.c
1005
u16 zone = 0;
net/openvswitch/flow.c
1047
zone = post_ct ? tc_ext->zone : 0;
net/openvswitch/flow.c
1060
key->ct_zone = zone;
net/sched/act_connmark.c
143
nparms->zone = parm->zone;
net/sched/act_connmark.c
158
nparms->zone = parm->zone;
net/sched/act_connmark.c
211
opt.zone = parms->zone;
net/sched/act_connmark.c
40
struct nf_conntrack_zone zone;
net/sched/act_connmark.c
76
zone.id = parms->zone;
net/sched/act_connmark.c
77
zone.dir = NF_CT_DEFAULT_ZONE_DIR;
net/sched/act_connmark.c
79
thash = nf_conntrack_find_get(parms->net, &zone, &tuple);
net/sched/act_ct.c
1014
err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
net/sched/act_ct.c
1107
tc_skb_cb(skb)->zone = p->zone;
net/sched/act_ct.c
1243
struct nf_conntrack_zone zone;
net/sched/act_ct.c
1249
p->zone = NF_CT_DEFAULT_ZONE_ID;
net/sched/act_ct.c
1302
&p->zone, TCA_CT_ZONE,
net/sched/act_ct.c
1304
sizeof(p->zone));
net/sched/act_ct.c
1307
nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
net/sched/act_ct.c
1308
tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
net/sched/act_ct.c
1576
&p->zone, TCA_CT_ZONE,
net/sched/act_ct.c
1578
sizeof(p->zone)))
net/sched/act_ct.c
1624
entry->ct.zone = tcf_ct_zone(act);
net/sched/act_ct.c
326
struct zones_ht_key key = { .net = net, .zone = params->zone };
net/sched/act_ct.c
48
u16 zone;
net/sched/act_ct.c
65
.key_len = offsetofend(struct zones_ht_key, zone),
net/sched/act_ct.c
763
if (nf_ct_zone(ct)->id != p->zone)
net/sched/act_ct.c
844
u8 family, u16 zone, bool *defrag)
net/sched/act_ct.c
865
err = nf_ct_handle_fragments(net, skb, zone, family, &proto, &mru);
net/sched/act_ctinfo.c
123
zone.id = cp->zone;
net/sched/act_ctinfo.c
124
zone.dir = NF_CT_DEFAULT_ZONE_DIR;
net/sched/act_ctinfo.c
126
thash = nf_conntrack_find_get(cp->net, &zone, &tuple);
net/sched/act_ctinfo.c
246
cp_new->zone = nla_get_u16_default(tb[TCA_CTINFO_ZONE], 0);
net/sched/act_ctinfo.c
307
if (nla_put_u16(skb, TCA_CTINFO_ZONE, cp->zone))
net/sched/act_ctinfo.c
87
struct nf_conntrack_zone zone;
net/sched/cls_api.c
1878
ext->zone = cb->zone;
net/sched/cls_flower.c
326
u16 zone = tc_skb_cb(skb)->zone;
net/sched/cls_flower.c
344
post_ct, zone);
sound/soc/soc-jack.c
114
struct snd_soc_jack_zone *zone;
sound/soc/soc-jack.c
116
list_for_each_entry(zone, &jack->jack_zones, list) {
sound/soc/soc-jack.c
117
if (micbias_voltage >= zone->min_mv &&
sound/soc/soc-jack.c
118
micbias_voltage < zone->max_mv)
sound/soc/soc-jack.c
119
return zone->jack_type;
sound/soc/sof/debug.c
241
reply->elems[i].zone, reply->elems[i].id,
sound/synth/emux/emux_effect.c
203
origp = (unsigned char *)&vp->zone->v.parm + offset;
sound/synth/emux/emux_synth.c
100
vp->zone = table[i];
sound/synth/emux/emux_synth.c
101
if (vp->zone->sample)
sound/synth/emux/emux_synth.c
102
vp->block = vp->zone->sample->block;
sound/synth/emux/emux_synth.c
479
vp->zone = NULL;
sound/synth/emux/emux_synth.c
532
vp->reg = vp->zone->v;
sound/synth/emux/soundfont.c
1007
zone = sf_zone_new(sflist, sf);
sound/synth/emux/soundfont.c
1008
if (!zone) {
sound/synth/emux/soundfont.c
1022
kfree(zone);
sound/synth/emux/soundfont.c
1031
zone->v.sample = sample_id; /* the last sample */
sound/synth/emux/soundfont.c
1032
zone->v.rate_offset = calc_rate_offset(patch.base_freq);
sound/synth/emux/soundfont.c
1034
zone->v.root = note / 100;
sound/synth/emux/soundfont.c
1035
zone->v.tune = -(note % 100);
sound/synth/emux/soundfont.c
1036
zone->v.low = (freq_to_note(patch.low_note) + 99) / 100;
sound/synth/emux/soundfont.c
1037
zone->v.high = freq_to_note(patch.high_note) / 100;
sound/synth/emux/soundfont.c
1039
zone->v.pan = (patch.panning + 128) / 2;
sound/synth/emux/soundfont.c
1043
(int)patch.base_freq, zone->v.rate_offset,
sound/synth/emux/soundfont.c
1044
zone->v.root, zone->v.tune, zone->v.low, zone->v.high);
sound/synth/emux/soundfont.c
1068
zone->v.parm.volatkhld =
sound/synth/emux/soundfont.c
1071
zone->v.parm.voldcysus = (calc_gus_sustain(patch.env_offset[2]) << 8) |
sound/synth/emux/soundfont.c
1073
zone->v.parm.volrelease = 0x8000 | snd_sf_calc_parm_decay(release);
sound/synth/emux/soundfont.c
1074
zone->v.attenuation = calc_gus_attenuation(patch.env_offset[0]);
sound/synth/emux/soundfont.c
1078
zone->v.parm.volatkhld,
sound/synth/emux/soundfont.c
1079
zone->v.parm.voldcysus,
sound/synth/emux/soundfont.c
1080
zone->v.parm.volrelease,
sound/synth/emux/soundfont.c
1081
zone->v.attenuation);
sound/synth/emux/soundfont.c
1087
zone->v.parm.volrelease = 0x807f;
sound/synth/emux/soundfont.c
1093
zone->v.parm.tremfrq = ((patch.tremolo_depth / 2) << 8) | rate;
sound/synth/emux/soundfont.c
1098
zone->v.parm.fm2frq2 = ((patch.vibrato_depth / 6) << 8) | rate;
sound/synth/emux/soundfont.c
1104
zone->v.mode = SNDRV_SFNT_MODE_LOOPING;
sound/synth/emux/soundfont.c
1106
zone->v.mode = 0;
sound/synth/emux/soundfont.c
1110
zone->bank = 0;
sound/synth/emux/soundfont.c
1111
zone->instr = patch.instr_no;
sound/synth/emux/soundfont.c
1112
zone->mapped = 0;
sound/synth/emux/soundfont.c
1113
zone->v.sf_id = sf->id;
sound/synth/emux/soundfont.c
1115
zone->sample = set_sample(sf, &zone->v);
sound/synth/emux/soundfont.c
1118
add_preset(sflist, zone);
sound/synth/emux/soundfont.c
1172
struct snd_sf_zone *zone;
sound/synth/emux/soundfont.c
1175
zone = search_first_zone(sflist, cur->bank, cur->instr, cur->v.low);
sound/synth/emux/soundfont.c
1176
if (zone && zone->v.sf_id != cur->v.sf_id) {
sound/synth/emux/soundfont.c
1180
for (p = zone; p; p = p->next_zone) {
sound/synth/emux/soundfont.c
1186
delete_preset(sflist, zone);
sound/synth/emux/soundfont.c
1187
zone = NULL; /* do not forget to clear this! */
sound/synth/emux/soundfont.c
1194
cur->next_zone = zone; /* zone link */
sound/synth/emux/soundfont.c
476
struct snd_sf_zone *zone;
sound/synth/emux/soundfont.c
515
for (zone = sf->zones; zone; zone = zone->next) {
sound/synth/emux/soundfont.c
516
if (!zone->mapped &&
sound/synth/emux/soundfont.c
517
zone->bank == hdr.bank &&
sound/synth/emux/soundfont.c
518
zone->instr == hdr.instr)
sound/synth/emux/soundfont.c
547
zone = sf_zone_new(sflist, sf);
sound/synth/emux/soundfont.c
548
if (!zone)
sound/synth/emux/soundfont.c
552
zone->bank = tmpzone.bank;
sound/synth/emux/soundfont.c
553
zone->instr = tmpzone.instr;
sound/synth/emux/soundfont.c
554
zone->v = tmpzone.v;
sound/synth/emux/soundfont.c
557
zone->sample = set_sample(sf, &zone->v);
sound/synth/emux/soundfont.c
943
struct snd_sf_zone *zone;
tools/perf/util/bpf_skel/lock_contention.bpf.c
870
lock_off = offsetof(struct zone, lock);
tools/perf/util/bpf_skel/vmlinux/vmlinux.h
211
struct zone node_zones[6]; /* value for all possible config */
tools/power/cpupower/lib/powercap.c
124
static int sysfs_powercap_get64_val(struct powercap_zone *zone,
tools/power/cpupower/lib/powercap.c
132
strcat(file, zone->sys_name);
tools/power/cpupower/lib/powercap.c
146
int powercap_get_max_energy_range_uj(struct powercap_zone *zone, uint64_t *val)
tools/power/cpupower/lib/powercap.c
148
return sysfs_powercap_get64_val(zone, GET_MAX_ENERGY_RANGE_UJ, val);
tools/power/cpupower/lib/powercap.c
151
int powercap_get_energy_uj(struct powercap_zone *zone, uint64_t *val)
tools/power/cpupower/lib/powercap.c
153
return sysfs_powercap_get64_val(zone, GET_ENERGY_UJ, val);
tools/power/cpupower/lib/powercap.c
156
int powercap_get_max_power_range_uw(struct powercap_zone *zone, uint64_t *val)
tools/power/cpupower/lib/powercap.c
158
return sysfs_powercap_get64_val(zone, GET_MAX_POWER_RANGE_UW, val);
tools/power/cpupower/lib/powercap.c
161
int powercap_get_power_uw(struct powercap_zone *zone, uint64_t *val)
tools/power/cpupower/lib/powercap.c
163
return sysfs_powercap_get64_val(zone, GET_POWER_UW, val);
tools/power/cpupower/lib/powercap.c
166
int powercap_zone_get_enabled(struct powercap_zone *zone, int *mode)
tools/power/cpupower/lib/powercap.c
170
if ((strlen(PATH_TO_POWERCAP) + strlen(zone->sys_name)) +
tools/power/cpupower/lib/powercap.c
175
strcat(path, zone->sys_name);
tools/power/cpupower/lib/powercap.c
181
int powercap_zone_set_enabled(struct powercap_zone *zone, int mode)
tools/power/cpupower/lib/powercap.c
188
int powercap_read_zone(struct powercap_zone *zone)
tools/power/cpupower/lib/powercap.c
199
strcat(sysfs_dir, zone->sys_name);
tools/power/cpupower/lib/powercap.c
206
strcat(file, zone->sys_name);
tools/power/cpupower/lib/powercap.c
208
sysfs_read_file(file, zone->name, MAX_LINE_LEN);
tools/power/cpupower/lib/powercap.c
209
if (zone->parent)
tools/power/cpupower/lib/powercap.c
210
zone->tree_depth = zone->parent->tree_depth + 1;
tools/power/cpupower/lib/powercap.c
211
ret = powercap_get_energy_uj(zone, &val);
tools/power/cpupower/lib/powercap.c
213
zone->has_energy_uj = 1;
tools/power/cpupower/lib/powercap.c
214
ret = powercap_get_power_uw(zone, &val);
tools/power/cpupower/lib/powercap.c
216
zone->has_power_uw = 1;
tools/power/cpupower/lib/powercap.c
235
if (zone->children[i] == NULL) {
tools/power/cpupower/lib/powercap.c
236
zone->children[i] = child_zone;
tools/power/cpupower/lib/powercap.c
246
strcpy(child_zone->sys_name, zone->sys_name);
tools/power/cpupower/lib/powercap.c
249
child_zone->parent = zone;
tools/power/cpupower/lib/powercap.c
250
if (zone->tree_depth >= POWERCAP_MAX_TREE_DEPTH) {
tools/power/cpupower/lib/powercap.c
290
int powercap_walk_zones(struct powercap_zone *zone,
tools/power/cpupower/lib/powercap.c
291
int (*f)(struct powercap_zone *zone))
tools/power/cpupower/lib/powercap.c
295
if (!zone)
tools/power/cpupower/lib/powercap.c
298
ret = f(zone);
tools/power/cpupower/lib/powercap.c
303
if (zone->children[i] != NULL)
tools/power/cpupower/lib/powercap.c
304
powercap_walk_zones(zone->children[i], f);
tools/power/cpupower/lib/powercap.h
38
int powercap_walk_zones(struct powercap_zone *zone,
tools/power/cpupower/lib/powercap.h
39
int (*f)(struct powercap_zone *zone));
tools/power/cpupower/lib/powercap.h
46
int powercap_get_max_energy_range_uj(struct powercap_zone *zone, uint64_t *val);
tools/power/cpupower/lib/powercap.h
47
int powercap_get_energy_uj(struct powercap_zone *zone, uint64_t *val);
tools/power/cpupower/lib/powercap.h
48
int powercap_get_max_power_range_uw(struct powercap_zone *zone, uint64_t *val);
tools/power/cpupower/lib/powercap.h
49
int powercap_get_power_uw(struct powercap_zone *zone, uint64_t *val);
tools/power/cpupower/lib/powercap.h
50
int powercap_zone_get_enabled(struct powercap_zone *zone, int *mode);
tools/power/cpupower/lib/powercap.h
51
int powercap_zone_set_enabled(struct powercap_zone *zone, int mode);
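Note: the cpupower hits outline a small tree-walking API. This is the callback shape powercap_walk_zones() expects, built only from the signatures and fields visible above; the printf format, and the assumption that powercap_get_energy_uj() returns 0 on success, are mine:

#include <inttypes.h>
#include <stdio.h>
#include "powercap.h"	/* tools/power/cpupower/lib/powercap.h */

static int print_zone_energy(struct powercap_zone *zone)
{
	uint64_t uj;

	if (!zone->has_energy_uj)	/* field populated by powercap_read_zone() */
		return 0;
	if (powercap_get_energy_uj(zone, &uj) == 0)	/* assumed 0 == success */
		printf("%s: %" PRIu64 " uJ\n", zone->name, uj);
	return 0;
}

/* usage: powercap_walk_zones(root_zone, print_zone_energy); */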
tools/power/cpupower/utils/idle_monitor/rapl_monitor.c
43
static int powercap_count_zones(struct powercap_zone *zone)
tools/power/cpupower/utils/idle_monitor/rapl_monitor.c
51
if (!zone->has_energy_uj)
tools/power/cpupower/utils/idle_monitor/rapl_monitor.c
54
printf("%s\n", zone->sys_name);
tools/power/cpupower/utils/idle_monitor/rapl_monitor.c
55
uj = powercap_get_energy_uj(zone, &val);
tools/power/cpupower/utils/idle_monitor/rapl_monitor.c
58
strncpy(rapl_zones[rapl_zone_count].name, zone->name, CSTATE_NAME_LEN - 1);
tools/power/cpupower/utils/idle_monitor/rapl_monitor.c
63
rapl_zones_pt[rapl_zone_count] = zone;
tools/power/cpupower/utils/powercap-info.c
26
static int powercap_print_one_zone(struct powercap_zone *zone)
tools/power/cpupower/utils/powercap-info.c
31
for (i = 0; i < zone->tree_depth && i < POWERCAP_MAX_TREE_DEPTH; i++)
tools/power/cpupower/utils/powercap-info.c
34
printf("%sZone: %s", pr_prefix, zone->name);
tools/power/cpupower/utils/powercap-info.c
35
ret = powercap_zone_get_enabled(zone, &mode);
tools/power/cpupower/utils/powercap-info.c
40
if (zone->has_power_uw)
tools/power/cpupower/utils/powercap-info.c
44
if (zone->has_energy_uj)
tools/testing/memblock/linux/mmzone.h
34
struct zone node_zones[MAX_NR_ZONES];
tools/testing/selftests/bpf/progs/btf_dump_test_case_padding.c
230
struct zone _5;
tools/testing/selftests/kvm/include/kvm_util.h
508
struct kvm_coalesced_mmio_zone zone = {
tools/testing/selftests/kvm/include/kvm_util.h
514
vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);
tools/testing/selftests/kvm/include/kvm_util.h
521
struct kvm_coalesced_mmio_zone zone = {
tools/testing/selftests/kvm/include/kvm_util.h
527
vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone);
tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
102
uint16_t zone)
tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
117
mnl_attr_put_u16(nlh, CTA_ZONE, htons(zone));
tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
146
uint32_t dst_ip, uint16_t zone)
tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
174
return conntrack_data_insert(sock, nlh, zone);
tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
180
uint16_t zone)
tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
210
return conntrack_data_insert(sock, nlh, zone);
tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
219
static int conntracK_count_zone(struct mnl_socket *sock, uint16_t zone)
tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
240
mnl_attr_put_u16(nlh, CTA_ZONE, htons(zone));
tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
266
static int conntrack_flush_zone(struct mnl_socket *sock, uint16_t zone)
tools/testing/selftests/net/netfilter/conntrack_dump_flush.c
287
mnl_attr_put_u16(nlh, CTA_ZONE, htons(zone));
tools/testing/selftests/net/tcp_mmap.c
105
void hash_zone(void *zone, unsigned int length)
tools/testing/selftests/net/tcp_mmap.c
110
prefetch(zone + 384);
tools/testing/selftests/net/tcp_mmap.c
111
temp ^= *(unsigned long *)zone;
tools/testing/selftests/net/tcp_mmap.c
112
temp ^= *(unsigned long *)(zone + sizeof(long));
tools/testing/selftests/net/tcp_mmap.c
113
temp ^= *(unsigned long *)(zone + 2*sizeof(long));
tools/testing/selftests/net/tcp_mmap.c
114
temp ^= *(unsigned long *)(zone + 3*sizeof(long));
tools/testing/selftests/net/tcp_mmap.c
115
temp ^= *(unsigned long *)(zone + 4*sizeof(long));
tools/testing/selftests/net/tcp_mmap.c
116
temp ^= *(unsigned long *)(zone + 5*sizeof(long));
tools/testing/selftests/net/tcp_mmap.c
117
temp ^= *(unsigned long *)(zone + 6*sizeof(long));
tools/testing/selftests/net/tcp_mmap.c
118
temp ^= *(unsigned long *)(zone + 7*sizeof(long));
tools/testing/selftests/net/tcp_mmap.c
119
zone += 8*sizeof(long);
tools/testing/selftests/net/tcp_mmap.c
123
temp ^= *(unsigned char *)zone;
tools/testing/selftests/net/tcp_mmap.c
124
zone += 1;
tools/testing/vma/linux/mmzone.h
34
struct zone node_zones[MAX_NR_ZONES];
virt/kvm/coalesced_mmio.c
123
struct kvm_coalesced_mmio_zone *zone)
virt/kvm/coalesced_mmio.c
128
if (zone->pio != 1 && zone->pio != 0)
virt/kvm/coalesced_mmio.c
137
dev->zone = *zone;
virt/kvm/coalesced_mmio.c
141
zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
virt/kvm/coalesced_mmio.c
142
zone->addr, zone->size, &dev->dev);
virt/kvm/coalesced_mmio.c
158
struct kvm_coalesced_mmio_zone *zone)
virt/kvm/coalesced_mmio.c
163
if (zone->pio != 1 && zone->pio != 0)
virt/kvm/coalesced_mmio.c
169
if (zone->pio == dev->zone.pio &&
virt/kvm/coalesced_mmio.c
170
coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
virt/kvm/coalesced_mmio.c
172
zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
virt/kvm/coalesced_mmio.c
36
if (addr < dev->zone.addr)
virt/kvm/coalesced_mmio.c
38
if (addr + len > dev->zone.addr + dev->zone.size)
virt/kvm/coalesced_mmio.c
74
ring->coalesced_mmio[insert].pio = dev->zone.pio;
virt/kvm/coalesced_mmio.h
22
struct kvm_coalesced_mmio_zone zone;
virt/kvm/coalesced_mmio.h
28
struct kvm_coalesced_mmio_zone *zone);
virt/kvm/coalesced_mmio.h
30
struct kvm_coalesced_mmio_zone *zone);
virt/kvm/kvm_main.c
5223
struct kvm_coalesced_mmio_zone zone;
virt/kvm/kvm_main.c
5226
if (copy_from_user(&zone, argp, sizeof(zone)))
virt/kvm/kvm_main.c
5228
r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
virt/kvm/kvm_main.c
5232
struct kvm_coalesced_mmio_zone zone;
virt/kvm/kvm_main.c
5235
if (copy_from_user(&zone, argp, sizeof(zone)))
virt/kvm/kvm_main.c
5237
r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
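Note: tying the kvm_util.h and kvm_main.c entries together, the userspace side of registering a coalesced-MMIO zone reduces to filling the struct and issuing the ioctl; the addr and size values here are arbitrary examples:

struct kvm_coalesced_mmio_zone zone = {
	.addr = 0xc0000000,	/* arbitrary guest-physical base */
	.size = 0x1000,
	.pio  = 0,		/* 0 = MMIO, 1 = PIO, per the checks in coalesced_mmio.c */
};

vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone);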