new_block — occurrences of this identifier across the kernel tree, grouped by subsystem below.
Error-block registration (the enclosing file is not identifiable from these fragments): a new entry is appended in place to a fixed table:

struct error_block new_block = {
registered_errors.blocks[registered_errors.count++] = new_block;
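
A minimal userspace sketch of the same append-to-a-fixed-table pattern; the struct fields, the capacity, and the helper name are assumptions, not taken from the fragment above.

#include <stddef.h>

struct error_block { unsigned long start; unsigned long len; };

static struct {
	struct error_block blocks[16];
	size_t count;
} registered_errors;

/* returns 0 on success, -1 when the table is full */
static int register_error_block(struct error_block new_block)
{
	if (registered_errors.count >= 16)
		return -1;
	registered_errors.blocks[registered_errors.count++] = new_block;
	return 0;
}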
Device-mapper btree: new_block() is an internal helper that allocates a fresh btree node, and the split paths call it once per new sibling:

int new_block(struct dm_btree_info *info, struct dm_block **result);
r = new_block(info, &b);
r = new_block(s->info, &right);
r = new_block(s->info, &middle);
r = new_block(s->info, &left);
r = new_block(s->info, &right);
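
A userspace sketch of the allocate-before-split shape those calls follow: the new sibling is obtained first, so a failed allocation leaves the original node untouched. The node layout and names here are illustrative, not dm-btree's.

#include <stdlib.h>
#include <string.h>

struct node { int nr_keys; int keys[8]; };

static int split_node(struct node *n, struct node **right_out)
{
	struct node *right = calloc(1, sizeof(*right));

	if (!right)
		return -1;              /* 'n' has not been modified yet */

	/* move the upper half of the keys into the new right sibling */
	right->nr_keys = n->nr_keys / 2;
	memcpy(right->keys, &n->keys[n->nr_keys - right->nr_keys],
	       right->nr_keys * sizeof(int));
	n->nr_keys -= right->nr_keys;

	*right_out = right;
	return 0;
}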
Device-mapper space maps: new_block is an ops-table hook; the disk, metadata, and bootstrap space maps each supply an implementation, and the wrapper dispatches through the function pointer:

.new_block = sm_disk_new_block,
.new_block = sm_metadata_new_block,
.new_block = sm_bootstrap_new_block,
return sm->new_block(sm, b);
int (*new_block)(struct dm_space_map *sm, dm_block_t *b);
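
A compilable sketch of that ops-table dispatch: one flavor fills in .new_block and a thin wrapper forwards through the pointer. The body of the disk implementation is a placeholder, not the real bitmap search.

#include <stdint.h>

typedef uint64_t dm_block_t;

struct dm_space_map {
	int (*new_block)(struct dm_space_map *sm, dm_block_t *b);
};

/* placeholder: a real implementation searches the on-disk bitmaps */
static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	(void)sm;
	*b = 0;
	return 0;
}

static struct dm_space_map disk_sm = { .new_block = sm_disk_new_block };

static int dm_sm_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	return sm->new_block(sm, b);    /* indirect call through the ops table */
}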
Device-mapper transaction manager: a block is allocated from the space map, write-locked and zeroed through the block manager, released again if the lock fails, and recorded as a shadow on success:

dm_block_t new_block;
r = dm_sm_new_block(tm->sm, &new_block);
r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
dm_sm_dec_block(tm->sm, new_block);
insert_shadow(tm, new_block);
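
A self-contained sketch of that allocate/lock/undo flow; the helpers are stubs, and the real code operates on space-map and block-manager objects rather than ints.

#include <stdint.h>

struct tm { int sm, bm; };

static int sm_new_block(int sm, uint64_t *b) { (void)sm; *b = 42; return 0; }
static int bm_write_lock_zero(int bm, uint64_t b, void **res) { (void)bm; (void)b; *res = 0; return 0; }
static void sm_dec_block(int sm, uint64_t b) { (void)sm; (void)b; }
static void track_shadow(struct tm *tm, uint64_t b) { (void)tm; (void)b; }

static int tm_new_block(struct tm *tm, void **result)
{
	uint64_t new_block;
	int r;

	r = sm_new_block(tm->sm, &new_block);              /* 1. allocate */
	if (r)
		return r;

	r = bm_write_lock_zero(tm->bm, new_block, result); /* 2. lock + zero */
	if (r) {
		sm_dec_block(tm->sm, new_block);           /* undo step 1 */
		return r;
	}

	track_shadow(tm, new_block);                       /* remember it */
	return 0;
}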
dpaa2 ethernet switch: ACL filter blocks come from a small pool; an unused entry is claimed, marked in use, and bound to a port:

struct dpaa2_switch_filter_block *new_block;
new_block = dpaa2_switch_filter_block_get_unused(ethsw);
new_block->in_use = true;
return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block);
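
A sketch of the claim-from-pool shape: scan for an entry that is not in use and hand it back. The pool size and struct layout are assumptions.

#include <stdbool.h>
#include <stddef.h>

struct filter_block { bool in_use; };

static struct filter_block pool[8];

static struct filter_block *filter_block_get_unused(void)
{
	for (size_t i = 0; i < sizeof(pool) / sizeof(pool[0]); i++)
		if (!pool[i].in_use)
			return &pool[i];
	return NULL;                    /* pool exhausted */
}

The caller then sets in_use on the returned entry and binds it, exactly as the fragment above shows.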
btrfs qgroups: swapped blocks sit in a red-black tree keyed by subvolume bytenr; the insert comparator pulls the candidate node out with rb_entry():

const struct btrfs_qgroup_swapped_block *new_block = rb_entry(new,
return qgroup_swapped_block_bytenr_key_cmp(&new_block->subvol_bytenr, existing);
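
A sketch of the three-way comparator behind such an rb-tree insert; the field name mirrors the fragment, everything else is illustrative.

#include <stdint.h>

struct swapped_block { uint64_t subvol_bytenr; };

/* <0: key sorts before 'existing', >0: after, 0: duplicate */
static int bytenr_key_cmp(const uint64_t *key,
			  const struct swapped_block *existing)
{
	if (*key < existing->subvol_bytenr)
		return -1;
	if (*key > existing->subvol_bytenr)
		return 1;
	return 0;
}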
ocfs2: cluster numbers are converted to block numbers before mapping a folio or copying a cluster's worth of blocks one at a time:

u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
folio, 0, &new_block);
u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
for (i = 0; i < blocks; i++, old_block++, new_block++) {
new_bh = sb_getblk(osb->sb, new_block);
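
A sketch of the unit conversion and lock-step walk: a cluster is a power-of-two run of blocks, so the copy loop advances both block cursors together. The shift value is an assumption.

#include <stdint.h>

#define CLUSTER_SHIFT 3  /* assumed: 8 blocks per cluster */

static inline uint64_t clusters_to_blocks(uint64_t cluster)
{
	return cluster << CLUSTER_SHIFT;
}

static void copy_cluster(uint64_t old_cluster, uint64_t new_cluster,
			 void (*copy_block)(uint64_t from, uint64_t to))
{
	uint64_t old_block = clusters_to_blocks(old_cluster);
	uint64_t new_block = clusters_to_blocks(new_cluster);
	int blocks = 1 << CLUSTER_SHIFT;

	for (int i = 0; i < blocks; i++, old_block++, new_block++)
		copy_block(old_block, new_block);
}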
omfs: growing a file extent first tries the block immediately past the current extent (keeping it contiguous), then falls back to allocating a fresh range; the result is written back to disk big-endian and also returned to the caller:

u64 new_block = 0;
new_block = be64_to_cpu(entry->e_cluster) +
if (omfs_allocate_block(inode->i_sb, new_block)) {
&new_block, &new_count);
entry->e_cluster = cpu_to_be64(new_block);
*ret_block = new_block;
u64 new_block;
ret = omfs_grow_extent(inode, oe, &new_block);
clus_to_blk(sbi, new_block));
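
A sketch of that grow-in-place-or-relocate decision; claim_block() and alloc_range() are stand-ins for the real bitmap allocator.

#include <stdint.h>

static int claim_block(uint64_t b) { (void)b; return 1; }  /* 1 = claimed */
static int alloc_range(uint64_t *start, uint64_t *count)
{
	*start = 100; *count = 4; return 0;
}

static int grow_extent(uint64_t extent_start, uint64_t extent_len,
		       uint64_t *ret_block)
{
	uint64_t new_block = extent_start + extent_len; /* block just past */
	uint64_t count;

	if (claim_block(new_block)) {
		*ret_block = new_block;  /* extent stays contiguous */
		return 0;
	}
	if (alloc_range(&new_block, &count))
		return -1;               /* no space anywhere */
	*ret_block = new_block;          /* new, discontiguous extent */
	return 0;
}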
omfs inode allocation: the inode's number is simply the block that was just allocated for it:

u64 new_block;
&new_block, &len);
inode->i_ino = new_block;
udf ioctl path: a block is relocated and its new address copied back to userspace with put_user():

long old_block, new_block;
old_block, &new_block);
result = put_user(new_block, (long __user *)arg);
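
The userspace half of such an ioctl, as a sketch: a long holding the old block number goes in, and the kernel overwrites it with the new location. The request code here is a placeholder, not the real UDF value.

#include <sys/ioctl.h>

#define RELOCATE_REQUEST 0x1234     /* placeholder request code */

static int relocate_block(int fd, long old_block, long *new_block)
{
	long arg = old_block;

	if (ioctl(fd, RELOCATE_REQUEST, &arg) < 0)
		return -1;

	*new_block = arg;               /* kernel wrote the new address */
	return 0;
}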
udf inode code: a block is allocated for an indirect allocation extent, with block 0 serving as the failure sentinel:

udf_pblk_t new_block;
new_block = udf_new_block(sb, NULL,
if (!new_block)
err = udf_setup_indirect_aext(inode, new_block, epos);
udf_relocate_blocks() itself: the new location is read out of the on-disk (little-endian) sparing tables and returned through the out parameter:

int udf_relocate_blocks(struct super_block *sb, long old_block, long *new_block)
*new_block = le32_to_cpu(
*new_block = le32_to_cpu(
*new_block =
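
A sketch of the endian-plus-out-parameter shape: the mapping table stores little-endian 32-bit block numbers, converted to CPU order before being handed back. The table layout is an assumption.

#include <stdint.h>

static inline uint32_t le32_to_cpu_sketch(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

static int lookup_relocation(const uint32_t *table, long old_block,
			     long *new_block)
{
	*new_block = le32_to_cpu_sketch(table[old_block]);
	return 0;
}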
XFS btree: as with the dm space maps above, allocation dispatches through an ops table (bc_ops->alloc_block), and the outcome is traced whether or not it succeeded:

union xfs_btree_ptr *new_block,
error = cur->bc_ops->alloc_block(cur, hint_block, new_block, stat);
trace_xfs_btree_alloc_block(cur, new_block, *stat, error);
XFS btree staging: the freshly allocated block is read into a buffer, linked to its left sibling, and handed back through the out parameter:

struct xfs_btree_block *new_block;
ret = xfs_btree_get_buf_block(cur, &new_ptr, &new_block, &new_bp);
xfs_btree_set_sibling(cur, new_block, ptrp, XFS_BB_LEFTSIB);
*blockp = new_block;
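
A sketch of that sibling-pointer bookkeeping as a plain doubly linked list: the new block is pointed at its left neighbor before being returned through the out parameter. All names here are illustrative.

#include <stdlib.h>

struct block { struct block *left, *right; };

static int new_sibling(struct block *left, struct block **blockp)
{
	struct block *new_block = calloc(1, sizeof(*new_block));

	if (!new_block)
		return -1;

	new_block->left = left;         /* the LEFTSIB-style pointer */
	if (left)
		left->right = new_block;

	*blockp = new_block;
	return 0;
}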
vDSO getrandom selftest: a region is mmap'd with slack, the cursor is rounded up to a page boundary whenever an opaque state would straddle a page, per-thread states are carved out of it, and the region is unmapped again:

void *new_block, *new_states;
new_block = mmap(0, alloc_size, vgrnd.params.mmap_prot, vgrnd.params.mmap_flags, -1, 0);
if (new_block == MAP_FAILED)
if (((uintptr_t)new_block & (page_size - 1)) + vgrnd.params.size_of_opaque_state > page_size)
new_block = (void *)(((uintptr_t)new_block + page_size - 1) & (~(page_size - 1)));
vgrnd.states[i] = new_block;
new_block += state_size_aligned;
munmap(new_block, alloc_size);
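
A runnable sketch of the over-allocate-and-align pattern: mmap a region with slack, round the cursor up to a page boundary, carve fixed-size chunks, and keep the original pointer so the whole region can be unmapped. Chunk sizes here are arbitrary, not the selftest's.

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	size_t chunk = 256, nchunks = 16;
	size_t alloc_size = nchunks * chunk + page_size;   /* slack for rounding */
	void *base, *cursor;

	base = mmap(0, alloc_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return 1;

	/* round the cursor up to the next page boundary */
	cursor = (void *)(((uintptr_t)base + page_size - 1) & ~(page_size - 1));

	for (size_t i = 0; i < nchunks; i++)
		printf("chunk %zu at %p\n", i,
		       (void *)((char *)cursor + i * chunk));

	munmap(base, alloc_size);       /* unmap via the original pointer */
	return 0;
}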