size_mul
size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
size_mul(sizeof(*kvm->arch.nested_mmus), num_mmus),
size_mul(sizeof(void *),
const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
const size_t stats_size = size_mul(dev_ctrs, sizeof(*curr_stats));
ae_offset = size_mul(ae, hb_ctrs);
const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
const size_t stats_size = size_mul(dev_ctrs, hb_struct_size);
const size_t dev_ctrs = size_mul(max_aes, hb_ctrs);
const size_t stats_size = size_mul(dev_ctrs, sizeof(struct hb_cnt_pair));
const size_t mem_items_to_fill = size_mul(stats_size, 2) / sizeof(u32);
size_mul(sizeof(uint32_t), num_syncobj_handles));
size_mul(sizeof(u32), num_syncobj_handles));
size_mul(sizeof(u32), num_read_bo_handles));
size_mul(sizeof(u32), num_write_bo_handles));
size_mul(sizeof(u32), num_syncobj));
size_mul(DIV_ROUND_UP(logo->width, BITS_PER_BYTE), logo->height),
size_mul(attr->gid_tbl_len, 2));
wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
size_mul(cmd.sge_count,
wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
inq_size = size_mul(cfg->num_input_queues,
outq_size = size_mul(cfg->num_output_queues,
sizeinput += size_mul(cfg->input[i].queue_size + 1,
sizeoutput += size_mul(cfg->output[i].queue_size + 1,
bf = kzalloc_flex(*bf, refcnt, size_mul(bf_bank_size, num_erp_banks));
alloc_size = size_mul(count + max_alloc - 1, size);
size_mul(topology_max_packages(), MAX_POWER_DOMAINS));
domain_die_map = kcalloc(size_mul(topology_max_packages(), MAX_POWER_DOMAINS),
return bitmap_size(size_mul(num_ssid, num_id));
phend = size_add(size_mul(sizeof(struct elf32_phdr), ehdr->e_phnum), ehdr->e_phoff);
shend = size_add(size_mul(sizeof(struct elf32_shdr), ehdr->e_shnum), ehdr->e_shoff);
return size_mul(sizeof(unsigned short),
/*
 * TEE_IOCTL_PARAM_SIZE() - byte size of an array of @x struct tee_param.
 * Uses size_mul() so the multiplication saturates to SIZE_MAX on overflow
 * instead of wrapping, which makes a subsequent allocation fail cleanly.
 */
#define TEE_IOCTL_PARAM_SIZE(x) (size_mul(sizeof(struct tee_param), (x)))
size_mul(lang_count, (needed_count + 1)));
if (clear_user(argp, size_mul(count - copied, sizeof(u64))))
size_mul(arg->vec_len, sizeof(struct page_region))))
return devm_kmemdup(dev, src, size_mul(size, n), flags);
/*
 * array_size() - overflow-checked a * b for allocation sizes.
 * Saturates to SIZE_MAX on overflow (see the SIZE_MAX cases in the
 * size_mul selftests), so allocators reject the request rather than
 * receiving a wrapped-around, too-small size.
 */
#define array_size(a, b) size_mul(a, b)
/*
 * array3_size() - overflow-checked a * b * c for allocation sizes.
 * Chains two size_mul() calls; once any partial product saturates to
 * SIZE_MAX, the final result stays SIZE_MAX, preserving the overflow
 * indication through the second multiplication.
 */
#define array3_size(a, b, c) size_mul(size_mul(a, b), c)
size_mul(count, sizeof(*(p)->member) + __must_be_array((p)->member)))
const size_t __obj_size = size_mul(sizeof(TYPE), COUNT); \
off = size_mul(off, 2);
alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
size_mul(new_max, sizeof(*info->pages)),
check_one_size_helper(20, size_mul, var++, 5);
check_one_size_helper(20, size_mul, 4, var++);
check_one_size_helper(0, size_mul, 0, 3);
check_one_size_helper(0, size_mul, 3, 0);
check_one_size_helper(6, size_mul, 2, 3);
check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 1);
check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, 3);
check_one_size_helper(SIZE_MAX, size_mul, SIZE_MAX, -3);
return kmemdup(src, size_mul(element_size, count), gfp);
const size_t __obj_size = size_mul(sizeof(TYPE), COUNT); \