ROUNDUP
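Call sites of the ROUNDUP macro across the tree. ROUNDUP(a, b) rounds a up to the next multiple of b. A minimal sketch of a typical definition follows; this is not necessarily the tree's exact macro. The division form works for any positive b, while the mask form requires a power-of-two b (which holds at the call sites below):

/* Sketch only; the tree's own definition may differ. */
#define ROUNDUP(a, b)	(((a) + (b) - 1) / (b) * (b))	/* any positive b */
/* Power-of-two-only variant common in kernel headers: */
/* #define ROUNDUP(a, b)	(((a) + (b) - 1) & ~((b) - 1)) */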
const size_t roundedReclen = ROUNDUP(reclen, alignof(struct dirent));
static const int kArraySize = ROUNDUP(SMP_MAX_CPUS, kArrayBits) / kArrayBits;
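Dividing the rounded value back by the step, as above, is a ceiling division: it yields the number of kArrayBits-wide words needed to hold one bit per CPU. A stand-alone illustration with invented values:

// Illustration only; these kArrayBits/CPU-count values are invented.
const int kArrayBits = 32;	// bits per array word
const int kCpus = 70;		// stand-in for SMP_MAX_CPUS
const int kWords = ROUNDUP(kCpus, kArrayBits) / kArrayBits;	// == 96 / 32 == 3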
size = ROUNDUP(size, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
start = ROUNDUP(start, alignment);
if (next == NULL || (next->base >= ROUNDUP(start, alignment) + size)) {
memory->base = ROUNDUP(start, alignment);
if (next->base >= ROUNDUP(last->base + last->size, alignment) + size) {
if ((fInfo.base + (fInfo.size - 1)) >= (ROUNDUP(last->base + last->size,
memory->base = ROUNDUP(last->base + last->size, alignment);
queueMemSize += ROUNDUP(sizeof(VirtioDesc) * fDescCount, B_PAGE_SIZE);
queueMemSize += ROUNDUP(sizeof(VirtioAvail) + sizeof(uint16) * fQueueLen, B_PAGE_SIZE);
queueMemSize += ROUNDUP(sizeof(VirtioUsed) + sizeof(VirtioUsedItem) * fQueueLen, B_PAGE_SIZE);
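Each of the three virtqueue regions above (descriptor table, available ring, used ring) is rounded to a page multiple individually, so every region starts on its own page boundary, rather than the total being rounded once at the end. With, say, fDescCount == fQueueLen == 128 and 4 KiB pages, each term rounds its exact byte size up to whole pages before being added to queueMemSize.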
size_t length = min_c(ROUNDUP(v->capability.length, sizeof(uint32)), size);
offset = ROUNDUP(offset, kBlockSize);
size = ROUNDUP(size, B_PAGE_SIZE);
offset = ROUNDUP(offset, info->block_size);
const phys_size_t rounded_len = ROUNDUP(request->Length() + (request->Offset()
size = ROUNDUP(size + offset, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
uint32 areaSize = ROUNDUP(romSize, 1 << 16);
size_t size = ROUNDUP(sizeof(Packet)*count, B_PAGE_SIZE);
SetWriteBufferSize(ROUNDUP(interface->endpoint[0].descr->max_packet_size, 16));
SetReadBufferSize(ROUNDUP(interface->endpoint[0].descr->max_packet_size, 16));
SetReadBufferSize(ROUNDUP(interface->endpoint[1].descr->max_packet_size, 16));
SetWriteBufferSize(ROUNDUP(interface->endpoint[1].descr->max_packet_size, 16));
SetReadBufferSize(ROUNDUP(endpoint->descr->max_packet_size, 16));
SetWriteBufferSize(ROUNDUP(endpoint->descr->max_packet_size, 16));
fReadBufferSize(ROUNDUP(DEF_BUFFER_SIZE, 16)),
fOutputBufferSize(ROUNDUP(DEF_BUFFER_SIZE, 16)),
fWriteBufferSize(ROUNDUP(DEF_BUFFER_SIZE, 16)),
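The serial-driver call sites above round endpoint and default buffer sizes up to a 16-byte multiple, e.g. ROUNDUP(8, 16) == 16 and ROUNDUP(64, 16) == 64; presumably this keeps transfer buffers on a uniform 16-byte granularity, though the rationale is not stated at these call sites.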
B_ANY_KERNEL_ADDRESS, ROUNDUP(totalBuffers, B_PAGE_SIZE), B_CONTIGUOUS,
if (static_cast<uint64>(pos) > ROUNDUP(fMaxFileSize, B_PAGE_SIZE)
const size_t rounded_len = ROUNDUP((length) + (offset - rounded_offset),
entryLen = ROUNDUP(entryLen, 8);
size = std::min(bufferSize, ROUNDUP(size, 8));
size = std::min(bufferSize, ROUNDUP(size, 8));
entryLen = ROUNDUP(entryLen, 8);
entryLen = ROUNDUP(entryLen, 8);
return ROUNDUP(entrySize, 8);
if (fNode->di_forkoff != (ROUNDUP(sizeof(uint32), 8) >> 3))
return ROUNDUP(entrySize, 8);
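The cluster of ROUNDUP(..., 8) calls above pads variable-length directory entry sizes to 8-byte boundaries, so successive on-disk entries stay naturally aligned. A small sketch with a hypothetical helper name:

// Hypothetical helper; illustrates the padding rule used above.
static inline size_t
padded_entry_size(size_t headerSize, size_t nameLength)
{
	size_t entrySize = headerSize + nameLength;
	return ROUNDUP(entrySize, 8);	// keep the next entry 8-byte aligned
}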
size = ROUNDUP(size, 4);
size = ROUNDUP(size, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
region->size = ROUNDUP(header.p_memsz + (header.p_vaddr % B_PAGE_SIZE),
uint64 end = ROUNDUP(start + size, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
buffer = (uint8*)buffer + ROUNDUP(signatureLength, sizeof(void*));
gKernelArgs.arch_args.virtual_end = ROUNDUP(KERNEL_LOAD_BASE_64_BIT
maxAddress = ROUNDUP(maxAddress, 0x40000000);
size = ROUNDUP(size, B_PAGE_SIZE);
base = ROUNDUP(base, B_PAGE_SIZE);
uint64 end = ROUNDUP(base + extMemoryBlock[i].length, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
size_t alignedSize = ROUNDUP(size + (start - physAddr), B_PAGE_SIZE);
sPageDirectory = (uint32 *)ROUNDUP((uint32)sPageDirectory, ALIGN_PAGEDIR);
uint64 regs_size = ROUNDUP(gKernelArgs.arch_args.uart.regs.size, B_PAGE_SIZE);
sPageDirectory = (uint32_t *)ROUNDUP((uint32_t)sPageDirectory, ALIGN_PAGEDIR);
gKernelArgs.arch_args.virtual_end = ROUNDUP(KERNEL_LOAD_BASE
maxAddress = ROUNDUP(maxAddress, 0x40000000);
size_t pages = ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE;
size_t pages = ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE;
ROUNDUP(size, B_PAGE_SIZE)) != B_OK)
ROUNDUP(size, B_PAGE_SIZE) / B_PAGE_SIZE);
sNextVirtualAddress += ROUNDUP(size, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
phys_addr_t adr = ROUNDUP(gFreeVirtMem, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
size = ROUNDUP(size, B_PAGE_SIZE);
phys_addr_t adr = ROUNDUP((addr_t)gFreeMem, B_PAGE_SIZE);
(ROUNDUP(SMP_MAX_CPUS * TOTAL_SLOTS_PER_CPU + EXTRA_SLOTS, 1024) / 1024)
start = ROUNDUP(start + 1, kPageTableAlignment);
start = ROUNDUP(start + 1, kPageTableAlignment);
start = ROUNDUP(start + 1, kPageTableAlignment);
#define NUM_USER_PGDIR_ENTS (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, \
ROUNDUP(args->arch_args.uart.regs.size, B_PAGE_SIZE), 0);
ROUNDUP(sizeof(paddr_chunk_desc) * 1024, B_PAGE_SIZE),
ROUNDUP(sizeof(paddr_chunk_desc *) * num_virtual_chunks, B_PAGE_SIZE),
#define NUM_USER_PGROOT_ENTS (VADDR_TO_PRENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64 * 128)))
#define NUM_USER_PGDIR_ENTS (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64)))
end = ROUNDUP(end, B_PAGE_SIZE);
start = ROUNDUP(start + 1, B_PAGE_SIZE);
start = ROUNDUP(start + 1, B_PAGE_SIZE);
end = ROUNDUP(end, B_PAGE_SIZE);
start = ROUNDUP(start + 1, B_PAGE_SIZE);
start = ROUNDUP(start + 1, B_PAGE_SIZE);
start = ROUNDUP(start + 1, kPageDirAlignment);
start = ROUNDUP(start + 1, kPageTableAlignment);
start = ROUNDUP(start + 1, kPageDirAlignment);
start = ROUNDUP(start + 1, kPageTableAlignment);
start = ROUNDUP(start + 1, kPageTableAlignment);
#define NUM_USER_PGDIR_ENTS (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, \
#define NUM_USER_PGROOT_ENTS (VADDR_TO_PRENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64 * 128)))
#define NUM_USER_PGDIR_ENTS (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, B_PAGE_SIZE * 64)))
addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
addr_t virtualEnd = ROUNDUP(virtualAddress + size, B_PAGE_SIZE);
end = ROUNDUP(end, B_PAGE_SIZE);
start = ROUNDUP(start + 1, kPageTableAlignment);
size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);
start = ROUNDUP(start + 1, kPageTableAlignment);
start = ROUNDUP(start + 1, kPageTableAlignment);
end = ROUNDUP(end, B_PAGE_SIZE);
start = ROUNDUP(start + 1, kPageTableAlignment);
size = ROUNDUP(*_virtualAddress + size - virtualAddress, B_PAGE_SIZE);
start = ROUNDUP(start + 1, kPageTableAlignment);
start = ROUNDUP(start + 1, kPageTableAlignment);
(ROUNDUP(SMP_MAX_CPUS * TOTAL_SLOTS_PER_CPU + EXTRA_SLOTS, 1024) / 1024)
start = ROUNDUP(start + 1, kPageTableAlignment);
start = ROUNDUP(start + 1, kPageTableAlignment);
start = ROUNDUP(start + 1, kPageTableAlignment);
#define NUM_USER_PGDIR_ENTS (VADDR_TO_PDENT(ROUNDUP(USER_SIZE, \
start = ROUNDUP(start + 1, k64BitPageTableRange);
start = ROUNDUP(start + 1, k64BitPageTableRange);
start = ROUNDUP(start + 1, k64BitPageTableRange);
size_t virtualSize = ROUNDUP(pagesNeeded, 1024) * B_PAGE_SIZE;
(ROUNDUP(SMP_MAX_CPUS * TOTAL_SLOTS_PER_CPU + EXTRA_SLOTS, \
start = ROUNDUP(start + 1, kPAEPageTableRange);
start = ROUNDUP(start + 1, kPAEPageTableRange);
start = ROUNDUP(start + 1, kPAEPageTableRange);
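The recurring start = ROUNDUP(start + 1, alignment) idiom above advances strictly past start to the next aligned boundary: without the + 1, an already-aligned start would round to itself and the enclosing free-range search would make no progress. A minimal sketch of the loop shape, with the predicate name invented:

// Sketch only; is_range_free() is a stand-in for the real check.
while (start + size <= end) {
	if (is_range_free(start, size))
		return start;
	start = ROUNDUP(start + 1, kPageTableAlignment);	// strictly advance
}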
size = ROUNDUP(size, B_PAGE_SIZE);
return ROUNDUP(offset, B_PAGE_SIZE) >= virtual_base
#define ALIGN_ENTRY(pointer) (void*)ROUNDUP((addr_t)(pointer), 8)
size_t size = ROUNDUP(args->debug_size, B_PAGE_SIZE);
addr_t endAddress = ROUNDUP(address + size, B_PAGE_SIZE);
size_t growSize = ROUNDUP(((kernel_guarded_heap.grow_size / B_PAGE_SIZE) / 2)
const size_t alignedSize = ROUNDUP(size, alignment);
size_t metaSize = ROUNDUP(((size / B_PAGE_SIZE) / 2) * sizeof(GuardedHeapChunk), B_PAGE_SIZE);
area->base = ROUNDUP(base, B_PAGE_SIZE);
firstValid = (ROUNDUP(area->base, alignment) - area->base)
size = ROUNDUP(size, alignment);
size_t areaSize = ROUNDUP(size + sizeof(area_allocation_info)
address = (void *)ROUNDUP((addr_t)address, alignment);
+ ROUNDUP(kTraceOutputBufferSize + MAX_TRACE_SIZE, B_PAGE_SIZE);
phys_size_t size = ROUNDUP(fBounceBufferSize, B_PAGE_SIZE);
memUpperBound = ROUNDUP(memUpperBound, B_PAGE_SIZE);
fileUpperBound = ROUNDUP(fileUpperBound, B_PAGE_SIZE);
size_t segmentSize = ROUNDUP(programHeaders[i].p_memsz
length += ROUNDUP(programHeaders[i].p_memsz
end = ROUNDUP(programHeaders[i].p_memsz + programHeaders[i].p_vaddr,
region->size = ROUNDUP(programHeaders[i].p_memsz
size_t unusableSize = ROUNDUP(SLAB_AREA_STRUCT_OFFSET + kAreaAdminSize,
size = ROUNDUP(size, SLAB_CHUNK_SIZE_SMALL);
= ROUNDUP(sizeof(Area), B_PAGE_SIZE);
size_t neededSize = ROUNDUP(size, sizeof(double));
size_t size = ROUNDUP(sNumCPUs * MSG_ALLOCATE_PER_CPU * sizeof(smp_msg), B_PAGE_SIZE);
size_t needed = ROUNDUP(sizeof(user_thread), CACHE_LINE_SIZE);
clonedData = (void*)ROUNDUP((addr_t)stack, 16);
stack = (uint8*)clonedData + ROUNDUP(dataSize, 16);
pageIndex = ROUNDUP(pageIndex + 1, SWAP_BLOCK_PAGES) - 1;
pageIndex = ROUNDUP(pageIndex + 1, SWAP_BLOCK_PAGES) - 1;
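The two swap-cache lines above park pageIndex on the last index of its current SWAP_BLOCK_PAGES-sized block: rounding pageIndex + 1 up to the next block boundary and subtracting one lands on boundary - 1, presumably so the enclosing loop's own increment then steps into the first page of the next block. For example, with SWAP_BLOCK_PAGES == 32 and pageIndex == 5, ROUNDUP(6, 32) - 1 == 31.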
size = ROUNDUP(size, B_PAGE_SIZE);
address = ROUNDUP(address, B_PAGE_SIZE);
addr_t alignedBase = ROUNDUP(range->base, alignment);
addr_t alignedBase = ROUNDUP(range->base, alignment);
addr_t alignedBase = ROUNDUP(range->base, alignment);
return ROUNDUP(address, alignment);
addr_t lockEndAddress = ROUNDUP((addr_t)address + numBytes, B_PAGE_SIZE);
addr_t lockEndAddress = ROUNDUP((addr_t)address + numBytes, B_PAGE_SIZE);
ROUNDUP(kCacheInfoTableCount * sizeof(cache_info), B_PAGE_SIZE),
? ROUNDUP(previousRangeEnd, alignment) : previousRangeEnd;
? ROUNDUP(lastRangeEnd, alignment) : lastRangeEnd;
size = ROUNDUP(size, alignment);
size_t areaSize = ROUNDUP(size + sizeof(area_allocation_info) + B_PAGE_SIZE,
alignedSize = ROUNDUP(alignedSize, alignment);
size_t areaSize = ROUNDUP(size + sizeof(area_allocation_info)
address = (void *)ROUNDUP((addr_t)address, alignment);
area->base = ROUNDUP(base, B_PAGE_SIZE);
firstValid = (ROUNDUP(area->base, alignment) - area->base)