init_block
struct init_block init; /* one time cmd, not in cmd queue */
u32 init_block : 1;
hdr->init_block = ACE_CMD_INIT_BLOCK;
struct vmci_init_blk init_block;
if (copy_from_user(&init_block, uptr, sizeof(init_block))) {
if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED) {
vmci_host_dev->context = vmci_ctx_create(init_block.cid,
init_block.flags, 0,
init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
if (copy_to_user(uptr, &init_block, sizeof(init_block))) {
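The seven VMCI lines above form a complete user/kernel handshake: userspace passes a struct vmci_init_blk through an ioctl, the host rejects any privilege flag other than RESTRICTED, creates a context, and writes the assigned context id back to the caller. A minimal sketch of that flow, assuming the ioctl scaffolding around the fragments; the trailing vmci_ctx_create() arguments (user_version, credentials) and the exact error codes are assumptions, not quoted code:

static int do_init_context(struct vmci_host_dev *vmci_host_dev,
			   void __user *uptr)
{
	struct vmci_init_blk init_block;

	if (copy_from_user(&init_block, uptr, sizeof(init_block)))
		return -EFAULT;

	/* Userspace may request only the RESTRICTED privilege. */
	if (init_block.flags & ~VMCI_PRIVILEGE_FLAG_RESTRICTED)
		return -EINVAL;

	vmci_host_dev->context = vmci_ctx_create(init_block.cid,
						 init_block.flags, 0,
						 vmci_host_dev->user_version,
						 current_cred());
	if (IS_ERR(vmci_host_dev->context))
		return PTR_ERR(vmci_host_dev->context);

	/* Report the context id actually assigned (cid may be "any"). */
	init_block.cid = vmci_ctx_get_id(vmci_host_dev->context);
	if (copy_to_user(uptr, &init_block, sizeof(init_block)))
		return -EFAULT;

	return 0;
}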
volatile struct lance_init_block *ib = lp->init_block;
volatile struct lance_init_block *init_block; /* CPU address of RAM */
volatile struct lance_init_block *init_block; /* Host's view */
priv->init_block = (struct lance_init_block *)dev->mem_start;
lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
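The comments on the declarations above ("CPU address of RAM", "Host's view") point at the key subtlety of the memory-mapped LANCE variants: the init block lives in dual-ported adapter RAM, so the same bytes have two addresses, the kernel's mapped virtual address and the chip's offset within its own RAM window. A sketch of the idea under an assumed board layout; board_ram_phys, the 64 KiB mapping size, and the offset field are illustrative, only HPLANCE_MEMOFF comes from the fragments:

/* CPU side: map the shared on-board RAM into kernel address space. */
void __iomem *va = ioremap(board_ram_phys, 0x10000);

/* The CPU reaches the init block through the mapping ... */
lp->init_block =
	(volatile struct lance_init_block *)((unsigned long)va + HPLANCE_MEMOFF);

/* ... while the chip is told only where the block sits inside its own
 * RAM window; it never sees host virtual (or even host bus) addresses.
 */
lp->lance_init_block_offset = HPLANCE_MEMOFF;	/* hypothetical field */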
struct lance_init_block init_block;
lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
lp->init_block.phys_addr[i] = dev->dev_addr[i];
lp->init_block.filter[0] = 0x00000000;
lp->init_block.filter[1] = 0x00000000;
lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
(u32) isa_virt_to_bus(&lp->init_block));
dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));
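The outw() pairs above are the heart of bringing a LANCE up: the chip accepts only a 24-bit bus address for its init block, delivered 16 bits at a time through the RAP/RDP register pair, after which setting INIT in CSR0 makes the chip DMA the block in by itself. A sketch of the full sequence, assuming the classic AMD 7990 register layout these fragments use:

u32 ib_bus = (u32)isa_virt_to_bus(&lp->init_block);

outw(1, ioaddr + LANCE_ADDR);			/* RAP: select CSR1 */
outw(ib_bus & 0xffff, ioaddr + LANCE_DATA);	/* IADR[15:0] */
outw(2, ioaddr + LANCE_ADDR);			/* RAP: select CSR2 */
outw(ib_bus >> 16, ioaddr + LANCE_DATA);	/* IADR[23:16] */
outw(0, ioaddr + LANCE_ADDR);			/* RAP: select CSR0 */
outw(0x0001, ioaddr + LANCE_DATA);		/* CSR0.INIT: fetch the block */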
lp->init_block.mode = 0x0000;
lp->lance.init_block = (struct lance_init_block *)(lp->ram); /* CPU addr */
lp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(*lp->init_block),
				    &lp->init_dma_addr, GFP_KERNEL);
if (!lp->init_block) {
lp->init_block->mode = cpu_to_le16(0x0003); /* Disable Rx and Tx. */
lp->init_block->tlen_rlen =
	cpu_to_le16(lp->tx_len_bits | lp->rx_len_bits);
lp->init_block->phys_addr[i] = dev->dev_addr[i];
lp->init_block->filter[0] = 0x00000000;
lp->init_block->filter[1] = 0x00000000;
lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
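Where the ISA driver above leans on isa_virt_to_bus(), the PCI-era pcnet32 lines allocate the init block from DMA-coherent memory, keeping the device-side handle (init_dma_addr) next to the CPU pointer, and store every multi-byte field little-endian because that is the byte order the bus master parses. A condensed sketch of the probe-time pattern; names follow the fragments, error handling is abbreviated:

static int init_block_setup(struct pcnet32_private *lp, struct pci_dev *pdev,
			    struct net_device *dev)
{
	int i;

	lp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(*lp->init_block),
					    &lp->init_dma_addr, GFP_KERNEL);
	if (!lp->init_block)
		return -ENOMEM;

	lp->init_block->mode = cpu_to_le16(0x0003);	/* DTX|DRX: stay quiet */
	for (i = 0; i < ETH_ALEN; i++)
		lp->init_block->phys_addr[i] = dev->dev_addr[i];
	lp->init_block->filter[0] = 0;	/* logical-address filter: empty */
	lp->init_block->filter[1] = 0;
	lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
	lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
	return 0;
}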
dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp->init_block),
lp->init_block, lp->init_dma_addr);
lp->init_block->mode =
lp->init_block->tlen_rlen =
lp->init_block->phys_addr[i] = dev->dev_addr[i];
lp->init_block->rx_ring = cpu_to_le32(lp->rx_ring_dma_addr);
lp->init_block->tx_ring = cpu_to_le32(lp->tx_ring_dma_addr);
struct pcnet32_init_block *init_block;
volatile struct pcnet32_init_block *ib = lp->init_block;
lp->init_block->mode =
lp->init_block->mode = cpu_to_le16(csr15);
(unsigned long) cp->init_block;
u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
memset(cp->init_block, 0, sizeof(struct cas_init_block));
cp->init_block =
	dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
			   &cp->block_dvma, GFP_KERNEL);
if (!cp->init_block) {
cp->init_txds[i] = cp->init_block->txds[i];
cp->init_rxds[i] = cp->init_block->rxds[i];
cp->init_rxcs[i] = cp->init_block->rxcs[i];
cp->init_block, cp->block_dvma);
struct cas_init_block *init_block;
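The cassini fragments push the coherent-allocation idea further: one struct cas_init_block holds every TX descriptor ring, RX descriptor ring, and RX completion ring, and the per-ring pointers are aliases into that single block, so setup and teardown each need exactly one DMA allocation. A sketch of the carve-up; the ring-count macros are assumed to match cassini's header:

cp->init_block =
	dma_alloc_coherent(&pdev->dev, sizeof(struct cas_init_block),
			   &cp->block_dvma, GFP_KERNEL);
if (!cp->init_block)
	return -ENOMEM;
memset(cp->init_block, 0, sizeof(struct cas_init_block));

for (i = 0; i < N_TX_RINGS; i++)
	cp->init_txds[i] = cp->init_block->txds[i];	/* alias, not a copy */
for (i = 0; i < N_RX_DESC_RINGS; i++)
	cp->init_rxds[i] = cp->init_block->rxds[i];
for (i = 0; i < N_RX_COMP_RINGS; i++)
	cp->init_rxcs[i] = cp->init_block->rxcs[i];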
struct gem_txd *txd = &gp->init_block->txd[entry];
txd = &gp->init_block->txd[entry];
txd = &gp->init_block->txd[first_entry];
struct gem_init_block *gb = gp->init_block;
gp->init_block, gp->gblock_dvma);
gp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
				    &gp->gblock_dvma, GFP_KERNEL);
if (!gp->init_block) {
struct gem_rxd *rxd = &gp->init_block->rxd[i];
&gp->init_block->rxd[cluster_start];
struct gem_rxd *rxd = &gp->init_block->rxd[entry];
struct gem_init_block *init_block;
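sungem follows the same single-block layout: gem_init_block embeds the txd[] and rxd[] arrays directly, so queueing a packet is just indexing into the block and publishing the descriptor. A sketch of a TX descriptor fill under that layout; the control-word flag names follow sungem's conventions, and mapping/len are assumed to come from an earlier dma_map_single():

struct gem_txd *txd = &gp->init_block->txd[entry];
u64 ctrl = (len & TXDCTRL_BUFSZ) | TXDCTRL_SOF | TXDCTRL_EOF;

txd->buffer = cpu_to_le64(mapping);	/* DMA address of the payload */
dma_wmb();				/* body visible before ownership */
txd->control_word = cpu_to_le64(ctrl);
entry = (entry + 1) & (TX_RING_SIZE - 1);	/* advance the ring */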
void (*init_block)(struct inode *, struct buffer_head *, void *),
ret = nilfs_mdt_get_block(inode, blkoff, create, init_block, bhp);
ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block);
if (init_block)
	init_block(inode, bh, from);
err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
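The nilfs2 hits change register entirely: here init_block is not a structure but an optional callback threaded through the metadata-file helpers, invoked exactly once when a block is freshly created and never for blocks read back from disk. A sketch of the pattern; lookup_block() and make_new_block() are hypothetical stand-ins for the real nilfs internals:

static int mdt_read_or_create(struct inode *inode, unsigned long blkoff,
			      int create,
			      void (*init_block)(struct inode *,
						 struct buffer_head *, void *),
			      struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	int err = lookup_block(inode, blkoff, &bh);	/* hypothetical */

	if (err == -ENOENT && create) {
		err = make_new_block(inode, blkoff, &bh); /* hypothetical */
		if (!err && init_block)
			init_block(inode, bh, NULL);	/* first use only */
	}
	if (!err)
		*out_bh = bh;
	return err;
}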
static void init_block (deflate_state *s);
init_block(s);
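In zlib's deflate, init_block() is the per-block reset: each time a block is emitted, the literal/length, distance, and bit-length frequency tables are cleared so the next block's Huffman trees are built from fresh statistics. A sketch of what such a reset does; field names follow zlib's deflate_state, and exact members (sym_next vs. the older last_lit) vary across zlib versions:

static void init_block(deflate_state *s)
{
	int n;

	for (n = 0; n < L_CODES; n++)
		s->dyn_ltree[n].Freq = 0;
	for (n = 0; n < D_CODES; n++)
		s->dyn_dtree[n].Freq = 0;
	for (n = 0; n < BL_CODES; n++)
		s->bl_tree[n].Freq = 0;

	s->dyn_ltree[END_BLOCK].Freq = 1;	/* every block ends with EOB */
	s->opt_len = s->static_len = 0L;
	s->sym_next = s->matches = 0;
}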
u8 param_id, bool init_block, bool final_block,
msg.ext.large_config.init_block = init_block;
msg.ext.large_config.init_block = 1;
u32 init_block:1;
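The final fragments are IPC flag plumbing: a configuration payload larger than one message window is sent in pieces, with the first piece marked init_block and the last marked final_block so the firmware can reassemble the stream. A sketch of the chunking loop; every function and constant here (send_chunk, IPC_MAX_PAYLOAD, struct ipc_dev) is illustrative, only the init_block/final_block flags come from the fragments:

static int send_large_config(struct ipc_dev *dev, u8 param_id,
			     const u8 *data, size_t size)
{
	size_t off = 0;

	while (off < size) {
		size_t chunk = min_t(size_t, size - off, IPC_MAX_PAYLOAD);
		bool init_block = (off == 0);		  /* first piece */
		bool final_block = (off + chunk == size); /* last piece */
		int ret;

		ret = send_chunk(dev, param_id, init_block, final_block,
				 data + off, chunk);	/* hypothetical */
		if (ret)
			return ret;
		off += chunk;
	}
	return 0;
}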