Symbol: queue
crypto/heimdal/lib/ipc/client.c
178
dispatch_queue_t queue;
crypto/heimdal/lib/ipc/client.c
243
c->queue = dispatch_queue_create("heim-ipc-async-client", NULL);
crypto/heimdal/lib/ipc/client.c
244
c->source = dispatch_source_create(DISPATCH_SOURCE_TYPE_MACH_RECV, c->mp, 0, c->queue);
crypto/heimdal/lib/ipc/client.c
245
dispatch_set_context(c->queue, c);
crypto/heimdal/lib/ipc/client.c
256
dispatch_release(c->queue);
crypto/heimdal/lib/ipc/server.c
109
dispatch_queue_t queue;
crypto/heimdal/lib/ipc/server.c
318
s->queue = dispatch_queue_create(name, NULL);
crypto/heimdal/lib/ipc/server.c
323
s->sport, 0, s->queue);
crypto/heimdal/lib/ipc/server.c
325
dispatch_release(s->queue);
crypto/heimdal/lib/ipc/server.c
331
dispatch_set_context(s->queue, ctx);
crypto/heimdal/lib/ipc/server.c
343
dispatch_release(st->queue);
crypto/krb5/src/plugins/kdb/db2/libdb2/hash/hash.c
788
TAILQ_INSERT_TAIL(&hashp->curs_queue, new_curs, queue);
crypto/krb5/src/plugins/kdb/db2/libdb2/hash/hash.h
49
TAILQ_ENTRY(cursor_t) queue;
crypto/openssh/mux.c
1505
struct sshbuf *queue;
crypto/openssh/mux.c
1513
if ((queue = sshbuf_new()) == NULL)
crypto/openssh/mux.c
1515
if ((r = sshbuf_put_stringb(queue, m)) != 0)
crypto/openssh/mux.c
1518
need = sshbuf_len(queue);
crypto/openssh/mux.c
1519
ptr = sshbuf_ptr(queue);
crypto/openssh/mux.c
1523
sshbuf_free(queue);
crypto/openssh/mux.c
1540
sshbuf_free(queue);
crypto/openssh/mux.c
1546
sshbuf_free(queue);
crypto/openssh/mux.c
1552
sshbuf_free(queue);
crypto/openssh/mux.c
1559
struct sshbuf *queue;
crypto/openssh/mux.c
1564
if ((queue = sshbuf_new()) == NULL)
crypto/openssh/mux.c
1566
if (mux_client_read(fd, queue, 4, timeout_ms) != 0) {
crypto/openssh/mux.c
1570
sshbuf_free(queue);
crypto/openssh/mux.c
1574
need = PEEK_U32(sshbuf_ptr(queue));
crypto/openssh/mux.c
1575
if (mux_client_read(fd, queue, need, timeout_ms) != 0) {
crypto/openssh/mux.c
1578
sshbuf_free(queue);
crypto/openssh/mux.c
1582
if ((r = sshbuf_get_string_direct(queue, &ptr, &have)) != 0 ||
crypto/openssh/mux.c
1585
sshbuf_free(queue);
crypto/openssh/sftp-server.c
503
get_handle(struct sshbuf *queue, int *hp)
crypto/openssh/sftp-server.c
510
if ((r = sshbuf_get_string(queue, &handle, &hlen)) != 0)
crypto/openssl/ssl/record/methods/dtls_meth.c
283
static int dtls_rlayer_buffer_record(OSSL_RECORD_LAYER *rl, struct pqueue_st *queue,
crypto/openssl/ssl/record/methods/dtls_meth.c
290
if (pqueue_size(queue) >= 100)
crypto/openssl/ssl/record/methods/dtls_meth.c
322
if (pqueue_insert(queue, item) == NULL) {
crypto/openssl/ssl/record/methods/dtls_meth.c
353
struct pqueue_st *queue)
crypto/openssl/ssl/record/methods/dtls_meth.c
357
item = pqueue_pop(queue);
crypto/openssl/ssl/record/rec_layer_d1.c
127
if (pqueue_insert(queue, item) == NULL) {
crypto/openssl/ssl/record/rec_layer_d1.c
79
struct pqueue_st *queue = s->rlayer.d->buffered_app_data;
crypto/openssl/ssl/record/rec_layer_d1.c
82
if (pqueue_size(queue) >= 100)
lib/libpmc/libpmc.c
498
__K8MASK(dram-controller-queue-bypass, 3),
libexec/atrun/atrun.c
127
int queue;
libexec/atrun/atrun.c
299
queue = *filename;
libexec/atrun/atrun.c
303
nice(tolower(queue) - 'a');
libexec/atrun/atrun.c
452
char queue;
libexec/atrun/atrun.c
541
if (sscanf(dirent->d_name,"%c%5lx%8lx",&queue,&jobno,&ctm) != 3)
libexec/atrun/atrun.c
547
if (isupper(queue) && (strcmp(batch_name,dirent->d_name) > 0)) {
libexec/atrun/atrun.c
556
if (islower(queue))
sbin/camcontrol/camcontrol.c
1740
ATA_QUEUE_LEN(parm->queue) + 1);
sbin/pfctl/parse.y
220
char queue[PF_QNAME_SIZE];
sbin/pfctl/parse.y
2377
if (strlcpy($$->queue, $1, sizeof($$->queue)) >=
sbin/pfctl/parse.y
2378
sizeof($$->queue)) {
sbin/pfctl/parse.y
2380
"%d chars)", $1, sizeof($$->queue)-1);
sbin/pfctl/parse.y
558
struct node_queue *queue;
sbin/pfctl/parse.y
6268
LOOP_THROUGH(struct node_queue, queue,
sbin/pfctl/parse.y
6271
queue->queue);
sbin/pfctl/parse.y
6306
LOOP_THROUGH(struct node_queue, queue, nqueues,
sbin/pfctl/parse.y
6317
if (strlcpy(n->queue, queue->queue,
sbin/pfctl/parse.y
6318
sizeof(n->queue)) >= sizeof(n->queue))
sbin/pfctl/parse.y
6365
if (!strncmp(a->qname, tqueue->queue, PF_QNAME_SIZE) &&
sbin/pfctl/parse.y
6417
if (!strcmp(a->qname, nq->queue)) {
sbin/pfctl/parse.y
6431
if (strlcpy(n->queue, nq->queue,
sbin/pfctl/parse.y
6432
sizeof(n->queue)) >=
sbin/pfctl/parse.y
6433
sizeof(n->queue))
sbin/pfctl/parse.y
6458
queue, nqueues,
sbin/pfctl/parse.y
6460
queue->queue);
sbin/pfctl/parse.y
649
%type <v.queue> qassign qassign_list qassign_item
sbin/pfctl/pfctl_optimize.c
826
TAILQ_HEAD( , pf_opt_rule) queue;
sbin/pfctl/pfctl_optimize.c
857
TAILQ_INIT(&queue);
sbin/pfctl/pfctl_optimize.c
858
TAILQ_CONCAT(&queue, &block->sb_rules, por_entry);
sbin/pfctl/pfctl_optimize.c
860
while ((por1 = TAILQ_FIRST(&queue)) != NULL) {
sbin/pfctl/pfctl_optimize.c
861
TAILQ_REMOVE(&queue, por1, por_entry);
sbin/pfctl/pfctl_optimize.c
892
struct pf_opt_queue queue;
sbin/pfctl/pfctl_optimize.c
897
TAILQ_INIT(&queue);
sbin/pfctl/pfctl_optimize.c
929
TAILQ_INSERT_TAIL(&queue, por, por_entry);
sbin/pfctl/pfctl_optimize.c
937
if (construct_superblocks(pf, &queue, &prof_superblocks))
sys/arm/ti/cpsw/if_cpsw.c
2670
struct cpsw_queue *queue)
sys/arm/ti/cpsw/if_cpsw.c
2676
CTLFLAG_RD, &queue->queue_slots, 0,
sys/arm/ti/cpsw/if_cpsw.c
2679
CTLFLAG_RD, &queue->active_queue_len, 0,
sys/arm/ti/cpsw/if_cpsw.c
2682
CTLFLAG_RD, &queue->max_active_queue_len, 0,
sys/arm/ti/cpsw/if_cpsw.c
2685
CTLFLAG_RD, &queue->avail_queue_len, 0,
sys/arm/ti/cpsw/if_cpsw.c
2689
CTLFLAG_RD, &queue->max_avail_queue_len, 0,
sys/arm/ti/cpsw/if_cpsw.c
2692
CTLFLAG_RD, &queue->queue_adds, 0,
sys/arm/ti/cpsw/if_cpsw.c
2695
CTLFLAG_RD, &queue->queue_removes, 0,
sys/arm/ti/cpsw/if_cpsw.c
2698
CTLFLAG_RD, &queue->queue_restart, 0,
sys/arm/ti/cpsw/if_cpsw.c
2701
CTLFLAG_RD, &queue->longest_chain, 0,
sys/arm/ti/cpsw/if_cpsw.c
385
#define cpsw_write_hdp_slot(sc, queue, slot) \
sys/arm/ti/cpsw/if_cpsw.c
386
cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot))
sys/arm/ti/cpsw/if_cpsw.c
388
#define cpsw_read_cp(sc, queue) \
sys/arm/ti/cpsw/if_cpsw.c
389
cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET)
sys/arm/ti/cpsw/if_cpsw.c
390
#define cpsw_write_cp(sc, queue, val) \
sys/arm/ti/cpsw/if_cpsw.c
391
cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val))
sys/arm/ti/cpsw/if_cpsw.c
392
#define cpsw_write_cp_slot(sc, queue, slot) \
sys/arm/ti/cpsw/if_cpsw.c
393
cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot))
sys/arm/ti/cpsw/if_cpsw.c
398
cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
sys/arm/ti/cpsw/if_cpsw.c
400
uint32_t reg = queue->hdp_offset;
sys/arm/ti/cpsw/if_cpsw.c
407
cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot)
sys/arm/ti/cpsw/if_cpsw.c
410
CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue)));
sys/arm/ti/cpsw/if_cpsw.c
411
cpsw_write_cp(sc, queue, v);
sys/arm/ti/cpsw/if_cpsw.c
499
cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested)
sys/arm/ti/cpsw/if_cpsw.c
517
STAILQ_INSERT_TAIL(&queue->avail, slot, next);
sys/arm/ti/cpsw/if_cpsw.c
518
++queue->avail_queue_len;
sys/arm/ti/cpsw/if_cpsw.c
519
++queue->queue_slots;
sys/arm64/broadcom/genet/if_genet.c
722
gen_init_txring(struct gen_softc *sc, int queue, int qid, int base,
sys/arm64/broadcom/genet/if_genet.c
728
q = &sc->tx_queue[queue];
sys/arm64/broadcom/genet/if_genet.c
771
gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base,
sys/arm64/broadcom/genet/if_genet.c
778
q = &sc->rx_queue[queue];
sys/arm64/broadcom/genet/if_genet.c
826
sc->tx_queue[i].queue = i;
sys/arm64/broadcom/genet/if_genet.c
849
sc->rx_queue[i].queue = i;
sys/arm64/spe/arm_spe_dev.c
274
struct arm_spe_queue *queue;
sys/arm64/spe/arm_spe_dev.c
278
queue = malloc(sizeof(struct arm_spe_queue), M_ARM_SPE,
sys/arm64/spe/arm_spe_dev.c
284
queue->ident = info->ident;
sys/arm64/spe/arm_spe_dev.c
285
queue->offset = buf->pmbptr - buf_start_addr(buf->buf_idx, info);
sys/arm64/spe/arm_spe_dev.c
286
queue->buf_idx = buf->buf_idx;
sys/arm64/spe/arm_spe_dev.c
287
queue->final_buf = !info->enabled;
sys/arm64/spe/arm_spe_dev.c
288
queue->partial_rec = buf->partial_rec;
sys/arm64/spe/arm_spe_dev.c
292
STAILQ_INSERT_TAIL(&info->sc->pending, queue, next);
sys/cam/ata/ata_da.c
3014
TAILQ_HEAD(, bio) queue;
sys/cam/ata/ata_da.c
3017
TAILQ_INIT(&queue);
sys/cam/ata/ata_da.c
3018
TAILQ_CONCAT(&queue, &softc->trim_req.bps, bio_queue);
sys/cam/ata/ata_da.c
3032
while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
sys/cam/ata/ata_da.c
3033
TAILQ_REMOVE(&queue, bp1, bio_queue);
sys/cam/ata/ata_xpt.c
983
ATA_QUEUE_LEN(ident_buf->queue) + 1;
sys/cam/cam_iosched.c
2075
TAILQ_FOREACH(bp, &bq->queue, bio_queue) {
sys/cam/cam_queue.c
103
if (queue->queue_array != NULL) {
sys/cam/cam_queue.c
104
bcopy(queue->queue_array, new_array,
sys/cam/cam_queue.c
105
(queue->entries + 1) * sizeof(cam_pinfo *));
sys/cam/cam_queue.c
106
free(queue->queue_array, M_CAMQ);
sys/cam/cam_queue.c
108
queue->queue_array = new_array;
sys/cam/cam_queue.c
109
queue->array_size = new_size;
sys/cam/cam_queue.c
119
camq_insert(struct camq *queue, cam_pinfo *new_entry)
sys/cam/cam_queue.c
122
KASSERT(queue->entries < queue->array_size,
sys/cam/cam_queue.c
124
queue->entries, queue->array_size));
sys/cam/cam_queue.c
125
queue->entries++;
sys/cam/cam_queue.c
126
queue->queue_array[queue->entries] = new_entry;
sys/cam/cam_queue.c
127
new_entry->index = queue->entries;
sys/cam/cam_queue.c
128
if (queue->entries != 0)
sys/cam/cam_queue.c
129
heap_up(queue->queue_array, queue->entries);
sys/cam/cam_queue.c
139
camq_remove(struct camq *queue, int index)
sys/cam/cam_queue.c
143
if (index <= 0 || index > queue->entries)
sys/cam/cam_queue.c
145
"from queue %p of size %d", __func__, index, queue,
sys/cam/cam_queue.c
146
queue->entries);
sys/cam/cam_queue.c
148
removed_entry = queue->queue_array[index];
sys/cam/cam_queue.c
149
if (queue->entries != index) {
sys/cam/cam_queue.c
150
queue->queue_array[index] = queue->queue_array[queue->entries];
sys/cam/cam_queue.c
151
queue->queue_array[index]->index = index;
sys/cam/cam_queue.c
152
heap_down(queue->queue_array, index, queue->entries - 1);
sys/cam/cam_queue.c
155
queue->entries--;
sys/cam/cam_queue.c
166
camq_change_priority(struct camq *queue, int index, uint32_t new_priority)
sys/cam/cam_queue.c
168
if (new_priority > queue->queue_array[index]->priority) {
sys/cam/cam_queue.c
169
queue->queue_array[index]->priority = new_priority;
sys/cam/cam_queue.c
170
heap_down(queue->queue_array, index, queue->entries);
sys/cam/cam_queue.c
173
queue->queue_array[index]->priority = new_priority;
sys/cam/cam_queue.c
174
heap_up(queue->queue_array, index);
sys/cam/cam_queue.c
263
if (new_size > ccbq->queue.array_size)
sys/cam/cam_queue.c
264
return (camq_resize(&ccbq->queue, new_size));
sys/cam/cam_queue.c
273
if (camq_init(&ccbq->queue,
sys/cam/cam_queue.c
285
camq_fini(&ccbq->queue);
sys/cam/cam_queue.c
77
camq_fini(struct camq *queue)
sys/cam/cam_queue.c
79
if (queue->queue_array != NULL) {
sys/cam/cam_queue.c
80
free(queue->queue_array, M_CAMQ);
sys/cam/cam_queue.c
85
camq_resize(struct camq *queue, int new_size)
sys/cam/cam_queue.c
89
KASSERT(new_size >= queue->entries, ("camq_resize: "
sys/cam/cam_queue.c
91
new_size, queue->entries));
sys/cam/cam_queue.h
104
uint32_t camq_resize(struct camq *queue, int new_size);
sys/cam/cam_queue.h
114
void camq_fini(struct camq *queue);
sys/cam/cam_queue.h
120
void camq_insert(struct camq *queue, cam_pinfo *new_entry);
sys/cam/cam_queue.h
126
cam_pinfo *camq_remove(struct camq *queue, int index);
sys/cam/cam_queue.h
140
void camq_change_priority(struct camq *queue, int index,
sys/cam/cam_queue.h
146
return (ccbq->queue.entries + ccbq->queue_extra_entries);
sys/cam/cam_queue.h
160
struct camq *queue = &ccbq->queue;
sys/cam/cam_queue.h
171
if (queue->entries == queue->array_size &&
sys/cam/cam_queue.h
172
camq_resize(&ccbq->queue, queue->array_size * 2) != CAM_REQ_CMP) {
sys/cam/cam_queue.h
173
old_ccb = (struct ccb_hdr *)camq_remove(queue, queue->entries);
sys/cam/cam_queue.h
180
camq_insert(queue, &new_ccb->ccb_h.pinfo);
sys/cam/cam_queue.h
187
struct camq *queue = &ccbq->queue;
sys/cam/cam_queue.h
199
removed_entry = camq_remove(queue, ccb->ccb_h.pinfo.index);
sys/cam/cam_queue.h
220
camq_insert(queue, &bccb->pinfo);
sys/cam/cam_queue.h
226
return((union ccb *)ccbq->queue.queue_array[index]);
sys/cam/cam_queue.h
61
struct camq queue;
sys/cam/cam_sim.c
103
int max_tagged_dev_transactions, struct cam_devq *queue)
sys/cam/cam_sim.c
122
sim->devq = queue;
sys/cam/cam_sim.h
62
struct cam_devq *queue);
sys/cam/cam_xpt.c
265
static int xpt_schedule_dev(struct camq *queue, cam_pinfo *dev_pinfo,
sys/cam/cam_xpt.c
3015
start_ccb->crs.qfrozen_cnt = dev->ccbq.queue.qfrozen_cnt;
sys/cam/cam_xpt.c
3199
xpt_schedule_dev(struct camq *queue, cam_pinfo *pinfo,
sys/cam/cam_xpt.c
3215
camq_change_priority(queue, pinfo->index,
sys/cam/cam_xpt.c
3230
pinfo->generation = ++queue->generation;
sys/cam/cam_xpt.c
3231
camq_insert(queue, pinfo);
sys/cam/cam_xpt.c
331
if ((dev->ccbq.queue.entries > 0) &&
sys/cam/cam_xpt.c
333
(dev->ccbq.queue.qfrozen_cnt == 0)) {
sys/cam/cam_xpt.c
342
CAMQ_GET_PRIO(&dev->ccbq.queue));
sys/cam/cam_xpt.c
3446
ccb_h->pinfo.generation = ++path->device->ccbq.queue.generation;
sys/cam/cam_xpt.c
4370
dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt + count));
sys/cam/cam_xpt.c
4371
freeze = (dev->ccbq.queue.qfrozen_cnt += count);
sys/cam/cam_xpt.c
4443
dev->ccbq.queue.qfrozen_cnt, dev->ccbq.queue.qfrozen_cnt - count));
sys/cam/cam_xpt.c
4444
if (count > dev->ccbq.queue.qfrozen_cnt) {
sys/cam/cam_xpt.c
4447
count, dev->ccbq.queue.qfrozen_cnt);
sys/cam/cam_xpt.c
4449
count = dev->ccbq.queue.qfrozen_cnt;
sys/cam/cam_xpt.c
4451
dev->ccbq.queue.qfrozen_cnt -= count;
sys/cam/cam_xpt.c
4452
if (dev->ccbq.queue.qfrozen_cnt == 0) {
sys/cam/cam_xpt.c
4505
struct cam_doneq *queue;
sys/cam/cam_xpt.c
4529
queue = &cam_doneqs[hash];
sys/cam/cam_xpt.c
4530
mtx_lock(&queue->cam_doneq_mtx);
sys/cam/cam_xpt.c
4531
run = (queue->cam_doneq_sleep && STAILQ_EMPTY(&queue->cam_doneq));
sys/cam/cam_xpt.c
4532
STAILQ_INSERT_TAIL(&queue->cam_doneq, &done_ccb->ccb_h, sim_links.stqe);
sys/cam/cam_xpt.c
4534
mtx_unlock(&queue->cam_doneq_mtx);
sys/cam/cam_xpt.c
4536
wakeup(&queue->cam_doneq);
sys/cam/cam_xpt.c
5416
struct cam_doneq *queue = arg;
sys/cam/cam_xpt.c
5421
mtx_lock(&queue->cam_doneq_mtx);
sys/cam/cam_xpt.c
5423
while (STAILQ_EMPTY(&queue->cam_doneq))
sys/cam/cam_xpt.c
5424
msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
sys/cam/cam_xpt.c
5426
STAILQ_CONCAT(&doneq, &queue->cam_doneq);
sys/cam/cam_xpt.c
5427
mtx_unlock(&queue->cam_doneq_mtx);
sys/cam/cam_xpt.c
5434
mtx_lock(&queue->cam_doneq_mtx);
sys/cam/cam_xpt.c
5441
struct cam_doneq *queue = arg;
sys/cam/cam_xpt.c
5446
mtx_lock(&queue->cam_doneq_mtx);
sys/cam/cam_xpt.c
5448
while (STAILQ_EMPTY(&queue->cam_doneq)) {
sys/cam/cam_xpt.c
5449
queue->cam_doneq_sleep = 1;
sys/cam/cam_xpt.c
5450
msleep(&queue->cam_doneq, &queue->cam_doneq_mtx,
sys/cam/cam_xpt.c
5452
queue->cam_doneq_sleep = 0;
sys/cam/cam_xpt.c
5454
STAILQ_CONCAT(&doneq, &queue->cam_doneq);
sys/cam/cam_xpt.c
5455
mtx_unlock(&queue->cam_doneq_mtx);
sys/cam/cam_xpt.c
5464
mtx_lock(&queue->cam_doneq_mtx);
sys/cam/cam_xpt.c
5472
struct cam_doneq *queue;
sys/cam/cam_xpt.c
5477
queue = &cam_doneqs[i];
sys/cam/cam_xpt.c
5478
mtx_lock(&queue->cam_doneq_mtx);
sys/cam/cam_xpt.c
5479
while ((ccb_h = STAILQ_FIRST(&queue->cam_doneq)) != NULL) {
sys/cam/cam_xpt.c
5480
STAILQ_REMOVE_HEAD(&queue->cam_doneq, sim_links.stqe);
sys/cam/cam_xpt.c
5481
mtx_unlock(&queue->cam_doneq_mtx);
sys/cam/cam_xpt.c
5483
mtx_lock(&queue->cam_doneq_mtx);
sys/cam/cam_xpt.c
5485
mtx_unlock(&queue->cam_doneq_mtx);
sys/cam/ctl/ctl_backend_block.c
1233
TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
sys/cam/ctl/ctl_backend_block.c
1281
TAILQ_INSERT_TAIL(&queue, bio, bio_queue);
sys/cam/ctl/ctl_backend_block.c
1292
while ((bio = TAILQ_FIRST(&queue)) != NULL) {
sys/cam/ctl/ctl_backend_block.c
1293
TAILQ_REMOVE(&queue, bio, bio_queue);
sys/cam/nvme/nvme_da.c
1320
TAILQ_HEAD(, bio) queue;
sys/cam/nvme/nvme_da.c
1323
TAILQ_INIT(&queue);
sys/cam/nvme/nvme_da.c
1324
TAILQ_CONCAT(&queue, &trim->bps, bio_queue);
sys/cam/nvme/nvme_da.c
1338
bp1 = TAILQ_FIRST(&queue);
sys/cam/nvme/nvme_da.c
1344
while ((bp2 = TAILQ_FIRST(&queue)) != NULL) {
sys/cam/nvme/nvme_da.c
1345
TAILQ_REMOVE(&queue, bp2, bio_queue);
sys/cam/scsi/scsi_da.c
4792
TAILQ_HEAD(, bio) queue;
sys/cam/scsi/scsi_da.c
4794
TAILQ_INIT(&queue);
sys/cam/scsi/scsi_da.c
4795
TAILQ_CONCAT(&queue, &softc->delete_run_queue.queue, bio_queue);
sys/cam/scsi/scsi_da.c
4810
while ((bp1 = TAILQ_FIRST(&queue)) != NULL) {
sys/cam/scsi/scsi_da.c
4811
TAILQ_REMOVE(&queue, bp1, bio_queue);
sys/compat/linuxkpi/common/include/linux/fs.h
119
#define fasync_helper(fd, filp, on, queue) \
sys/compat/linuxkpi/common/include/linux/fs.h
122
*(queue) = &(filp)->f_sigio; \
sys/compat/linuxkpi/common/include/linux/fs.h
124
*(queue) = NULL; \
sys/compat/linuxkpi/common/include/linux/fs.h
128
#define kill_fasync(queue, sig, pollstat) \
sys/compat/linuxkpi/common/include/linux/fs.h
130
if (*(queue) != NULL) \
sys/compat/linuxkpi/common/include/linux/fs.h
131
pgsigio(*(queue), (sig), 0); \
sys/compat/linuxkpi/common/include/linux/netdevice.h
175
uint8_t queue;
sys/crypto/ccp/ccp.c
468
s->queue = q;
sys/crypto/ccp/ccp.c
543
qp = &sc->queues[s->queue];
sys/crypto/ccp/ccp.h
90
unsigned queue;
sys/crypto/ccp/ccp_hardware.c
107
ccp_read_queue_4(struct ccp_softc *sc, unsigned queue, uint32_t offset)
sys/crypto/ccp/ccp_hardware.c
112
return (ccp_read_4(sc, (CMD_Q_STATUS_INCR * (1 + queue)) + offset));
sys/crypto/ccp/ccp_hardware.c
116
ccp_write_queue_4(struct ccp_softc *sc, unsigned queue, uint32_t offset,
sys/crypto/ccp/ccp_hardware.c
119
ccp_write_4(sc, (CMD_Q_STATUS_INCR * (1 + queue)) + offset, value);
sys/crypto/ccp/ccp_hardware.c
210
ccp_hw_attach_queue(device_t dev, uint64_t lsbmask, unsigned queue)
sys/crypto/ccp/ccp_hardware.c
220
qp = &sc->queues[queue];
sys/crypto/ccp/ccp_hardware.c
226
if ((sc->valid_queues & (1 << queue)) == 0)
sys/crypto/ccp/ccp_hardware.c
229
ccp_queue_decode_lsb_regions(sc, lsbmask, queue);
sys/crypto/ccp/ccp_hardware.c
234
queue);
sys/crypto/ccp/ccp_hardware.c
235
sc->valid_queues &= ~(1 << queue);
sys/crypto/ccp/ccp_hardware.c
275
ccp_write_queue_4(sc, queue, CMD_Q_CONTROL_BASE, qp->qcontrol);
sys/crypto/ccp/ccp_hardware.c
276
ccp_write_queue_4(sc, queue, CMD_Q_INT_ENABLE_BASE, 0);
sys/crypto/ccp/ccp_hardware.c
279
ccp_write_queue_4(sc, queue, CMD_Q_INTERRUPT_STATUS_BASE,
sys/crypto/ccp/ccp_hardware.c
284
ccp_write_queue_4(sc, queue, CMD_Q_TAIL_LO_BASE,
sys/crypto/ccp/ccp_hardware.c
286
ccp_write_queue_4(sc, queue, CMD_Q_HEAD_LO_BASE,
sys/crypto/ccp/ccp_hardware.c
294
ccp_write_queue_4(sc, queue, CMD_Q_INT_ENABLE_BASE,
sys/crypto/ccp/ccp_hardware.c
299
ccp_write_queue_4(sc, queue, CMD_Q_CONTROL_BASE, qp->qcontrol);
sys/crypto/ccp/ccp_hardware.c
316
ccp_hw_detach_queue(device_t dev, unsigned queue)
sys/crypto/ccp/ccp_hardware.c
322
qp = &sc->queues[queue];
sys/crypto/ccp/ccp_hardware.c
328
if ((sc->valid_queues & (1 << queue)) == 0)
sys/crypto/ccp/ccp_lsb.c
41
unsigned queue)
sys/crypto/ccp/ccp_lsb.c
46
qp = &sc->queues[queue];
sys/crypto/ccp/ccp_lsb.c
51
if (((1 << queue) & lsbmask) != 0)
sys/crypto/ccp/ccp_lsb.h
42
unsigned queue);
sys/dev/aac/aac.c
103
static int aac_enqueue_fib(struct aac_softc *sc, int queue,
sys/dev/aac/aac.c
105
static int aac_dequeue_fib(struct aac_softc *sc, int queue,
sys/dev/aac/aac.c
107
static int aac_enqueue_response(struct aac_softc *sc, int queue,
sys/dev/aac/aac.c
2133
aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm)
sys/dev/aac/aac.c
2146
pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
sys/dev/aac/aac.c
2147
ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
sys/dev/aac/aac.c
2150
if (pi >= aac_qinfo[queue].size)
sys/dev/aac/aac.c
2166
(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
sys/dev/aac/aac.c
2167
(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
sys/dev/aac/aac.c
2170
sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
sys/dev/aac/aac.c
2173
if (aac_qinfo[queue].notify != 0)
sys/dev/aac/aac.c
2174
AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
sys/dev/aac/aac.c
2187
aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size,
sys/dev/aac/aac.c
2198
pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
sys/dev/aac/aac.c
2199
ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
sys/dev/aac/aac.c
2208
if (pi >= aac_qinfo[queue].size)
sys/dev/aac/aac.c
2216
if (ci >= aac_qinfo[queue].size)
sys/dev/aac/aac.c
2220
*fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size;
sys/dev/aac/aac.c
2222
switch (queue) {
sys/dev/aac/aac.c
2231
fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr /
sys/dev/aac/aac.c
2248
fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr;
sys/dev/aac/aac.c
2268
sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1;
sys/dev/aac/aac.c
2271
if (notify && (aac_qinfo[queue].notify != 0))
sys/dev/aac/aac.c
2272
AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
sys/dev/aac/aac.c
2283
aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib)
sys/dev/aac/aac.c
2298
pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX];
sys/dev/aac/aac.c
2299
ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX];
sys/dev/aac/aac.c
2302
if (pi >= aac_qinfo[queue].size)
sys/dev/aac/aac.c
2312
(sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size;
sys/dev/aac/aac.c
2313
(sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr;
sys/dev/aac/aac.c
2316
sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1;
sys/dev/aac/aac.c
2319
if (aac_qinfo[queue].notify != 0)
sys/dev/aac/aac.c
2320
AAC_QNOTIFY(sc, aac_qinfo[queue].notify);
sys/dev/aic7xxx/aic7xxx.c
362
ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
sys/dev/aic7xxx/aic7xxx.c
369
if ((scb = TAILQ_FIRST(queue)) != NULL
sys/dev/aic7xxx/aic7xxx.h
1206
struct scb_tailq *queue);
sys/dev/al_eth/al_eth.c
685
uint8_t udma, uint32_t queue)
sys/dev/al_eth/al_eth.c
691
if (queue >= AL_ETH_NUM_QUEUES)
sys/dev/al_eth/al_eth.c
694
al_eth_thash_table_set(&adapter->hal_adapter, idx, udma, queue);
sys/dev/aq/aq_hw.c
778
if (aq_vlans[i].queue != 0xFF) {
sys/dev/aq/aq_hw.c
780
aq_vlans[i].queue,
sys/dev/aq/aq_hw.h
245
uint8_t queue;
sys/dev/aq/aq_hw.h
253
int8_t queue;
sys/dev/aq/aq_hw_llh.c
705
uint32_t rx_intr_moderation_ctl, uint32_t queue)
sys/dev/aq/aq_hw_llh.c
707
AQ_WRITE_REG(aq_hw, rx_intr_moderation_ctl_adr(queue),
sys/dev/aq/aq_hw_llh.c
751
uint32_t tx_intr_moderation_ctl, uint32_t queue)
sys/dev/aq/aq_hw_llh.c
753
AQ_WRITE_REG(aq_hw, tx_intr_moderation_ctl_adr(queue),
sys/dev/aq/aq_hw_llh.h
404
uint32_t rx_intr_moderation_ctl, uint32_t queue);
sys/dev/aq/aq_hw_llh.h
426
uint32_t tx_intr_moderation_ctl, uint32_t queue);
sys/dev/aq/aq_hw_llh_internal.h
3259
#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4)
sys/dev/aq/aq_main.c
1057
aq_vlans[i].queue = 0xFF;
sys/dev/ata/chipsets/ata-promise.c
122
TAILQ_HEAD(, host_packet) queue;
sys/dev/ata/chipsets/ata-promise.c
1240
TAILQ_INSERT_TAIL(&hpktp->queue, hp, chain);
sys/dev/ata/chipsets/ata-promise.c
1256
if ((hp = TAILQ_FIRST(&hpktp->queue))) {
sys/dev/ata/chipsets/ata-promise.c
1257
TAILQ_REMOVE(&hpktp->queue, hp, chain);
sys/dev/ata/chipsets/ata-promise.c
284
TAILQ_INIT(&hpkt->queue);
sys/dev/ath/ath_hal/ar5212/ar5212_reset.c
1137
uint32_t queue;
sys/dev/ath/ath_hal/ar5212/ar5212_reset.c
1168
for (queue = 0; queue < AR_NUM_DCU; queue++) {
sys/dev/ath/ath_hal/ar5212/ar5212_reset.c
1170
AR_Q0_STS + (queue * 4)) &
sys/dev/atkbdc/psm.c
2243
sc->queue.count = 0;
sys/dev/atkbdc/psm.c
2244
sc->queue.head = 0;
sys/dev/atkbdc/psm.c
2245
sc->queue.tail = 0;
sys/dev/atkbdc/psm.c
2454
while (sc->queue.count <= 0) {
sys/dev/atkbdc/psm.c
2474
while ((sc->queue.count > 0) && (uio->uio_resid > 0)) {
sys/dev/atkbdc/psm.c
2476
l = imin(sc->queue.count, uio->uio_resid);
sys/dev/atkbdc/psm.c
2479
if (l > sizeof(sc->queue.buf) - sc->queue.head) {
sys/dev/atkbdc/psm.c
2480
bcopy(&sc->queue.buf[sc->queue.head], &buf[0],
sys/dev/atkbdc/psm.c
2481
sizeof(sc->queue.buf) - sc->queue.head);
sys/dev/atkbdc/psm.c
2482
bcopy(&sc->queue.buf[0],
sys/dev/atkbdc/psm.c
2483
&buf[sizeof(sc->queue.buf) - sc->queue.head],
sys/dev/atkbdc/psm.c
2484
l - (sizeof(sc->queue.buf) - sc->queue.head));
sys/dev/atkbdc/psm.c
2486
bcopy(&sc->queue.buf[sc->queue.head], &buf[0], l);
sys/dev/atkbdc/psm.c
2487
sc->queue.count -= l;
sys/dev/atkbdc/psm.c
2488
sc->queue.head = (sc->queue.head + l) % sizeof(sc->queue.buf);
sys/dev/atkbdc/psm.c
2542
sc->queue.count = 0;
sys/dev/atkbdc/psm.c
2543
sc->queue.head = 0;
sys/dev/atkbdc/psm.c
2544
sc->queue.tail = 0;
sys/dev/atkbdc/psm.c
260
synapticspacket_t queue[SYNAPTICS_PACKETQUEUE];
sys/dev/atkbdc/psm.c
4014
dx = abs(smoother->queue[smoother->queue_cursor].x -
sys/dev/atkbdc/psm.c
4016
dy = abs(smoother->queue[smoother->queue_cursor].y -
sys/dev/atkbdc/psm.c
4233
dx = x0 - smoother->queue[cursor].x;
sys/dev/atkbdc/psm.c
4234
dy = y0 - smoother->queue[cursor].y;
sys/dev/atkbdc/psm.c
4242
smoother->queue[cursor].x = x0;
sys/dev/atkbdc/psm.c
4243
smoother->queue[cursor].y = y0;
sys/dev/atkbdc/psm.c
4288
dxp = abs(x0 - smoother->queue[peer].x) + 1;
sys/dev/atkbdc/psm.c
4289
dyp = abs(y0 - smoother->queue[peer].y) + 1;
sys/dev/atkbdc/psm.c
451
ringbuf_t queue; /* mouse status queue */
sys/dev/atkbdc/psm.c
5222
if (sc->queue.count + pb->inputbytes < sizeof(sc->queue.buf)) {
sys/dev/atkbdc/psm.c
5224
sizeof(sc->queue.buf) - sc->queue.tail);
sys/dev/atkbdc/psm.c
5225
bcopy(&pb->ipacket[0], &sc->queue.buf[sc->queue.tail], l);
sys/dev/atkbdc/psm.c
5227
bcopy(&pb->ipacket[l], &sc->queue.buf[0],
sys/dev/atkbdc/psm.c
5229
sc->queue.tail = (sc->queue.tail + pb->inputbytes) %
sys/dev/atkbdc/psm.c
5230
sizeof(sc->queue.buf);
sys/dev/atkbdc/psm.c
5231
sc->queue.count += pb->inputbytes;
sys/dev/atkbdc/psm.c
5273
if (sc->queue.count > 0)
sys/dev/atkbdc/psm.c
5298
return (sc->queue.count != 0 ? 1 : 0);
sys/dev/axgbe/xgbe-dev.c
1737
unsigned int queue, unsigned int q_fifo_size)
sys/dev/axgbe/xgbe-dev.c
1744
__func__, queue, q_fifo_size, frame_fifo_size);
sys/dev/axgbe/xgbe-dev.c
1755
pdata->rx_rfa[queue] = 0;
sys/dev/axgbe/xgbe-dev.c
1756
pdata->rx_rfd[queue] = 0;
sys/dev/axgbe/xgbe-dev.c
1762
pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
sys/dev/axgbe/xgbe-dev.c
1763
pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
sys/dev/axgbe/xgbe-dev.c
1769
pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
sys/dev/axgbe/xgbe-dev.c
1770
pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
sys/dev/axgbe/xgbe-dev.c
1790
pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
sys/dev/axgbe/xgbe-dev.c
1791
pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
sys/dev/axgbe/xgbe-dev.c
1793
queue, pdata->rx_rfa[queue], pdata->rx_rfd[queue]);
sys/dev/axgbe/xgbe-dev.c
1952
unsigned int qptc, qptc_extra, queue;
sys/dev/axgbe/xgbe-dev.c
1964
for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
sys/dev/axgbe/xgbe-dev.c
1966
axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i);
sys/dev/axgbe/xgbe-dev.c
1967
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
sys/dev/axgbe/xgbe-dev.c
1969
pdata->q2tc_map[queue++] = i;
sys/dev/axgbe/xgbe-dev.c
1973
axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i);
sys/dev/axgbe/xgbe-dev.c
1974
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
sys/dev/axgbe/xgbe-dev.c
1976
pdata->q2tc_map[queue++] = i;
sys/dev/axgbe/xgbe-dev.c
2465
xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
sys/dev/axgbe/xgbe-dev.c
2476
tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
sys/dev/axgbe/xgbe-dev.c
2486
queue);
sys/dev/axgbe/xgbe-dev.c
2490
xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
sys/dev/axgbe/xgbe-dev.c
2497
return (xgbe_txq_prepare_tx_stop(pdata, queue));
sys/dev/axgbe/xgbe-dev.c
2500
if (queue < DMA_DSRX_FIRST_QUEUE) {
sys/dev/axgbe/xgbe-dev.c
2502
tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
sys/dev/axgbe/xgbe-dev.c
2504
tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
sys/dev/axgbe/xgbe-dev.c
2528
queue);
sys/dev/axgbe/xgbe-dev.c
2579
xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
sys/dev/axgbe/xgbe-dev.c
2590
rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
sys/dev/axgbe/xgbe-dev.c
2600
queue);
sys/dev/bwn/if_bwnvar.h
540
#define BWN_WME_PARAMS(queue) \
sys/dev/bwn/if_bwnvar.h
541
(BWN_SHARED_EDCFQ + (BWN_NR_WMEPARAMS * sizeof(uint16_t) * (queue)))
sys/dev/bxe/bxe.c
15985
struct sysctl_oid *queue_top, *queue;
sys/dev/bxe/bxe.c
16084
queue = SYSCTL_ADD_NODE(ctx, queue_top_children, OID_AUTO,
sys/dev/bxe/bxe.c
16086
queue_children = SYSCTL_CHILDREN(queue);
sys/dev/bxe/bxe.c
2745
uint16_t queue,
sys/dev/bxe/bxe.c
2754
struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
sys/dev/bxe/bxe.c
2759
fp->index, queue, cons, prod);
sys/dev/bxe/bxe.c
2763
KASSERT((queue < max_agg_queues),
sys/dev/bxe/bxe.c
2765
fp->index, queue, max_agg_queues));
sys/dev/bxe/bxe.c
2769
fp->index, queue));
sys/dev/bxe/bxe.c
2780
fp->index, queue, cons, prod);
sys/dev/bxe/bxe.c
2795
fp->rx_tpa_queue_used |= (1 << queue);
sys/dev/bxe/bxe.c
2844
uint16_t queue,
sys/dev/bxe/bxe.c
2860
fp->index, queue, tpa_info->len_on_bd, frag_size, pages);
sys/dev/bxe/bxe.c
2894
fp->index, queue, i, j, sge_idx, frag_size, frag_len);
sys/dev/bxe/bxe.c
2919
fp->index, queue, frag_size);
sys/dev/bxe/bxe.c
3032
uint16_t queue,
sys/dev/bxe/bxe.c
3043
fp->index, queue, tpa_info->placement_offset,
sys/dev/bxe/bxe.c
3049
rc = bxe_alloc_rx_tpa_mbuf(fp, queue);
sys/dev/bxe/bxe.c
3070
rc = bxe_fill_frag_mbuf(sc, fp, tpa_info, queue, pages, m, cqe, cqe_idx);
sys/dev/bxe/bxe.c
3100
fp->rx_tpa_info[queue].state = BXE_TPA_STATE_STOP;
sys/dev/bxe/bxe.c
3101
fp->rx_tpa_queue_used &= ~(1 << queue);
sys/dev/bxe/bxe.c
3231
uint8_t queue;
sys/dev/bxe/bxe.c
3243
queue = cqe->end_agg_cqe.queue_index;
sys/dev/bxe/bxe.c
3244
tpa_info = &fp->rx_tpa_info[queue];
sys/dev/bxe/bxe.c
3247
fp->index, queue);
sys/dev/bxe/bxe.c
3253
bxe_tpa_stop(sc, fp, tpa_info, queue, pages,
sys/dev/bxe/bxe.c
6287
int queue)
sys/dev/bxe/bxe.c
6289
struct bxe_sw_tpa_info *tpa_info = &fp->rx_tpa_info[queue];
sys/dev/bxe/bxe.c
676
int queue);
sys/dev/cxgb/cxgb_sge.c
2118
t3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
sys/dev/cxgb/cxgb_sge.c
2122
struct sge_txq *q = &qs->txq[queue];
sys/dev/cxgb/cxgb_sge.c
370
reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
sys/dev/cxgb/cxgb_sge.c
372
struct sge_txq *q = &qs->txq[queue];
sys/dev/cxgb/cxgb_sge.c
384
t3_free_tx_desc(qs, reclaim, queue);
sys/dev/cxgbe/iw_cxgbe/cq.c
1003
mm->addr = vtophys(chp->cq.queue);
sys/dev/cxgbe/iw_cxgbe/cq.c
124
cq->queue = dma_alloc_coherent(rhp->ibdev.dma_device, cq->memsize,
sys/dev/cxgbe/iw_cxgbe/cq.c
126
if (!cq->queue) {
sys/dev/cxgbe/iw_cxgbe/cq.c
131
memset(cq->queue, 0, cq->memsize);
sys/dev/cxgbe/iw_cxgbe/cq.c
194
dma_free_coherent(rhp->ibdev.dma_device, cq->memsize, cq->queue,
sys/dev/cxgbe/iw_cxgbe/cq.c
88
cq->memsize, cq->queue,
sys/dev/cxgbe/iw_cxgbe/cq.c
950
memsize = hwentries * sizeof *chp->cq.queue;
sys/dev/cxgbe/iw_cxgbe/qp.c
112
wq->rq.memsize, wq->rq.queue,
sys/dev/cxgbe/iw_cxgbe/qp.c
115
wq->sq.memsize, wq->sq.queue,
sys/dev/cxgbe/iw_cxgbe/qp.c
1764
sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
sys/dev/cxgbe/iw_cxgbe/qp.c
1769
sizeof(*qhp->wq.rq.queue);
sys/dev/cxgbe/iw_cxgbe/qp.c
181
wq->sq.queue = dma_alloc_coherent(rhp->ibdev.dma_device, wq->sq.memsize,
sys/dev/cxgbe/iw_cxgbe/qp.c
183
if (!wq->sq.queue) {
sys/dev/cxgbe/iw_cxgbe/qp.c
187
wq->sq.phys_addr = vtophys(wq->sq.queue);
sys/dev/cxgbe/iw_cxgbe/qp.c
189
memset(wq->sq.queue, 0, wq->sq.memsize);
sys/dev/cxgbe/iw_cxgbe/qp.c
191
wq->rq.queue = dma_alloc_coherent(rhp->ibdev.dma_device,
sys/dev/cxgbe/iw_cxgbe/qp.c
193
if (!wq->rq.queue) {
sys/dev/cxgbe/iw_cxgbe/qp.c
197
wq->rq.phys_addr = vtophys(wq->rq.queue);
sys/dev/cxgbe/iw_cxgbe/qp.c
199
memset(wq->rq.queue, 0, wq->rq.memsize);
sys/dev/cxgbe/iw_cxgbe/qp.c
204
wq->sq.queue, (unsigned long long)wq->sq.phys_addr,
sys/dev/cxgbe/iw_cxgbe/qp.c
205
wq->rq.queue, (unsigned long long)wq->rq.phys_addr);
sys/dev/cxgbe/iw_cxgbe/qp.c
319
wq->rq.memsize, wq->rq.queue,
sys/dev/cxgbe/iw_cxgbe/qp.c
323
wq->sq.memsize, wq->sq.queue,
sys/dev/cxgbe/iw_cxgbe/qp.c
354
if (dstp == (u8 *)&sq->queue[sq->size])
sys/dev/cxgbe/iw_cxgbe/qp.c
355
dstp = (u8 *)sq->queue;
sys/dev/cxgbe/iw_cxgbe/qp.c
356
if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
sys/dev/cxgbe/iw_cxgbe/qp.c
359
len = (u8 *)&sq->queue[sq->size] - dstp;
sys/dev/cxgbe/iw_cxgbe/qp.c
453
ret = build_isgl((__be64 *)sq->queue,
sys/dev/cxgbe/iw_cxgbe/qp.c
454
(__be64 *)&sq->queue[sq->size],
sys/dev/cxgbe/iw_cxgbe/qp.c
496
ret = build_isgl((__be64 *)sq->queue,
sys/dev/cxgbe/iw_cxgbe/qp.c
497
(__be64 *)&sq->queue[sq->size],
sys/dev/cxgbe/iw_cxgbe/qp.c
553
ret = build_isgl((__be64 *)qhp->wq.rq.queue,
sys/dev/cxgbe/iw_cxgbe/qp.c
554
(__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
sys/dev/cxgbe/iw_cxgbe/qp.c
757
if (++p == (__be64 *)&sq->queue[sq->size])
sys/dev/cxgbe/iw_cxgbe/qp.c
758
p = (__be64 *)sq->queue;
sys/dev/cxgbe/iw_cxgbe/qp.c
764
if (++p == (__be64 *)&sq->queue[sq->size])
sys/dev/cxgbe/iw_cxgbe/qp.c
765
p = (__be64 *)sq->queue;
sys/dev/cxgbe/iw_cxgbe/qp.c
811
wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
sys/dev/cxgbe/iw_cxgbe/qp.c
948
wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
sys/dev/cxgbe/iw_cxgbe/t4.h
312
union t4_wr *queue;
sys/dev/cxgbe/iw_cxgbe/t4.h
338
union t4_recv_wr *queue;
sys/dev/cxgbe/iw_cxgbe/t4.h
406
return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
sys/dev/cxgbe/iw_cxgbe/t4.h
456
return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
sys/dev/cxgbe/iw_cxgbe/t4.h
531
return wq->rq.queue[wq->rq.size].status.qp_err;
sys/dev/cxgbe/iw_cxgbe/t4.h
536
wq->rq.queue[wq->rq.size].status.qp_err = 1;
sys/dev/cxgbe/iw_cxgbe/t4.h
544
struct t4_cqe *queue;
sys/dev/cxgbe/iw_cxgbe/t4.h
620
cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
sys/dev/cxgbe/iw_cxgbe/t4.h
641
return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
sys/dev/cxgbe/iw_cxgbe/t4.h
654
if (cq->queue[prev_cidx].bits_type_ts != cq->bits_type_ts) {
sys/dev/cxgbe/iw_cxgbe/t4.h
659
} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
sys/dev/cxgbe/iw_cxgbe/t4.h
663
*cqe = &cq->queue[cq->cidx];
sys/dev/cxgbe/iw_cxgbe/t4.h
699
return ((struct t4_status_page *)&cq->queue[cq->size])->qp_err;
sys/dev/cxgbe/iw_cxgbe/t4.h
704
((struct t4_status_page *)&cq->queue[cq->size])->qp_err = 1;
sys/dev/cxgbe/t4_ioctl.h
329
int8_t queue; /* queue index; -1 => all queues */
sys/dev/cxgbe/t4_sched.c
425
if (!in_range(p->queue, 0, vi->ntxq - 1) ||
sys/dev/cxgbe/t4_sched.c
429
if (p->queue < 0) {
sys/dev/cxgbe/t4_sched.c
444
txq = &sc->sge.txq[vi->first_txq + p->queue];
sys/dev/drm2/drmP.h
1600
#define DRM_INIT_WAITQUEUE(queue) do {(void)(queue);} while (0)
sys/dev/drm2/drmP.h
1652
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
sys/dev/drm2/drmP.h
1657
ret = -mtx_sleep(&(queue), &dev->irq_lock, \
sys/dev/drm2/drmP.h
1684
wait_queue_head_t queue; /* vblank wait queue */
sys/dev/drm2/drm_os_freebsd.h
451
#define wake_up(queue) wakeup_one((void *)queue)
sys/dev/drm2/drm_os_freebsd.h
452
#define wake_up_interruptible(queue) wakeup_one((void *)queue)
sys/dev/drm2/drm_os_freebsd.h
453
#define wake_up_all(queue) wakeup((void *)queue)
sys/dev/drm2/drm_os_freebsd.h
454
#define wake_up_interruptible_all(queue) wakeup((void *)queue)
sys/dev/ena/ena.c
1568
struct ena_que *queue;
sys/dev/ena/ena.c
1572
queue = &adapter->que[i];
sys/dev/ena/ena.c
1573
while (taskqueue_cancel(queue->cleanup_tq, &queue->cleanup_task, NULL))
sys/dev/ena/ena.c
1574
taskqueue_drain(queue->cleanup_tq, &queue->cleanup_task);
sys/dev/ena/ena.c
1575
taskqueue_free(queue->cleanup_tq);
sys/dev/ena/ena.c
1588
struct ena_que *queue;
sys/dev/ena/ena.c
1666
queue = &adapter->que[i];
sys/dev/ena/ena.c
1668
NET_TASK_INIT(&queue->cleanup_task, 0, ena_cleanup, queue);
sys/dev/ena/ena.c
1669
queue->cleanup_tq = taskqueue_create_fast("ena cleanup",
sys/dev/ena/ena.c
1670
M_WAITOK, taskqueue_thread_enqueue, &queue->cleanup_tq);
sys/dev/ena/ena.c
1673
cpu_mask = &queue->cpu_mask;
sys/dev/ena/ena.c
1675
taskqueue_start_threads_cpuset(&queue->cleanup_tq, 1, PI_NET,
sys/dev/ena/ena.c
1720
struct ena_que *queue = arg;
sys/dev/ena/ena.c
1721
struct ena_adapter *adapter = queue->adapter;
sys/dev/ena/ena.c
1727
taskqueue_enqueue(queue->cleanup_tq, &queue->cleanup_task);
sys/dev/enetc/if_enetc.c
1094
struct enetc_tx_queue *queue;
sys/dev/enetc/if_enetc.c
1100
queue = &sc->tx_queues[ipi->ipi_qsidx];
sys/dev/enetc/if_enetc.c
1109
desc = &queue->ring[pidx];
sys/dev/enetc/if_enetc.c
1124
desc = &queue->ring[pidx];
sys/dev/enetc/if_enetc.c
1134
desc = &queue->ring[pidx];
sys/dev/enetc/if_enetc.c
1161
struct enetc_tx_queue *queue;
sys/dev/enetc/if_enetc.c
1164
queue = &sc->tx_queues[qid];
sys/dev/enetc/if_enetc.c
1166
cidx = queue->cidx;
sys/dev/enetc/if_enetc.c
1188
queue->cidx = hw_cidx;
sys/dev/enetc/if_enetc.c
1197
struct enetc_rx_queue *queue;
sys/dev/enetc/if_enetc.c
1202
queue = &sc->rx_queues[qid];
sys/dev/enetc/if_enetc.c
1203
desc = &queue->ring[pidx];
sys/dev/enetc/if_enetc.c
1214
desc = &queue->ring[pidx];
sys/dev/enetc/if_enetc.c
1229
struct enetc_rx_queue *queue;
sys/dev/enetc/if_enetc.c
1237
queue = &sc->rx_queues[ri->iri_qsidx];
sys/dev/enetc/if_enetc.c
1238
desc = &queue->ring[cidx];
sys/dev/enetc/if_enetc.c
1277
desc = &queue->ring[cidx];
sys/dev/enetc/if_enetc.c
1293
struct enetc_rx_queue *queue;
sys/dev/enetc/if_enetc.c
1299
queue = &sc->rx_queues[iru->iru_qsidx];
sys/dev/enetc/if_enetc.c
1306
desc = &queue->ring[pidx];
sys/dev/enetc/if_enetc.c
1319
if (!queue->enabled && pidx >= 8) {
sys/dev/enetc/if_enetc.c
1321
queue->enabled = true;
sys/dev/enetc/if_enetc.c
481
struct enetc_tx_queue *queue;
sys/dev/enetc/if_enetc.c
497
queue = &sc->tx_queues[i];
sys/dev/enetc/if_enetc.c
498
queue->sc = sc;
sys/dev/enetc/if_enetc.c
499
queue->ring = (union enetc_tx_bd*)(vaddrs[i]);
sys/dev/enetc/if_enetc.c
500
queue->ring_paddr = paddrs[i];
sys/dev/enetc/if_enetc.c
501
queue->cidx = 0;
sys/dev/enetc/if_enetc.c
512
struct enetc_rx_queue *queue;
sys/dev/enetc/if_enetc.c
527
queue = &sc->rx_queues[i];
sys/dev/enetc/if_enetc.c
528
queue->sc = sc;
sys/dev/enetc/if_enetc.c
529
queue->qid = i;
sys/dev/enetc/if_enetc.c
530
queue->ring = (union enetc_rx_bd*)(vaddrs[i]);
sys/dev/enetc/if_enetc.c
531
queue->ring_paddr = paddrs[i];
sys/dev/enetc/if_enetc.c
648
struct enetc_ctrl_queue *queue;
sys/dev/enetc/if_enetc.c
652
queue = &sc->ctrl_queue;
sys/dev/enetc/if_enetc.c
653
desc = &queue->ring[queue->pidx];
sys/dev/enetc/if_enetc.c
655
if (++queue->pidx == ENETC_MIN_DESC)
sys/dev/enetc/if_enetc.c
656
queue->pidx = 0;
sys/dev/enetc/if_enetc.c
669
bus_dmamap_sync(queue->dma.idi_tag, queue->dma.idi_map, BUS_DMASYNC_PREWRITE);
sys/dev/enetc/if_enetc.c
670
ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
sys/dev/enetc/if_enetc.c
674
if (ENETC_RD4(sc, ENETC_SICBDRCIR) == queue->pidx)
sys/dev/enetc/if_enetc.c
723
struct enetc_ctrl_queue *queue = &sc->ctrl_queue;
sys/dev/enetc/if_enetc.c
726
(uint32_t)queue->dma.idi_paddr);
sys/dev/enetc/if_enetc.c
728
(uint32_t)(queue->dma.idi_paddr >> 32));
sys/dev/enetc/if_enetc.c
730
queue->dma.idi_size / sizeof(struct enetc_cbd));
sys/dev/enetc/if_enetc.c
732
queue->pidx = 0;
sys/dev/enetc/if_enetc.c
733
ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx);
sys/dev/enetc/if_enetc.c
734
ENETC_WR4(sc, ENETC_SICBDRCIR, queue->pidx);
sys/dev/enetc/if_enetc.c
741
struct enetc_tx_queue *queue;
sys/dev/enetc/if_enetc.c
745
queue = &sc->tx_queues[i];
sys/dev/enetc/if_enetc.c
748
(uint32_t)queue->ring_paddr);
sys/dev/enetc/if_enetc.c
750
(uint32_t)(queue->ring_paddr >> 32));
sys/dev/enetc/if_enetc.c
771
struct enetc_rx_queue *queue;
sys/dev/enetc/if_enetc.c
778
queue = &sc->rx_queues[i];
sys/dev/enetc/if_enetc.c
781
(uint32_t)queue->ring_paddr);
sys/dev/enetc/if_enetc.c
783
(uint32_t)(queue->ring_paddr >> 32));
sys/dev/enetc/if_enetc.c
788
queue->enabled = false;
sys/dev/hyperv/utilities/hv_snapshot.c
252
#define SEARCH_REMOVE_REQ_LOCKED(reqp, queue, link, tmp, id) \
sys/dev/hyperv/utilities/hv_snapshot.c
254
STAILQ_FOREACH_SAFE(reqp, queue, link, tmp) { \
sys/dev/hyperv/utilities/hv_snapshot.c
256
STAILQ_REMOVE(queue, \
sys/dev/ice/ice_lib.c
8347
u16 queue = (reg & GL_MDET_TX_TCLAN_QNUM_M) >> GL_MDET_TX_TCLAN_QNUM_S;
sys/dev/ice/ice_lib.c
8350
ice_mdd_tx_tclan_str(event), queue, pf_num, vf_num);
sys/dev/ice/ice_lib.c
8367
u16 queue = (reg & GL_MDET_TX_PQM_QNUM_M) >> GL_MDET_TX_PQM_QNUM_S;
sys/dev/ice/ice_lib.c
8370
ice_mdd_tx_pqm_str(event), queue, pf_num, vf_num);
sys/dev/ice/ice_lib.c
8386
u16 queue = (reg & GL_MDET_RX_QNUM_M) >> GL_MDET_RX_QNUM_S;
sys/dev/ice/ice_lib.c
8389
ice_mdd_rx_str(event), queue, pf_num, vf_num);
sys/dev/ice/virtchnl.h
1710
} queue;
sys/dev/ips/ips.c
564
ips_copper_queue_t *queue = queueptr;
sys/dev/ips/ips.c
568
queue->base_phys_addr = segments[0].ds_addr;
sys/dev/ips/ips.h
151
struct bio_queue_head queue;
sys/dev/ips/ips_commands.c
156
iobuf = bioq_first(&sc->queue);
sys/dev/ips/ips_commands.c
163
bioq_remove(&sc->queue, iobuf);
sys/dev/ips/ips_disk.c
116
bioq_insert_tail(&dsc->sc->queue, iobuf);
sys/dev/ips/ips_pci.c
142
bioq_init(&sc->queue);
sys/dev/ips/ips_pci.c
192
bioq_flush(&sc->queue, NULL, ENXIO);
sys/dev/iscsi/icl_soft.c
1005
struct icl_pdu_stailq queue;
sys/dev/iscsi/icl_soft.c
1010
STAILQ_INIT(&queue);
sys/dev/iscsi/icl_soft.c
1020
if (STAILQ_EMPTY(&queue) || isc->check_send_space)
sys/dev/iscsi/icl_soft.c
1021
STAILQ_CONCAT(&queue, &isc->to_send);
sys/dev/iscsi/icl_soft.c
1024
icl_conn_send_pdus(isc, &queue);
sys/dev/iscsi/icl_soft.c
1038
if (STAILQ_EMPTY(&queue) &&
sys/dev/iscsi/icl_soft.c
1062
STAILQ_CONCAT(&isc->to_send, &queue);
sys/dev/iscsi/icl_soft.c
876
icl_conn_send_pdus(struct icl_soft_conn *isc, struct icl_pdu_stailq *queue)
sys/dev/iscsi/icl_soft.c
909
while (!STAILQ_EMPTY(queue)) {
sys/dev/iscsi/icl_soft.c
910
request = STAILQ_FIRST(queue);
sys/dev/iscsi/icl_soft.c
934
STAILQ_REMOVE_HEAD(queue, ip_next);
sys/dev/iscsi/icl_soft.c
954
request2 = STAILQ_FIRST(queue);
sys/dev/iscsi/icl_soft.c
960
STAILQ_REMOVE_HEAD(queue, ip_next);
sys/dev/ixgbe/if_ix.c
4829
u64 queue = 1ULL << vector;
sys/dev/ixgbe/if_ix.c
4833
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
sys/dev/ixgbe/if_ix.c
4836
mask = (queue & 0xFFFFFFFF);
sys/dev/ixgbe/if_ix.c
4839
mask = (queue >> 32);
sys/dev/ixgbe/if_ix.c
4852
u64 queue = 1ULL << vector;
sys/dev/ixgbe/if_ix.c
4856
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
sys/dev/ixgbe/if_ix.c
4859
mask = (queue & 0xFFFFFFFF);
sys/dev/ixgbe/if_ix.c
4862
mask = (queue >> 32);
sys/dev/ixgbe/if_ixv.c
683
u32 queue = 1 << vector;
sys/dev/ixgbe/if_ixv.c
686
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
sys/dev/ixgbe/if_ixv.c
697
u64 queue = (u64)(1 << vector);
sys/dev/ixgbe/if_ixv.c
700
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
sys/dev/ixgbe/ixgbe_82599.c
1565
u8 queue)
sys/dev/ixgbe/ixgbe_82599.c
1599
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
sys/dev/ixgbe/ixgbe_82599.c
1611
DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
sys/dev/ixgbe/ixgbe_82599.c
1902
u16 soft_id, u8 queue, bool cloud_mode)
sys/dev/ixgbe/ixgbe_82599.c
1972
if (queue == IXGBE_FDIR_DROP_QUEUE)
sys/dev/ixgbe/ixgbe_82599.c
1977
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
sys/dev/ixgbe/ixgbe_82599.c
2041
u16 soft_id, u8 queue, bool cloud_mode)
sys/dev/ixgbe/ixgbe_82599.c
2092
soft_id, queue, cloud_mode);
sys/dev/ixgbe/ixgbe_api.h
160
u8 queue);
sys/dev/ixgbe/ixgbe_api.h
165
u16 soft_id, u8 queue, bool cloud_mode);
sys/dev/ixgbe/ixgbe_api.h
173
u8 queue,
sys/dev/ixl/i40e_adminq_cmd.h
1384
__le16 queue;
sys/dev/ixl/i40e_common.c
1155
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
sys/dev/ixl/i40e_common.c
1157
u32 abs_queue_idx = hw->func_caps.base_queue + queue;
sys/dev/ixl/i40e_common.c
5589
u16 vsi_seid, u16 queue, bool is_add,
sys/dev/ixl/i40e_common.c
5608
cmd->queue = CPU_TO_LE16(queue);
sys/dev/ixl/i40e_lan_hmc.c
1300
u16 queue,
sys/dev/ixl/i40e_lan_hmc.c
1306
err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
sys/dev/ixl/i40e_lan_hmc.c
1320
u16 queue)
sys/dev/ixl/i40e_lan_hmc.c
1325
err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
sys/dev/ixl/i40e_lan_hmc.c
1339
u16 queue,
sys/dev/ixl/i40e_lan_hmc.c
1345
err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_TX, queue);
sys/dev/ixl/i40e_lan_hmc.c
1360
u16 queue,
sys/dev/ixl/i40e_lan_hmc.c
1366
err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
sys/dev/ixl/i40e_lan_hmc.c
1380
u16 queue)
sys/dev/ixl/i40e_lan_hmc.c
1385
err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
sys/dev/ixl/i40e_lan_hmc.c
1399
u16 queue,
sys/dev/ixl/i40e_lan_hmc.c
1405
err = i40e_hmc_get_object_va(hw, &context_bytes, I40E_HMC_LAN_RX, queue);
sys/dev/ixl/i40e_lan_hmc.h
180
u16 queue,
sys/dev/ixl/i40e_lan_hmc.h
183
u16 queue);
sys/dev/ixl/i40e_lan_hmc.h
185
u16 queue,
sys/dev/ixl/i40e_lan_hmc.h
188
u16 queue,
sys/dev/ixl/i40e_lan_hmc.h
191
u16 queue);
sys/dev/ixl/i40e_lan_hmc.h
193
u16 queue,
sys/dev/ixl/i40e_prototype.h
464
void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
sys/dev/ixl/i40e_prototype.h
565
u16 vsi_seid, u16 queue, bool is_add,
sys/dev/ixl/ixl_pf_iov.c
1244
struct virtchnl_queue_select *queue;
sys/dev/ixl/ixl_pf_iov.c
1246
queue = msg;
sys/dev/ixl/ixl_pf_iov.c
1247
if (queue->vsi_id != vf->vsi.vsi_num) {
sys/dev/ixl/ixl_pf_main.c
1848
u16 vf_num, queue;
sys/dev/ixl/ixl_pf_main.c
1862
queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
sys/dev/ixl/ixl_pf_main.c
1895
event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
sys/dev/ixl/ixl_pf_main.c
1900
event, queue, pf_num, vf_num, vp_mdet_num);
sys/dev/ixl/ixl_pf_main.c
1905
event, queue, pf_num, pf_mdet_num);
sys/dev/ixl/ixl_pf_main.c
1921
u16 queue;
sys/dev/ixl/ixl_pf_main.c
1936
queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
sys/dev/ixl/ixl_pf_main.c
1969
event, queue, pf_num, pf_mdet_num, vp_mdet_num);
sys/dev/ixl/ixl_pf_main.c
1974
event, queue, pf_num, vp_mdet_num);
sys/dev/ixl/ixl_pf_main.c
1979
event, queue, pf_num, pf_mdet_num);
sys/dev/mana/gdma.h
474
void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue);
sys/dev/mana/gdma.h
853
int mana_gd_post_and_ring(struct gdma_queue *queue,
sys/dev/mana/gdma.h
862
struct gdma_queue *queue);
sys/dev/mana/gdma_main.c
1004
queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
sys/dev/mana/gdma_main.c
1005
gmi = &queue->mem_info;
sys/dev/mana/gdma_main.c
1014
queue->head = 0;
sys/dev/mana/gdma_main.c
1015
queue->tail = 0;
sys/dev/mana/gdma_main.c
1016
queue->queue_mem_ptr = gmi->virt_addr;
sys/dev/mana/gdma_main.c
1017
queue->queue_size = spec->queue_size;
sys/dev/mana/gdma_main.c
1018
queue->monitor_avl_buf = spec->monitor_avl_buf;
sys/dev/mana/gdma_main.c
1019
queue->type = spec->type;
sys/dev/mana/gdma_main.c
1020
queue->gdma_dev = gd;
sys/dev/mana/gdma_main.c
1022
err = mana_gd_create_eq(gd, spec, true, queue);
sys/dev/mana/gdma_main.c
1026
*queue_ptr = queue;
sys/dev/mana/gdma_main.c
1032
free(queue, M_DEVBUF);
sys/dev/mana/gdma_main.c
1042
struct gdma_queue *queue;
sys/dev/mana/gdma_main.c
1049
queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
sys/dev/mana/gdma_main.c
1050
gmi = &queue->mem_info;
sys/dev/mana/gdma_main.c
1059
queue->head = 0;
sys/dev/mana/gdma_main.c
1060
queue->tail = 0;
sys/dev/mana/gdma_main.c
1061
queue->queue_mem_ptr = gmi->virt_addr;
sys/dev/mana/gdma_main.c
1062
queue->queue_size = spec->queue_size;
sys/dev/mana/gdma_main.c
1063
queue->monitor_avl_buf = spec->monitor_avl_buf;
sys/dev/mana/gdma_main.c
1064
queue->type = spec->type;
sys/dev/mana/gdma_main.c
1065
queue->gdma_dev = gd;
sys/dev/mana/gdma_main.c
1068
mana_gd_create_cq(spec, queue);
sys/dev/mana/gdma_main.c
1070
*queue_ptr = queue;
sys/dev/mana/gdma_main.c
1076
free(queue, M_DEVBUF);
sys/dev/mana/gdma_main.c
1081
mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
sys/dev/mana/gdma_main.c
1083
struct gdma_mem_info *gmi = &queue->mem_info;
sys/dev/mana/gdma_main.c
1085
switch (queue->type) {
sys/dev/mana/gdma_main.c
1087
mana_gd_destroy_eq(gc, queue->eq.disable_needed, queue);
sys/dev/mana/gdma_main.c
1091
mana_gd_destroy_cq(gc, queue);
sys/dev/mana/gdma_main.c
1103
queue->type);
sys/dev/mana/gdma_main.c
1109
free(queue, M_DEVBUF);
sys/dev/mana/gdma_main.c
1377
mana_gd_post_and_ring(struct gdma_queue *queue,
sys/dev/mana/gdma_main.c
1381
struct gdma_context *gc = queue->gdma_dev->gdma_context;
sys/dev/mana/gdma_main.c
1384
err = mana_gd_post_work_request(queue, wqe_req, wqe_info);
sys/dev/mana/gdma_main.c
1388
mana_gd_wq_ring_doorbell(gc, queue);
sys/dev/mana/gdma_main.c
341
struct gdma_queue *queue)
sys/dev/mana/gdma_main.c
347
if (queue->type != GDMA_EQ)
sys/dev/mana/gdma_main.c
353
req.hdr.dev_id = queue->gdma_dev->dev_id;
sys/dev/mana/gdma_main.c
354
req.type = queue->type;
sys/dev/mana/gdma_main.c
355
req.pdid = queue->gdma_dev->pdid;
sys/dev/mana/gdma_main.c
356
req.doolbell_id = queue->gdma_dev->doorbell;
sys/dev/mana/gdma_main.c
357
req.gdma_region = queue->mem_info.dma_region_handle;
sys/dev/mana/gdma_main.c
358
req.queue_size = queue->queue_size;
sys/dev/mana/gdma_main.c
359
req.log2_throttle_limit = queue->eq.log2_throttle_limit;
sys/dev/mana/gdma_main.c
360
req.eq_pci_msix_index = queue->eq.msix_index;
sys/dev/mana/gdma_main.c
370
queue->id = resp.queue_index;
sys/dev/mana/gdma_main.c
371
queue->eq.disable_needed = true;
sys/dev/mana/gdma_main.c
372
queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
sys/dev/mana/gdma_main.c
377
int mana_gd_disable_queue(struct gdma_queue *queue)
sys/dev/mana/gdma_main.c
379
struct gdma_context *gc = queue->gdma_dev->gdma_context;
sys/dev/mana/gdma_main.c
384
if (queue->type != GDMA_EQ)
sys/dev/mana/gdma_main.c
386
queue->type);
sys/dev/mana/gdma_main.c
391
req.hdr.dev_id = queue->gdma_dev->dev_id;
sys/dev/mana/gdma_main.c
392
req.type = queue->type;
sys/dev/mana/gdma_main.c
393
req.queue_index = queue->id;
sys/dev/mana/gdma_main.c
471
mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue)
sys/dev/mana/gdma_main.c
473
mana_gd_ring_doorbell(gc, queue->gdma_dev->doorbell, queue->type,
sys/dev/mana/gdma_main.c
474
queue->id, queue->head * GDMA_WQE_BU_SIZE, 0);
sys/dev/mana/gdma_main.c
629
mana_gd_register_irq(struct gdma_queue *queue,
sys/dev/mana/gdma_main.c
632
struct gdma_dev *gd = queue->gdma_dev;
sys/dev/mana/gdma_main.c
649
queue->eq.msix_index = msi_index;
sys/dev/mana/gdma_main.c
673
gic->arg = queue;
sys/dev/mana/gdma_main.c
684
mana_gd_deregiser_irq(struct gdma_queue *queue)
sys/dev/mana/gdma_main.c
686
struct gdma_dev *gd = queue->gdma_dev;
sys/dev/mana/gdma_main.c
696
msix_index = queue->eq.msix_index;
sys/dev/mana/gdma_main.c
708
queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
sys/dev/mana/gdma_main.c
769
struct gdma_queue *queue)
sys/dev/mana/gdma_main.c
774
err = mana_gd_test_eq(gc, queue);
sys/dev/mana/gdma_main.c
780
mana_gd_deregiser_irq(queue);
sys/dev/mana/gdma_main.c
782
if (queue->eq.disable_needed)
sys/dev/mana/gdma_main.c
783
mana_gd_disable_queue(queue);
sys/dev/mana/gdma_main.c
788
bool create_hwq, struct gdma_queue *queue)
sys/dev/mana/gdma_main.c
795
queue->eq.msix_index = INVALID_PCI_MSIX_INDEX;
sys/dev/mana/gdma_main.c
797
log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);
sys/dev/mana/gdma_main.c
806
err = mana_gd_register_irq(queue, spec);
sys/dev/mana/gdma_main.c
812
queue->eq.callback = spec->eq.callback;
sys/dev/mana/gdma_main.c
813
queue->eq.context = spec->eq.context;
sys/dev/mana/gdma_main.c
814
queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
sys/dev/mana/gdma_main.c
815
queue->eq.log2_throttle_limit = spec->eq.log2_throttle_limit ?: 1;
sys/dev/mana/gdma_main.c
818
err = mana_gd_create_hw_eq(gc, queue);
sys/dev/mana/gdma_main.c
822
err = mana_gd_test_eq(gc, queue);
sys/dev/mana/gdma_main.c
830
mana_gd_destroy_eq(gc, false, queue);
sys/dev/mana/gdma_main.c
836
struct gdma_queue *queue)
sys/dev/mana/gdma_main.c
840
queue->head |= INITIALIZED_OWNER_BIT(log2_num_entries);
sys/dev/mana/gdma_main.c
841
queue->cq.parent = spec->cq.parent_eq;
sys/dev/mana/gdma_main.c
842
queue->cq.context = spec->cq.context;
sys/dev/mana/gdma_main.c
843
queue->cq.callback = spec->cq.callback;
sys/dev/mana/gdma_main.c
848
struct gdma_queue *queue)
sys/dev/mana/gdma_main.c
850
uint32_t id = queue->id;
sys/dev/mana/gdma_main.c
867
struct gdma_queue *queue;
sys/dev/mana/gdma_main.c
870
queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
sys/dev/mana/gdma_main.c
871
gmi = &queue->mem_info;
sys/dev/mana/gdma_main.c
876
queue->head = 0;
sys/dev/mana/gdma_main.c
877
queue->tail = 0;
sys/dev/mana/gdma_main.c
878
queue->queue_mem_ptr = gmi->virt_addr;
sys/dev/mana/gdma_main.c
879
queue->queue_size = spec->queue_size;
sys/dev/mana/gdma_main.c
880
queue->monitor_avl_buf = spec->monitor_avl_buf;
sys/dev/mana/gdma_main.c
881
queue->type = spec->type;
sys/dev/mana/gdma_main.c
882
queue->gdma_dev = gd;
sys/dev/mana/gdma_main.c
885
err = mana_gd_create_eq(gd, spec, false, queue);
sys/dev/mana/gdma_main.c
887
mana_gd_create_cq(spec, queue);
sys/dev/mana/gdma_main.c
892
*queue_ptr = queue;
sys/dev/mana/gdma_main.c
897
free(queue, M_DEVBUF);
sys/dev/mana/gdma_main.c
998
struct gdma_queue *queue;
sys/dev/mana/hw_channel.c
300
struct gdma_queue **queue)
sys/dev/mana/hw_channel.c
311
return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
sys/dev/mana/hw_channel.c
319
struct gdma_queue **queue)
sys/dev/mana/hw_channel.c
330
return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
sys/dev/mana/hw_channel.c
337
struct gdma_queue **queue)
sys/dev/mana/hw_channel.c
348
return mana_gd_create_hwc_queue(hwc->gdma_dev, &spec, queue);
sys/dev/mana/hw_channel.c
536
struct gdma_queue *queue;
sys/dev/mana/hw_channel.c
556
err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
sys/dev/mana/hw_channel.c
561
hwc_wq->gdma_wq = queue;
sys/dev/mfi/mfi.c
1648
TAILQ_HEAD(,mfi_evt_queue_elm) queue;
sys/dev/mfi/mfi.c
1653
TAILQ_INIT(&queue);
sys/dev/mfi/mfi.c
1655
TAILQ_CONCAT(&queue, &sc->mfi_evt_queue, link);
sys/dev/mfi/mfi.c
1657
while ((elm = TAILQ_FIRST(&queue)) != NULL) {
sys/dev/mfi/mfi.c
1658
TAILQ_REMOVE(&queue, elm, link);
sys/dev/mge/if_mge.c
131
uint8_t queue);
sys/dev/mge/if_mge.c
132
static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue);
sys/dev/mge/if_mge.c
437
mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue)
sys/dev/mge/if_mge.c
444
reg_val = (1 | (queue << 1)) << reg_off;
sys/dev/mge/if_mge.c
455
mge_set_prom_mode(struct mge_softc *sc, uint8_t queue)
sys/dev/mge/if_mge.c
466
reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 |
sys/dev/mge/if_mge.c
467
(1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24);
sys/dev/mthca/mthca_cq.c
163
return buf->queue.direct.buf + (entry * MTHCA_CQ_ENTRY_SIZE);
sys/dev/mthca/mthca_cq.c
165
return buf->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].buf
sys/dev/mthca/mthca_cq.c
360
&buf->queue, &buf->is_direct,
sys/dev/mthca/mthca_cq.c
373
mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
sys/dev/mthca/mthca_provider.h
188
union mthca_buf queue;
sys/dev/mthca/mthca_provider.h
241
union mthca_buf queue;
sys/dev/mthca/mthca_provider.h
284
union mthca_buf queue;
sys/dev/mthca/mthca_qp.c
1056
&qp->queue, &qp->is_direct, pd, 0, &qp->mr);
sys/dev/mthca/mthca_qp.c
1072
&qp->queue, qp->is_direct, &qp->mr);
sys/dev/mthca/mthca_qp.c
211
return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
sys/dev/mthca/mthca_qp.c
213
return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
sys/dev/mthca/mthca_qp.c
220
return qp->queue.direct.buf + qp->send_wqe_offset +
sys/dev/mthca/mthca_qp.c
223
return qp->queue.page_list[(qp->send_wqe_offset +
sys/dev/mthca/mthca_srq.c
143
mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
sys/dev/mthca/mthca_srq.c
165
&srq->queue, &srq->is_direct, pd, 1, &srq->mr);
sys/dev/mthca/mthca_srq.c
77
return srq->queue.direct.buf + (n << srq->wqe_shift);
sys/dev/mthca/mthca_srq.c
79
return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
sys/dev/mxge/mxge_mcp.h
503
uint8_t queue;
sys/dev/neta/if_mvneta.c
3368
if (arg->queue < 0 || arg->queue > MVNETA_RX_RING_CNT)
sys/dev/neta/if_mvneta.c
3379
mvneta_rx_lockq(sc, arg->queue);
sys/dev/neta/if_mvneta.c
3380
rx = MVNETA_RX_RING(sc, arg->queue);
sys/dev/neta/if_mvneta.c
3383
mvneta_rx_unlockq(sc, arg->queue);
sys/dev/neta/if_mvneta.c
3391
mvneta_rx_lockq(sc, arg->queue);
sys/dev/neta/if_mvneta.c
3395
mvneta_rx_unlockq(sc, arg->queue);
sys/dev/neta/if_mvneta.c
3402
MVNETA_WRITE(sc, MVNETA_PRXITTH(arg->queue), reg);
sys/dev/neta/if_mvneta.c
3403
mvneta_rx_unlockq(sc, arg->queue);
sys/dev/neta/if_mvneta.c
3486
rxarg->queue = q;
sys/dev/neta/if_mvnetavar.h
225
int queue;
sys/dev/ocs_fc/ocs_ddump.c
564
ocs_ddump_value(textbuf, "queue-id", "%d", eq->queue->id);
sys/dev/ocs_fc/ocs_ddump.c
568
ocs_ddump_value(textbuf, "queue-id", "%d", cq->queue->id);
sys/dev/ocs_fc/ocs_ddump.c
575
ocs_ddump_value(textbuf, "queue-id", "%d", mq->queue->id);
sys/dev/ocs_fc/ocs_ddump.c
582
ocs_ddump_value(textbuf, "queue-id", "%d", wq->queue->id);
sys/dev/ocs_fc/ocs_hw.c
2192
while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) {
sys/dev/ocs_fc/ocs_hw.c
2221
if (eq->queue->n_posted > (eq->queue->posted_limit)) {
sys/dev/ocs_fc/ocs_hw.c
2222
sli_queue_arm(&hw->sli, eq->queue, FALSE);
sys/dev/ocs_fc/ocs_hw.c
2233
sli_queue_eq_arm(&hw->sli, eq->queue, TRUE);
sys/dev/ocs_fc/ocs_hw.c
3609
queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
sys/dev/ocs_fc/ocs_hw.c
3615
ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc);
sys/dev/ocs_fc/ocs_hw.c
3639
sli_queue_lock(wq->queue);
sys/dev/ocs_fc/ocs_hw.c
3665
sli_queue_unlock(wq->queue);
sys/dev/ocs_fc/ocs_hw.c
3688
sli_queue_lock(wq->queue);
sys/dev/ocs_fc/ocs_hw.c
3705
sli_queue_unlock(wq->queue);
sys/dev/ocs_fc/ocs_hw.c
4967
sli_queue_lock(io_to_abort->wq->queue);
sys/dev/ocs_fc/ocs_hw.c
4973
sli_queue_unlock(io_to_abort->wq->queue);
sys/dev/ocs_fc/ocs_hw.c
4976
sli_queue_unlock(io_to_abort->wq->queue);
sys/dev/ocs_fc/ocs_hw.c
8401
sli_queue_lock(wq->queue);
sys/dev/ocs_fc/ocs_hw.c
8406
sli_queue_unlock(wq->queue);
sys/dev/ocs_fc/ocs_hw.c
8434
while (!sli_queue_read(&hw->sli, cq->queue, cqe)) {
sys/dev/ocs_fc/ocs_hw.c
8435
status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
sys/dev/ocs_fc/ocs_hw.c
8507
if (n_processed == cq->queue->proc_limit) {
sys/dev/ocs_fc/ocs_hw.c
8511
if (cq->queue->n_posted >= (cq->queue->posted_limit)) {
sys/dev/ocs_fc/ocs_hw.c
8512
sli_queue_arm(&hw->sli, cq->queue, FALSE);
sys/dev/ocs_fc/ocs_hw.c
8516
sli_queue_arm(&hw->sli, cq->queue, TRUE);
sys/dev/ocs_fc/ocs_hw.c
8518
if (n_processed > cq->queue->max_num_processed) {
sys/dev/ocs_fc/ocs_hw.c
8519
cq->queue->max_num_processed = n_processed;
sys/dev/ocs_fc/ocs_hw.c
8522
if (telapsed > cq->queue->max_process_time) {
sys/dev/ocs_fc/ocs_hw.c
8523
cq->queue->max_process_time = telapsed;
sys/dev/ocs_fc/ocs_hw.c
8543
ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id,
sys/dev/ocs_fc/ocs_hw.c
8544
((cq->queue->index - 1) & (cq->queue->length - 1)));
sys/dev/ocs_fc/ocs_hw.c
8923
ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id,
sys/dev/ocs_fc/ocs_hw.c
8924
((cq->queue->index - 1) & (cq->queue->length - 1)));
sys/dev/ocs_fc/ocs_hw.h
1105
sli4_queue_t *queue;
sys/dev/ocs_fc/ocs_hw.h
1120
sli4_queue_t *queue; /**< pointer to SLI4 queue */
sys/dev/ocs_fc/ocs_hw.h
1141
sli4_queue_t *queue;
sys/dev/ocs_fc/ocs_hw.h
1157
sli4_queue_t *queue;
sys/dev/ocs_fc/ocs_hw_queues.c
364
eq->queue = &hw->eq[eq->instance];
sys/dev/ocs_fc/ocs_hw_queues.c
372
if (sli_queue_alloc(&hw->sli, SLI_QTYPE_EQ, eq->queue, entry_count, NULL, 0)) {
sys/dev/ocs_fc/ocs_hw_queues.c
377
sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
sys/dev/ocs_fc/ocs_hw_queues.c
380
ocs_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance, eq->queue->id,
sys/dev/ocs_fc/ocs_hw_queues.c
409
cq->queue = &hw->cq[cq->instance];
sys/dev/ocs_fc/ocs_hw_queues.c
413
if (sli_queue_alloc(&hw->sli, SLI_QTYPE_CQ, cq->queue, cq->entry_count, eq->queue, 0)) {
sys/dev/ocs_fc/ocs_hw_queues.c
422
ocs_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance, cq->queue->id,
sys/dev/ocs_fc/ocs_hw_queues.c
463
cq->queue = &hw->cq[cq->instance];
sys/dev/ocs_fc/ocs_hw_queues.c
464
qs[i] = cq->queue;
sys/dev/ocs_fc/ocs_hw_queues.c
465
assocs[i] = eqs[i]->queue;
sys/dev/ocs_fc/ocs_hw_queues.c
513
mq->queue = &hw->mq[mq->instance];
sys/dev/ocs_fc/ocs_hw_queues.c
516
mq->queue,
sys/dev/ocs_fc/ocs_hw_queues.c
518
cq->queue, 0)) {
sys/dev/ocs_fc/ocs_hw_queues.c
525
ocs_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance, mq->queue->id,
sys/dev/ocs_fc/ocs_hw_queues.c
556
wq->queue = &hw->wq[wq->instance];
sys/dev/ocs_fc/ocs_hw_queues.c
564
if (sli_queue_alloc(&hw->sli, SLI_QTYPE_WQ, wq->queue, wq->entry_count, cq->queue, ulp)) {
sys/dev/ocs_fc/ocs_hw_queues.c
571
ocs_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d ulp %d\n", wq->instance, wq->queue->id,
sys/dev/ocs_fc/ocs_hw_queues.c
614
cq->queue,
sys/dev/ocs_fc/ocs_hw_queues.c
633
cq->queue,
sys/dev/ocs_fc/ocs_hw_queues.c
716
cqs[0]->queue->id,
sys/dev/ocs_fc/ocs_hw_queues.c
721
ocs_log_err(hw->os, "RQ Set allocation failure for base CQ=%d\n", cqs[0]->queue->id);
sys/dev/ocs_fc/ocs_hw_queues.c
900
ocs_printf("eq[%d] id %2d\n", eq->instance, eq->queue->id);
sys/dev/ocs_fc/ocs_hw_queues.c
902
ocs_printf(" cq[%d] id %2d current\n", cq->instance, cq->queue->id);
sys/dev/ocs_fc/ocs_hw_queues.c
907
ocs_printf(" mq[%d] id %2d\n", mq->instance, mq->queue->id);
sys/dev/ocs_fc/ocs_hw_queues.c
911
ocs_printf(" wq[%d] id %2d\n", wq->instance, wq->queue->id);
sys/dev/pms/RefTisa/sallsdk/api/saosapi.h
635
bit32 queue,
sys/dev/pms/RefTisa/sallsdk/api/saosapi.h
639
#define ossaQueueProcessed(agRoot, queue, obpi, obci)
sys/dev/pms/RefTisa/sallsdk/spc/samacro.h
237
#define MPI_DEBUG_TRACE( queue, pici, ib,iomb,count) \
sys/dev/pms/RefTisa/sallsdk/spc/samacro.h
239
mpiTraceAdd( (queue), (pici),(ib), (iomb), (count)); \
sys/dev/pms/RefTisa/sallsdk/spc/samacro.h
242
#define MPI_DEBUG_TRACE( queue, pici, ib,iomb,count)
sys/dev/pst/pst-raid.c
153
bioq_init(&psc->queue);
sys/dev/pst/pst-raid.c
211
bioq_disksort(&psc->queue, bp);
sys/dev/pst/pst-raid.c
225
(bp = bioq_first(&psc->queue))) {
sys/dev/pst/pst-raid.c
227
bioq_remove(&psc->queue, bp);
sys/dev/pst/pst-raid.c
63
struct bio_queue_head queue;
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
443
int queue, int idx)
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
447
EDMA_RING_LOCK_ASSERT(&sc->sc_rx_ring[queue]);
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
451
__func__, queue, idx);
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
454
reg = EDMA_REG_READ(sc, EDMA_REG_RFD_IDX_Q(queue));
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
456
"%s: q=%d reg was 0x%08x\n", __func__, queue, reg);
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
459
EDMA_REG_WRITE(sc, EDMA_REG_RFD_IDX_Q(queue), reg);
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
461
"%s: q=%d reg now 0x%08x\n", __func__, queue, reg);
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
476
qcom_ess_edma_hw_rfd_get_cons_index(struct qcom_ess_edma_softc *sc, int queue)
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
481
reg = EDMA_REG_READ(sc, EDMA_REG_RFD_IDX_Q(queue));
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
495
int queue, int idx)
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
497
EDMA_RING_LOCK_ASSERT(&sc->sc_rx_ring[queue]);
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
499
EDMA_REG_WRITE(sc, EDMA_REG_RX_SW_CONS_IDX_Q(queue), idx);
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.h
66
struct qcom_ess_edma_softc *sc, int queue, int idx);
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.h
68
struct qcom_ess_edma_softc *sc, int queue);
sys/dev/qcom_ess_edma/qcom_ess_edma_hw.h
70
struct qcom_ess_edma_softc *sc, int queue, int idx);
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
255
int queue, int num)
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
265
ring = &sc->sc_rx_ring[queue];
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
279
queue, error);
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
301
(void) qcom_ess_edma_hw_rfd_prod_index_update(sc, queue, prod_index);
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
305
__func__, queue, n, prod_index);
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
328
qcom_ess_edma_rx_ring_complete(struct qcom_ess_edma_softc *sc, int queue,
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
341
ring = &sc->sc_rx_ring[queue];
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
362
queue);
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
507
__func__, queue, cleaned_count);
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
508
(void) qcom_ess_edma_rx_ring_fill(sc, queue, cleaned_count);
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
509
(void) qcom_ess_edma_hw_rfd_sw_cons_index_update(sc, queue,
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
67
qcom_ess_edma_rx_queue_to_cpu(struct qcom_ess_edma_softc *sc, int queue)
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
69
return (queue % mp_ncpus);
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.h
36
int queue);
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.h
47
int queue, int num);
sys/dev/qcom_ess_edma/qcom_ess_edma_rx.h
49
int queue, struct mbufq *mq);
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
161
qcom_ess_edma_tx_ring_complete(struct qcom_ess_edma_softc *sc, int queue)
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
167
ring = &sc->sc_tx_ring[queue];
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
178
(void) qcom_ess_edma_hw_tx_read_tpd_cons_idx(sc, queue,
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
202
qcom_ess_edma_hw_tx_update_cons_idx(sc, queue, sw_next_to_clean);
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
226
qcom_ess_edma_tx_ring_frame(struct qcom_ess_edma_softc *sc, int queue,
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
241
ring = &sc->sc_tx_ring[queue];
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
439
qcom_ess_edma_tx_ring_frame_update(struct qcom_ess_edma_softc *sc, int queue)
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
443
ring = &sc->sc_tx_ring[queue];
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
449
(void) qcom_ess_edma_hw_tx_update_tpd_prod_idx(sc, queue,
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
72
qcom_ess_edma_tx_queue_to_cpu(struct qcom_ess_edma_softc *sc, int queue)
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
75
return (queue / mp_ncpus);
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.h
37
int queue);
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.h
43
int queue);
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.h
45
int queue, struct mbuf **m0, uint16_t port_bitmap,
sys/dev/qcom_ess_edma/qcom_ess_edma_tx.h
48
int queue);
sys/dev/qlnx/qlnxe/ecore_spq.c
1096
(found->queue == &p_spq->unlimited_pending))
sys/dev/qlnx/qlnxe/ecore_spq.c
741
p_ent->queue = &p_spq->unlimited_pending;
sys/dev/qlnx/qlnxe/ecore_spq.c
747
p_ent->queue = &p_spq->pending;
sys/dev/qlnx/qlnxe/ecore_spq.c
792
if (p_ent->queue == &p_spq->unlimited_pending) {
sys/dev/qlnx/qlnxe/ecore_spq.c
980
p_ent->queue == &p_spq->unlimited_pending);
sys/dev/qlnx/qlnxe/ecore_spq.c
982
if (p_ent->queue == &p_spq->unlimited_pending) {
sys/dev/qlnx/qlnxe/ecore_spq.h
140
osal_list_t *queue;
sys/dev/sfxge/sfxge_ev.c
143
rx_desc = &rxq->queue[id];
sys/dev/sfxge/sfxge_ev.c
148
rx_desc = &rxq->queue[id];
sys/dev/sfxge/sfxge_rx.c
1243
free(rxq->queue, M_SFXGE);
sys/dev/sfxge/sfxge_rx.c
1282
rxq->queue = malloc(sizeof(struct sfxge_rx_sw_desc) * sc->rxq_entries,
sys/dev/sfxge/sfxge_rx.c
250
rx_desc = &rxq->queue[id];
sys/dev/sfxge/sfxge_rx.c
821
rx_desc = &rxq->queue[id];
sys/dev/sfxge/sfxge_rx.h
163
struct sfxge_rx_sw_desc *queue __aligned(CACHE_LINE_SIZE);
sys/dev/virtio/balloon/virtio_balloon.c
352
KASSERT(m->a.queue == PQ_NONE,
sys/dev/virtio/block/virtio_blk.c
1373
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
sys/dev/virtio/block/virtio_blk.c
1381
TAILQ_INSERT_TAIL(queue, bp, bio_queue);
sys/dev/virtio/block/virtio_blk.c
1387
vtblk_done_completed(struct vtblk_softc *sc, struct bio_queue *queue)
sys/dev/virtio/block/virtio_blk.c
1391
TAILQ_FOREACH_SAFE(bp, queue, bio_queue, tmp) {
sys/dev/virtio/block/virtio_blk.c
1427
struct bio_queue queue;
sys/dev/virtio/block/virtio_blk.c
1429
TAILQ_INIT(&queue);
sys/dev/virtio/block/virtio_blk.c
1430
vtblk_queue_completed(sc, &queue);
sys/dev/virtio/block/virtio_blk.c
1431
vtblk_done_completed(sc, &queue);
sys/dev/virtio/block/virtio_blk.c
1649
struct bio_queue queue;
sys/dev/virtio/block/virtio_blk.c
1653
TAILQ_INIT(&queue);
sys/dev/virtio/block/virtio_blk.c
1661
vtblk_queue_completed(sc, &queue);
sys/dev/virtio/block/virtio_blk.c
1674
vtblk_done_completed(sc, &queue);
sys/dev/virtio/mmio/virtio_mmio.c
620
vtmmio_notify_virtqueue(device_t dev, uint16_t queue, bus_size_t offset)
sys/dev/virtio/mmio/virtio_mmio.c
627
vtmmio_write_config_4(sc, offset, queue);
sys/dev/virtio/pci/virtio_pci_legacy.c
469
vtpci_legacy_notify_vq(device_t dev, uint16_t queue, bus_size_t offset)
sys/dev/virtio/pci/virtio_pci_legacy.c
476
vtpci_legacy_write_header_2(sc, offset, queue);
sys/dev/virtio/pci/virtio_pci_modern.c
614
vtpci_modern_notify_vq(device_t dev, uint16_t queue, bus_size_t offset)
sys/dev/virtio/pci/virtio_pci_modern.c
620
vtpci_modern_write_notify_2(sc, offset, queue);
sys/dev/virtio/virtqueue.c
177
virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
sys/dev/virtio/virtqueue.c
190
queue, info->vqai_name);
sys/dev/virtio/virtqueue.c
195
queue, info->vqai_name, size);
sys/dev/virtio/virtqueue.c
200
queue, info->vqai_name, info->vqai_maxindirsz,
sys/dev/virtio/virtqueue.c
214
vq->vq_queue_index = queue;
sys/dev/virtio/virtqueue.h
68
int virtqueue_alloc(device_t dev, uint16_t queue, uint16_t size,
sys/dev/vmware/vmci/vmci_kernel_if.c
1017
const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
sys/dev/vmware/vmci/vmci_kernel_if.c
1024
queue, queue_offset, size, false));
sys/dev/vmware/vmci/vmci_kernel_if.c
1043
vmci_memcpy_to_queue_v(struct vmci_queue *queue, uint64_t queue_offset,
sys/dev/vmware/vmci/vmci_kernel_if.c
1054
return (__vmci_memcpy_to_queue(queue, queue_offset, src, size,
sys/dev/vmware/vmci/vmci_kernel_if.c
1076
const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
sys/dev/vmware/vmci/vmci_kernel_if.c
1086
return (__vmci_memcpy_from_queue(dest, queue, queue_offset, size,
sys/dev/vmware/vmci/vmci_kernel_if.c
513
struct vmci_queue *queue;
sys/dev/vmware/vmci/vmci_kernel_if.c
518
sizeof(*queue) + sizeof(*(queue->kernel_if)) + dmas_size;
sys/dev/vmware/vmci/vmci_kernel_if.c
526
queue = malloc(queue_size, M_DEVBUF, M_NOWAIT);
sys/dev/vmware/vmci/vmci_kernel_if.c
527
if (!queue)
sys/dev/vmware/vmci/vmci_kernel_if.c
530
queue->q_header = NULL;
sys/dev/vmware/vmci/vmci_kernel_if.c
531
queue->saved_header = NULL;
sys/dev/vmware/vmci/vmci_kernel_if.c
532
queue->kernel_if = (struct vmci_queue_kernel_if *)(queue + 1);
sys/dev/vmware/vmci/vmci_kernel_if.c
533
queue->kernel_if->num_pages = num_pages;
sys/dev/vmware/vmci/vmci_kernel_if.c
534
queue->kernel_if->dmas = (struct vmci_dma_alloc *)(queue->kernel_if +
sys/dev/vmware/vmci/vmci_kernel_if.c
537
vmci_dma_malloc(PAGE_SIZE, 1, &queue->kernel_if->dmas[i]);
sys/dev/vmware/vmci/vmci_kernel_if.c
538
if (!queue->kernel_if->dmas[i].dma_vaddr) {
sys/dev/vmware/vmci/vmci_kernel_if.c
540
vmci_free_queue(queue, i * PAGE_SIZE);
sys/dev/vmware/vmci/vmci_kernel_if.c
546
queue->q_header = (void *)queue->kernel_if->dmas[0].dma_vaddr;
sys/dev/vmware/vmci/vmci_kernel_if.c
548
return ((void *)queue);
sys/dev/vmware/vmci/vmci_kernel_if.c
571
struct vmci_queue *queue = q;
sys/dev/vmware/vmci/vmci_kernel_if.c
573
if (queue) {
sys/dev/vmware/vmci/vmci_kernel_if.c
579
vmci_dma_free(&queue->kernel_if->dmas[i]);
sys/dev/vmware/vmci/vmci_kernel_if.c
580
free(queue, M_DEVBUF);
sys/dev/vmware/vmci/vmci_kernel_if.c
817
__vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
sys/dev/vmware/vmci/vmci_kernel_if.c
820
struct vmci_queue_kernel_if *kernel_if = queue->kernel_if;
sys/dev/vmware/vmci/vmci_kernel_if.c
874
__vmci_memcpy_from_queue(void *dest, const struct vmci_queue *queue,
sys/dev/vmware/vmci/vmci_kernel_if.c
877
struct vmci_queue_kernel_if *kernel_if = queue->kernel_if;
sys/dev/vmware/vmci/vmci_kernel_if.c
931
vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
sys/dev/vmware/vmci/vmci_kernel_if.c
938
return (__vmci_memcpy_to_queue(queue, queue_offset,
sys/dev/vmware/vmci/vmci_kernel_if.c
960
const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
sys/dev/vmware/vmci/vmci_kernel_if.c
967
queue, queue_offset, size, false));
sys/dev/vmware/vmci/vmci_kernel_if.c
988
vmci_memcpy_to_queue_local(struct vmci_queue *queue, uint64_t queue_offset,
sys/dev/vmware/vmci/vmci_kernel_if.c
995
return (__vmci_memcpy_to_queue(queue, queue_offset,
sys/dev/vmware/vmci/vmci_qpair.c
60
vmci_queue_add_producer_tail(struct vmci_queue *queue,
sys/dev/vmware/vmci/vmci_qpair.c
64
vmci_queue_header_add_producer_tail(queue->q_header, add, queue_size);
sys/dev/vmware/vmci/vmci_qpair.c
86
vmci_queue_add_consumer_head(struct vmci_queue *queue,
sys/dev/vmware/vmci/vmci_qpair.c
90
vmci_queue_header_add_consumer_head(queue->q_header, add, queue_size);
sys/dev/vmware/vmci/vmci_queue.h
101
return (vmci_memcpy_to_queue_v(queue, queue_offset, src, src_offset,
sys/dev/vmware/vmci/vmci_queue.h
107
const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
sys/dev/vmware/vmci/vmci_queue.h
111
return (vmci_memcpy_from_queue_v(dest, dest_offset, queue, queue_offset,
sys/dev/vmware/vmci/vmci_queue.h
51
typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
sys/dev/vmware/vmci/vmci_queue.h
55
const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
sys/dev/vmware/vmci/vmci_queue.h
75
int vmci_memcpy_to_queue(struct vmci_queue *queue, uint64_t queue_offset,
sys/dev/vmware/vmci/vmci_queue.h
79
const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
sys/dev/vmware/vmci/vmci_queue.h
81
int vmci_memcpy_to_queue_local(struct vmci_queue *queue,
sys/dev/vmware/vmci/vmci_queue.h
85
const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
sys/dev/vmware/vmci/vmci_queue.h
88
int vmci_memcpy_to_queue_v(struct vmci_queue *queue, uint64_t queue_offset,
sys/dev/vmware/vmci/vmci_queue.h
92
const struct vmci_queue *queue, uint64_t queue_offset, size_t size,
sys/dev/vmware/vmci/vmci_queue.h
96
vmci_memcpy_to_queue_v_local(struct vmci_queue *queue, uint64_t queue_offset,
sys/dev/wg/if_wg.c
1883
wg_queue_init(struct wg_queue *queue, const char *name)
sys/dev/wg/if_wg.c
1885
mtx_init(&queue->q_mtx, name, NULL, MTX_DEF);
sys/dev/wg/if_wg.c
1886
STAILQ_INIT(&queue->q_queue);
sys/dev/wg/if_wg.c
1887
queue->q_len = 0;
sys/dev/wg/if_wg.c
1891
wg_queue_deinit(struct wg_queue *queue)
sys/dev/wg/if_wg.c
1893
wg_queue_purge(queue);
sys/dev/wg/if_wg.c
1894
mtx_destroy(&queue->q_mtx);
sys/dev/wg/if_wg.c
1898
wg_queue_len(struct wg_queue *queue)
sys/dev/wg/if_wg.c
1900
return (queue->q_len);
sys/geom/concat/g_concat.c
292
struct bio_queue_head queue;
sys/geom/concat/g_concat.c
299
bioq_init(&queue);
sys/geom/concat/g_concat.c
303
while ((cbp = bioq_takefirst(&queue)) != NULL)
sys/geom/concat/g_concat.c
310
bioq_insert_tail(&queue, cbp);
sys/geom/concat/g_concat.c
315
while ((cbp = bioq_takefirst(&queue)) != NULL) {
sys/geom/concat/g_concat.c
326
struct bio_queue_head queue;
sys/geom/concat/g_concat.c
379
bioq_init(&queue);
sys/geom/concat/g_concat.c
393
while ((cbp = bioq_takefirst(&queue)) != NULL)
sys/geom/concat/g_concat.c
400
bioq_insert_tail(&queue, cbp);
sys/geom/concat/g_concat.c
428
while ((cbp = bioq_takefirst(&queue)) != NULL) {
sys/geom/eli/g_eli.c
642
TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
sys/geom/gate/g_gate.c
104
struct bio_queue_head queue;
sys/geom/gate/g_gate.c
126
bioq_init(&queue);
sys/geom/gate/g_gate.c
130
bioq_insert_tail(&queue, bp);
sys/geom/gate/g_gate.c
134
bioq_insert_tail(&queue, bp);
sys/geom/gate/g_gate.c
138
while ((bp = bioq_takefirst(&queue)) != NULL) {
sys/geom/gate/g_gate.c
365
struct bio_queue_head queue;
sys/geom/gate/g_gate.c
373
bioq_init(&queue);
sys/geom/gate/g_gate.c
375
TAILQ_FOREACH_SAFE(bp, &sc->sc_inqueue.queue, bio_queue, bp2) {
sys/geom/gate/g_gate.c
380
bioq_insert_tail(&queue, bp);
sys/geom/gate/g_gate.c
382
TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, bp2) {
sys/geom/gate/g_gate.c
387
bioq_insert_tail(&queue, bp);
sys/geom/gate/g_gate.c
390
while ((bp = bioq_takefirst(&queue)) != NULL) {
sys/geom/gate/g_gate.c
810
TAILQ_FOREACH_SAFE(bp, &sc->sc_outqueue.queue, bio_queue, tbp) {
sys/geom/gate/g_gate.c
822
TAILQ_INSERT_AFTER(&sc->sc_inqueue.queue,
sys/geom/gate/g_gate.c
911
TAILQ_FOREACH(bp, &sc->sc_outqueue.queue, bio_queue) {
sys/geom/journal/g_journal.c
1472
KASSERT(TAILQ_EMPTY(&sc->sc_delayed_queue.queue),
sys/geom/mirror/g_mirror.c
1643
struct bio_queue queue;
sys/geom/mirror/g_mirror.c
1667
TAILQ_INIT(&queue);
sys/geom/mirror/g_mirror.c
1673
while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
sys/geom/mirror/g_mirror.c
1674
TAILQ_REMOVE(&queue, cbp, bio_queue);
sys/geom/mirror/g_mirror.c
1682
TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
sys/geom/mirror/g_mirror.c
1695
while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
sys/geom/mirror/g_mirror.c
1696
TAILQ_REMOVE(&queue, cbp, bio_queue);
sys/geom/mirror/g_mirror.c
1712
struct bio_queue queue;
sys/geom/mirror/g_mirror.c
1775
TAILQ_INIT(&queue);
sys/geom/mirror/g_mirror.c
1792
while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
sys/geom/mirror/g_mirror.c
1793
TAILQ_REMOVE(&queue, cbp, bio_queue);
sys/geom/mirror/g_mirror.c
1801
TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
sys/geom/mirror/g_mirror.c
1810
if (TAILQ_EMPTY(&queue)) {
sys/geom/mirror/g_mirror.c
1816
while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
sys/geom/mirror/g_mirror.c
1818
TAILQ_REMOVE(&queue, cbp, bio_queue);
sys/geom/mirror/g_mirror.c
1833
TAILQ_INIT(&queue);
sys/geom/mirror/g_mirror.c
1839
while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
sys/geom/mirror/g_mirror.c
1840
TAILQ_REMOVE(&queue, cbp, bio_queue);
sys/geom/mirror/g_mirror.c
1848
TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
sys/geom/mirror/g_mirror.c
1853
KASSERT(!TAILQ_EMPTY(&queue),
sys/geom/mirror/g_mirror.c
1855
while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
sys/geom/mirror/g_mirror.c
1857
TAILQ_REMOVE(&queue, cbp, bio_queue);
sys/geom/raid/g_raid.c
1234
TAILQ_FOREACH(nbp, &vol->v_inflight.queue, bio_queue) {
sys/geom/raid/g_raid.c
1294
TAILQ_FOREACH(bp, &vol->v_inflight.queue, bio_queue) {
sys/geom/raid/g_raid.c
1694
TAILQ_FOREACH_SAFE(bp, &sc->sc_queue.queue, bio_queue, tmp) {
sys/geom/raid/g_raid.c
802
TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
sys/geom/raid/g_raid.c
960
struct bio_queue_head queue;
sys/geom/raid/g_raid.c
970
bioq_init(&queue);
sys/geom/raid/g_raid.c
980
bioq_insert_tail(&queue, cbp);
sys/geom/raid/g_raid.c
982
while ((cbp = bioq_takefirst(&queue)) != NULL) {
sys/geom/raid/g_raid.c
989
while ((cbp = bioq_takefirst(&queue)) != NULL)
sys/geom/raid/tr_concat.c
211
struct bio_queue_head queue;
sys/geom/raid/tr_concat.c
244
bioq_init(&queue);
sys/geom/raid/tr_concat.c
263
bioq_insert_tail(&queue, cbp);
sys/geom/raid/tr_concat.c
271
while ((cbp = bioq_takefirst(&queue)) != NULL) {
sys/geom/raid/tr_concat.c
278
while ((cbp = bioq_takefirst(&queue)) != NULL)
sys/geom/raid/tr_raid0.c
192
struct bio_queue_head queue;
sys/geom/raid/tr_raid0.c
225
bioq_init(&queue);
sys/geom/raid/tr_raid0.c
243
bioq_insert_tail(&queue, cbp);
sys/geom/raid/tr_raid0.c
253
while ((cbp = bioq_takefirst(&queue)) != NULL) {
sys/geom/raid/tr_raid0.c
260
while ((cbp = bioq_takefirst(&queue)) != NULL)
sys/geom/raid/tr_raid1.c
546
struct bio_queue_head queue;
sys/geom/raid/tr_raid1.c
556
bioq_init(&queue);
sys/geom/raid/tr_raid1.c
587
bioq_insert_tail(&queue, cbp);
sys/geom/raid/tr_raid1.c
589
while ((cbp = bioq_takefirst(&queue)) != NULL) {
sys/geom/raid/tr_raid1.c
596
while ((cbp = bioq_takefirst(&queue)) != NULL)
sys/geom/raid/tr_raid1e.c
1149
struct bio_queue_head queue;
sys/geom/raid/tr_raid1e.c
1160
bioq_init(&queue);
sys/geom/raid/tr_raid1e.c
689
struct bio_queue_head queue;
sys/geom/raid/tr_raid1e.c
704
bioq_init(&queue);
sys/geom/raid/tr_raid1e.c
730
bioq_insert_tail(&queue, cbp);
sys/geom/raid/tr_raid1e.c
740
while ((cbp = bioq_takefirst(&queue)) != NULL) {
sys/geom/raid/tr_raid1e.c
747
while ((cbp = bioq_takefirst(&queue)) != NULL)
sys/geom/raid/tr_raid1e.c
759
struct bio_queue_head queue;
sys/geom/raid/tr_raid1e.c
774
bioq_init(&queue);
sys/geom/raid/tr_raid1e.c
806
bioq_insert_tail(&queue, cbp);
sys/geom/raid/tr_raid1e.c
818
while ((cbp = bioq_takefirst(&queue)) != NULL) {
sys/geom/raid/tr_raid1e.c
825
while ((cbp = bioq_takefirst(&queue)) != NULL)
sys/geom/raid/tr_raid5.c
213
struct bio_queue_head queue;
sys/geom/raid/tr_raid5.c
272
bioq_init(&queue);
sys/geom/raid/tr_raid5.c
282
bioq_insert_tail(&queue, cbp);
sys/geom/raid/tr_raid5.c
326
while ((cbp = bioq_takefirst(&queue)) != NULL) {
sys/geom/raid/tr_raid5.c
333
while ((cbp = bioq_takefirst(&queue)) != NULL)
sys/geom/raid3/g_raid3.c
1413
struct bio_queue_head queue;
sys/geom/raid3/g_raid3.c
1419
bioq_init(&queue);
sys/geom/raid3/g_raid3.c
1426
for (cbp = bioq_first(&queue); cbp != NULL;
sys/geom/raid3/g_raid3.c
1427
cbp = bioq_first(&queue)) {
sys/geom/raid3/g_raid3.c
1428
bioq_remove(&queue, cbp);
sys/geom/raid3/g_raid3.c
1436
bioq_insert_tail(&queue, cbp);
sys/geom/raid3/g_raid3.c
1441
for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
sys/geom/raid3/g_raid3.c
1442
bioq_remove(&queue, cbp);
sys/geom/raid3/g_raid3.c
1539
TAILQ_FOREACH(bp, &sc->sc_inflight.queue, bio_queue) {
sys/geom/raid3/g_raid3.c
1579
TAILQ_FOREACH_SAFE(bp, &sc->sc_regular_delayed.queue, bio_queue, bp2) {
sys/geom/raid3/g_raid3.c
1606
TAILQ_FOREACH_SAFE(bp, &sc->sc_sync_delayed.queue, bio_queue, bp2) {
sys/geom/raid3/g_raid3.c
2168
TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
sys/geom/raid3/g_raid3.c
2176
TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
sys/geom/raid3/g_raid3.c
419
TAILQ_FOREACH(bp, &sc->sc_queue.queue, bio_queue) {
sys/geom/shsec/g_shsec.c
296
TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
sys/geom/shsec/g_shsec.c
345
TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
sys/geom/shsec/g_shsec.c
373
while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
sys/geom/shsec/g_shsec.c
376
TAILQ_REMOVE(&queue, cbp, bio_queue);
sys/geom/shsec/g_shsec.c
385
while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
sys/geom/shsec/g_shsec.c
386
TAILQ_REMOVE(&queue, cbp, bio_queue);
sys/geom/stripe/g_stripe.c
302
TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
sys/geom/stripe/g_stripe.c
320
TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
sys/geom/stripe/g_stripe.c
344
cbp = TAILQ_FIRST(&queue);
sys/geom/stripe/g_stripe.c
372
TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
sys/geom/stripe/g_stripe.c
394
while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
sys/geom/stripe/g_stripe.c
397
TAILQ_REMOVE(&queue, cbp, bio_queue);
sys/geom/stripe/g_stripe.c
416
while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
sys/geom/stripe/g_stripe.c
417
TAILQ_REMOVE(&queue, cbp, bio_queue);
sys/geom/stripe/g_stripe.c
431
TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
sys/geom/stripe/g_stripe.c
447
TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
sys/geom/stripe/g_stripe.c
478
TAILQ_INSERT_TAIL(&queue, cbp, bio_queue);
sys/geom/stripe/g_stripe.c
507
while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
sys/geom/stripe/g_stripe.c
510
TAILQ_REMOVE(&queue, cbp, bio_queue);
sys/geom/stripe/g_stripe.c
519
while ((cbp = TAILQ_FIRST(&queue)) != NULL) {
sys/geom/stripe/g_stripe.c
520
TAILQ_REMOVE(&queue, cbp, bio_queue);
sys/geom/stripe/g_stripe.c
530
struct bio_queue_head queue;
sys/geom/stripe/g_stripe.c
535
bioq_init(&queue);
sys/geom/stripe/g_stripe.c
539
for (cbp = bioq_first(&queue); cbp != NULL;
sys/geom/stripe/g_stripe.c
540
cbp = bioq_first(&queue)) {
sys/geom/stripe/g_stripe.c
541
bioq_remove(&queue, cbp);
sys/geom/stripe/g_stripe.c
549
bioq_insert_tail(&queue, cbp);
sys/geom/stripe/g_stripe.c
554
for (cbp = bioq_first(&queue); cbp != NULL; cbp = bioq_first(&queue)) {
sys/geom/stripe/g_stripe.c
555
bioq_remove(&queue, cbp);
sys/kern/kern_lock.c
1109
int queue;
sys/kern/kern_lock.c
1166
queue = SQ_EXCLUSIVE_QUEUE;
sys/kern/kern_lock.c
1178
queue = SQ_SHARED_QUEUE;
sys/kern/kern_lock.c
1188
queue = SQ_SHARED_QUEUE;
sys/kern/kern_lock.c
1192
__func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
sys/kern/kern_lock.c
1195
sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
sys/kern/kern_lock.c
1297
int error, ipri, itimo, queue;
sys/kern/kern_lock.c
1476
queue = SQ_EXCLUSIVE_QUEUE;
sys/kern/kern_lock.c
1488
queue = SQ_SHARED_QUEUE;
sys/kern/kern_lock.c
1491
if (queue == SQ_EXCLUSIVE_QUEUE) {
sys/kern/kern_lock.c
1497
queue = SQ_SHARED_QUEUE;
sys/kern/kern_lock.c
1520
__func__, lk, queue == SQ_SHARED_QUEUE ?
sys/kern/kern_lock.c
1523
queue);
sys/kern/kern_lock.c
1531
if (queue == SQ_SHARED_QUEUE) {
sys/kern/kern_lock.c
266
const char *wmesg, int pri, int timo, int queue)
sys/kern/kern_lock.c
278
(queue == SQ_EXCLUSIVE_QUEUE) ? "exclusive" : "shared");
sys/kern/kern_lock.c
282
if (queue == SQ_EXCLUSIVE_QUEUE && (flags & LK_SLEEPFAIL) != 0) {
sys/kern/kern_lock.c
288
SLEEPQ_INTERRUPTIBLE : 0), queue);
sys/kern/kern_lock.c
315
int queue;
sys/kern/kern_lock.c
350
queue = SQ_EXCLUSIVE_QUEUE;
sys/kern/kern_lock.c
362
queue = SQ_SHARED_QUEUE;
sys/kern/kern_lock.c
372
queue = SQ_SHARED_QUEUE;
sys/kern/kern_lock.c
386
__func__, lk, queue == SQ_SHARED_QUEUE ? "shared" :
sys/kern/kern_lock.c
388
sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, queue);
sys/kern/kern_rmlock.c
142
struct rm_queue *queue;
sys/kern/kern_rmlock.c
161
for (queue = pc->pc_rm_queue.rmq_next;
sys/kern/kern_rmlock.c
162
queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
sys/kern/kern_rmlock.c
163
tracker = (struct rm_priotracker *)queue;
sys/kern/kern_rmlock.c
226
struct rm_queue *queue;
sys/kern/kern_rmlock.c
231
for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
sys/kern/kern_rmlock.c
232
queue = queue->rmq_next) {
sys/kern/kern_rmlock.c
233
tracker = (struct rm_priotracker *)queue;
sys/kern/kern_rmlock.c
261
struct rm_queue *queue;
sys/kern/kern_rmlock.c
264
for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue;
sys/kern/kern_rmlock.c
265
queue = queue->rmq_next) {
sys/kern/kern_rmlock.c
266
tracker = (struct rm_priotracker *)queue;
sys/kern/kern_rmlock.c
836
struct rm_queue *queue;
sys/kern/kern_rmlock.c
847
for (queue = pc->pc_rm_queue.rmq_next;
sys/kern/kern_rmlock.c
848
queue != &pc->pc_rm_queue; queue = queue->rmq_next) {
sys/kern/kern_rmlock.c
849
tr = (struct rm_priotracker *)queue;
sys/kern/kern_rwlock.c
1223
int queue;
sys/kern/kern_rwlock.c
1271
queue = TS_SHARED_QUEUE;
sys/kern/kern_rwlock.c
1273
queue = TS_EXCLUSIVE_QUEUE;
sys/kern/kern_rwlock.c
1281
queue == TS_SHARED_QUEUE ? "read" : "write");
sys/kern/kern_rwlock.c
1285
turnstile_broadcast(ts, queue);
sys/kern/kern_rwlock.c
787
uintptr_t setv, queue;
sys/kern/kern_rwlock.c
824
queue = TS_SHARED_QUEUE;
sys/kern/kern_rwlock.c
826
queue = TS_EXCLUSIVE_QUEUE;
sys/kern/kern_rwlock.c
845
turnstile_broadcast(ts, queue);
sys/kern/kern_sig.c
3151
struct sigqueue *queue;
sys/kern/kern_sig.c
3190
queue = &td->td_sigqueue;
sys/kern/kern_sig.c
3192
if (sigqueue_get(queue, sig, &ksi) == 0) {
sys/kern/kern_sig.c
3193
queue = &p->p_sigqueue;
sys/kern/kern_sig.c
3194
sigqueue_get(queue, sig, &ksi);
sys/kern/kern_sig.c
3228
sigqueue_add(queue, sig, &ksi);
sys/kern/kern_sx.c
1325
uintptr_t setx, queue;
sys/kern/kern_sx.c
1346
queue = SQ_SHARED_QUEUE;
sys/kern/kern_sx.c
1349
queue = SQ_EXCLUSIVE_QUEUE;
sys/kern/kern_sx.c
1357
sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
sys/kern/kern_sx.c
918
int queue;
sys/kern/kern_sx.c
961
queue = SQ_SHARED_QUEUE;
sys/kern/kern_sx.c
964
queue = SQ_EXCLUSIVE_QUEUE;
sys/kern/kern_sx.c
972
__func__, sx, queue == SQ_SHARED_QUEUE ? "shared" :
sys/kern/kern_sx.c
975
sleepq_broadcast(&sx->lock_object, SLEEPQ_SX, 0, queue);
sys/kern/subr_disk.c
155
TAILQ_INIT(&head->queue);
sys/kern/subr_disk.c
167
if (bp == TAILQ_FIRST(&head->queue))
sys/kern/subr_disk.c
172
TAILQ_REMOVE(&head->queue, bp, bio_queue);
sys/kern/subr_disk.c
173
if (TAILQ_EMPTY(&head->queue))
sys/kern/subr_disk.c
193
TAILQ_INSERT_HEAD(&head->queue, bp, bio_queue);
sys/kern/subr_disk.c
202
TAILQ_INSERT_TAIL(&head->queue, bp, bio_queue);
sys/kern/subr_disk.c
213
return (TAILQ_FIRST(&head->queue));
sys/kern/subr_disk.c
221
bp = TAILQ_FIRST(&head->queue);
sys/kern/subr_disk.c
281
cur = TAILQ_FIRST(&head->queue);
sys/kern/subr_disk.c
294
TAILQ_INSERT_HEAD(&head->queue, bp, bio_queue);
sys/kern/subr_disk.c
296
TAILQ_INSERT_AFTER(&head->queue, prev, bp, bio_queue);
sys/kern/subr_gtaskqueue.c
127
struct gtaskqueue *queue;
sys/kern/subr_gtaskqueue.c
136
queue = malloc(sizeof(struct gtaskqueue), M_GTASKQUEUE, mflags | M_ZERO);
sys/kern/subr_gtaskqueue.c
137
if (!queue) {
sys/kern/subr_gtaskqueue.c
142
STAILQ_INIT(&queue->tq_queue);
sys/kern/subr_gtaskqueue.c
143
LIST_INIT(&queue->tq_active);
sys/kern/subr_gtaskqueue.c
144
queue->tq_enqueue = enqueue;
sys/kern/subr_gtaskqueue.c
145
queue->tq_context = context;
sys/kern/subr_gtaskqueue.c
146
queue->tq_name = tq_name;
sys/kern/subr_gtaskqueue.c
147
queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
sys/kern/subr_gtaskqueue.c
148
queue->tq_flags |= TQ_FLAGS_ACTIVE;
sys/kern/subr_gtaskqueue.c
150
queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
sys/kern/subr_gtaskqueue.c
151
mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);
sys/kern/subr_gtaskqueue.c
153
return (queue);
sys/kern/subr_gtaskqueue.c
170
gtaskqueue_free(struct gtaskqueue *queue)
sys/kern/subr_gtaskqueue.c
173
TQ_LOCK(queue);
sys/kern/subr_gtaskqueue.c
174
queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
sys/kern/subr_gtaskqueue.c
175
gtaskqueue_terminate(queue->tq_threads, queue);
sys/kern/subr_gtaskqueue.c
176
KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?"));
sys/kern/subr_gtaskqueue.c
177
KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
sys/kern/subr_gtaskqueue.c
178
mtx_destroy(&queue->tq_mutex);
sys/kern/subr_gtaskqueue.c
179
free(queue->tq_threads, M_GTASKQUEUE);
sys/kern/subr_gtaskqueue.c
180
free(queue->tq_name, M_GTASKQUEUE);
sys/kern/subr_gtaskqueue.c
181
free(queue, M_GTASKQUEUE);
sys/kern/subr_gtaskqueue.c
190
struct gtaskqueue *queue = grouptask->gt_taskqueue;
sys/kern/subr_gtaskqueue.c
194
if (queue == NULL) {
sys/kern/subr_gtaskqueue.c
199
TQ_LOCK(queue);
sys/kern/subr_gtaskqueue.c
201
gtaskqueue_drain_locked(queue, gtask);
sys/kern/subr_gtaskqueue.c
202
TQ_UNLOCK(queue);
sys/kern/subr_gtaskqueue.c
208
struct gtaskqueue *queue = grouptask->gt_taskqueue;
sys/kern/subr_gtaskqueue.c
212
if (queue == NULL) {
sys/kern/subr_gtaskqueue.c
217
TQ_LOCK(queue);
sys/kern/subr_gtaskqueue.c
219
TQ_UNLOCK(queue);
sys/kern/subr_gtaskqueue.c
223
grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *gtask)
sys/kern/subr_gtaskqueue.c
226
if (queue == NULL) {
sys/kern/subr_gtaskqueue.c
231
TQ_LOCK(queue);
sys/kern/subr_gtaskqueue.c
233
TQ_UNLOCK(queue);
sys/kern/subr_gtaskqueue.c
237
TQ_UNLOCK(queue);
sys/kern/subr_gtaskqueue.c
240
STAILQ_INSERT_TAIL(&queue->tq_queue, gtask, ta_link);
sys/kern/subr_gtaskqueue.c
242
TQ_UNLOCK(queue);
sys/kern/subr_gtaskqueue.c
243
if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
sys/kern/subr_gtaskqueue.c
244
queue->tq_enqueue(queue->tq_context);
sys/kern/subr_gtaskqueue.c
259
gtaskqueue_drain_tq_queue(struct gtaskqueue *queue)
sys/kern/subr_gtaskqueue.c
263
if (STAILQ_EMPTY(&queue->tq_queue))
sys/kern/subr_gtaskqueue.c
275
STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
sys/kern/subr_gtaskqueue.c
283
TQ_SLEEP(queue, &t_barrier, "gtq_qdrain");
sys/kern/subr_gtaskqueue.c
292
gtaskqueue_drain_tq_active(struct gtaskqueue *queue)
sys/kern/subr_gtaskqueue.c
297
if (LIST_EMPTY(&queue->tq_active))
sys/kern/subr_gtaskqueue.c
301
queue->tq_callouts++;
sys/kern/subr_gtaskqueue.c
304
seq = queue->tq_seq;
sys/kern/subr_gtaskqueue.c
306
LIST_FOREACH(tb, &queue->tq_active, tb_link) {
sys/kern/subr_gtaskqueue.c
308
TQ_SLEEP(queue, tb->tb_running, "gtq_adrain");
sys/kern/subr_gtaskqueue.c
314
queue->tq_callouts--;
sys/kern/subr_gtaskqueue.c
315
if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
sys/kern/subr_gtaskqueue.c
316
wakeup_one(queue->tq_threads);
sys/kern/subr_gtaskqueue.c
320
gtaskqueue_block(struct gtaskqueue *queue)
sys/kern/subr_gtaskqueue.c
323
TQ_LOCK(queue);
sys/kern/subr_gtaskqueue.c
324
queue->tq_flags |= TQ_FLAGS_BLOCKED;
sys/kern/subr_gtaskqueue.c
325
TQ_UNLOCK(queue);
sys/kern/subr_gtaskqueue.c
329
gtaskqueue_unblock(struct gtaskqueue *queue)
sys/kern/subr_gtaskqueue.c
332
TQ_LOCK(queue);
sys/kern/subr_gtaskqueue.c
333
queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
sys/kern/subr_gtaskqueue.c
334
if (!STAILQ_EMPTY(&queue->tq_queue))
sys/kern/subr_gtaskqueue.c
335
queue->tq_enqueue(queue->tq_context);
sys/kern/subr_gtaskqueue.c
336
TQ_UNLOCK(queue);
sys/kern/subr_gtaskqueue.c
340
gtaskqueue_run_locked(struct gtaskqueue *queue)
sys/kern/subr_gtaskqueue.c
347
KASSERT(queue != NULL, ("tq is NULL"));
sys/kern/subr_gtaskqueue.c
348
TQ_ASSERT_LOCKED(queue);
sys/kern/subr_gtaskqueue.c
350
LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
sys/kern/subr_gtaskqueue.c
353
while ((gtask = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
sys/kern/subr_gtaskqueue.c
354
STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
sys/kern/subr_gtaskqueue.c
357
tb.tb_seq = ++queue->tq_seq;
sys/kern/subr_gtaskqueue.c
358
TQ_UNLOCK(queue);
sys/kern/subr_gtaskqueue.c
370
TQ_LOCK(queue);
sys/kern/subr_gtaskqueue.c
379
task_is_running(struct gtaskqueue *queue, struct gtask *gtask)
sys/kern/subr_gtaskqueue.c
383
TQ_ASSERT_LOCKED(queue);
sys/kern/subr_gtaskqueue.c
384
LIST_FOREACH(tb, &queue->tq_active, tb_link) {
sys/kern/subr_gtaskqueue.c
392
gtaskqueue_cancel_locked(struct gtaskqueue *queue, struct gtask *gtask)
sys/kern/subr_gtaskqueue.c
396
STAILQ_REMOVE(&queue->tq_queue, gtask, gtask, ta_link);
sys/kern/subr_gtaskqueue.c
398
return (task_is_running(queue, gtask) ? EBUSY : 0);
sys/kern/subr_gtaskqueue.c
402
gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask)
sys/kern/subr_gtaskqueue.c
406
TQ_LOCK(queue);
sys/kern/subr_gtaskqueue.c
407
error = gtaskqueue_cancel_locked(queue, gtask);
sys/kern/subr_gtaskqueue.c
408
TQ_UNLOCK(queue);
sys/kern/subr_gtaskqueue.c
414
gtaskqueue_drain_locked(struct gtaskqueue *queue, struct gtask *gtask)
sys/kern/subr_gtaskqueue.c
416
while ((gtask->ta_flags & TASK_ENQUEUED) || task_is_running(queue, gtask))
sys/kern/subr_gtaskqueue.c
417
TQ_SLEEP(queue, gtask, "gtq_drain");
sys/kern/subr_gtaskqueue.c
421
gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *gtask)
sys/kern/subr_gtaskqueue.c
424
if (!queue->tq_spin)
sys/kern/subr_gtaskqueue.c
427
TQ_LOCK(queue);
sys/kern/subr_gtaskqueue.c
428
gtaskqueue_drain_locked(queue, gtask);
sys/kern/subr_gtaskqueue.c
429
TQ_UNLOCK(queue);
sys/kern/subr_gtaskqueue.c
433
gtaskqueue_drain_all(struct gtaskqueue *queue)
sys/kern/subr_gtaskqueue.c
436
if (!queue->tq_spin)
sys/kern/subr_gtaskqueue.c
439
TQ_LOCK(queue);
sys/kern/subr_gtaskqueue.c
440
gtaskqueue_drain_tq_queue(queue);
sys/kern/subr_gtaskqueue.c
441
gtaskqueue_drain_tq_active(queue);
sys/kern/subr_gtaskqueue.c
442
TQ_UNLOCK(queue);
sys/kern/subr_gtaskqueue.c
51
static int task_is_running(struct gtaskqueue *queue, struct gtask *gtask);
sys/kern/subr_gtaskqueue.c
52
static void gtaskqueue_drain_locked(struct gtaskqueue *queue, struct gtask *gtask);
sys/kern/subr_sleepqueue.c
1000
sleepq_remove_matching(sq, queue, match_any, pri);
sys/kern/subr_sleepqueue.c
1008
sleepq_remove_matching(struct sleepqueue *sq, int queue,
sys/kern/subr_sleepqueue.c
1019
TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq, tdn) {
sys/kern/subr_sleepqueue.c
1185
sleepq_sbuf_print_stacks(struct sbuf *sb, const void *wchan, int queue,
sys/kern/subr_sleepqueue.c
1199
MPASS((queue >= 0) && (queue < NR_SLEEPQS));
sys/kern/subr_sleepqueue.c
1242
TAILQ_FOREACH_SAFE(td, &sq->sq_blocked[queue], td_slpq,
sys/kern/subr_sleepqueue.c
308
int flags, int queue)
sys/kern/subr_sleepqueue.c
319
MPASS((queue >= 0) && (queue < NR_SLEEPQS));
sys/kern/subr_sleepqueue.c
374
TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
sys/kern/subr_sleepqueue.c
375
sq->sq_blockedcnt[queue]++;
sys/kern/subr_sleepqueue.c
377
td->td_sqqueue = queue;
sys/kern/subr_sleepqueue.c
423
sleepq_sleepcnt(const void *wchan, int queue)
sys/kern/subr_sleepqueue.c
428
MPASS((queue >= 0) && (queue < NR_SLEEPQS));
sys/kern/subr_sleepqueue.c
432
return (sq->sq_blockedcnt[queue]);
sys/kern/subr_sleepqueue.c
925
sleepq_signal(const void *wchan, int flags, int pri, int queue)
sys/kern/subr_sleepqueue.c
934
MPASS((queue >= 0) && (queue < NR_SLEEPQS));
sys/kern/subr_sleepqueue.c
944
head = &sq->sq_blocked[queue];
sys/kern/subr_sleepqueue.c
988
sleepq_broadcast(const void *wchan, int flags, int pri, int queue)
sys/kern/subr_sleepqueue.c
994
MPASS((queue >= 0) && (queue < NR_SLEEPQS));
sys/kern/subr_taskqueue.c
107
_timeout_task_init(struct taskqueue *queue, struct timeout_task *timeout_task,
sys/kern/subr_taskqueue.c
112
callout_init_mtx(&timeout_task->c, &queue->tq_mutex,
sys/kern/subr_taskqueue.c
114
timeout_task->q = queue;
sys/kern/subr_taskqueue.c
127
task_get_busy(struct taskqueue *queue, struct task *task)
sys/kern/subr_taskqueue.c
131
TQ_ASSERT_LOCKED(queue);
sys/kern/subr_taskqueue.c
132
LIST_FOREACH(tb, &queue->tq_active, tb_link) {
sys/kern/subr_taskqueue.c
144
struct taskqueue *queue;
sys/kern/subr_taskqueue.c
151
queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
sys/kern/subr_taskqueue.c
152
if (queue == NULL) {
sys/kern/subr_taskqueue.c
159
STAILQ_INIT(&queue->tq_queue);
sys/kern/subr_taskqueue.c
160
LIST_INIT(&queue->tq_active);
sys/kern/subr_taskqueue.c
161
queue->tq_enqueue = enqueue;
sys/kern/subr_taskqueue.c
162
queue->tq_context = context;
sys/kern/subr_taskqueue.c
163
queue->tq_name = tq_name;
sys/kern/subr_taskqueue.c
164
queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
sys/kern/subr_taskqueue.c
165
queue->tq_flags |= TQ_FLAGS_ACTIVE;
sys/kern/subr_taskqueue.c
170
queue->tq_flags |= TQ_FLAGS_UNLOCKED_ENQUEUE;
sys/kern/subr_taskqueue.c
171
mtx_init(&queue->tq_mutex, tq_name, NULL, mtxflags);
sys/kern/subr_taskqueue.c
173
return (queue);
sys/kern/subr_taskqueue.c
186
taskqueue_set_callback(struct taskqueue *queue,
sys/kern/subr_taskqueue.c
195
KASSERT((queue->tq_callbacks[cb_type] == NULL),
sys/kern/subr_taskqueue.c
198
queue->tq_callbacks[cb_type] = callback;
sys/kern/subr_taskqueue.c
199
queue->tq_cb_contexts[cb_type] = context;
sys/kern/subr_taskqueue.c
216
taskqueue_free(struct taskqueue *queue)
sys/kern/subr_taskqueue.c
219
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
220
queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
sys/kern/subr_taskqueue.c
221
taskqueue_terminate(queue->tq_threads, queue);
sys/kern/subr_taskqueue.c
222
KASSERT(LIST_EMPTY(&queue->tq_active), ("Tasks still running?"));
sys/kern/subr_taskqueue.c
223
KASSERT(queue->tq_callouts == 0, ("Armed timeout tasks"));
sys/kern/subr_taskqueue.c
224
mtx_destroy(&queue->tq_mutex);
sys/kern/subr_taskqueue.c
225
free(queue->tq_threads, M_TASKQUEUE);
sys/kern/subr_taskqueue.c
226
free(queue->tq_name, M_TASKQUEUE);
sys/kern/subr_taskqueue.c
227
free(queue, M_TASKQUEUE);
sys/kern/subr_taskqueue.c
231
taskqueue_enqueue_locked(struct taskqueue *queue, struct task *task, int flags)
sys/kern/subr_taskqueue.c
242
tb = task_get_busy(queue, task);
sys/kern/subr_taskqueue.c
244
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
254
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
259
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
269
prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
sys/kern/subr_taskqueue.c
271
STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
sys/kern/subr_taskqueue.c
273
prev = queue->tq_hint;
sys/kern/subr_taskqueue.c
278
ins = STAILQ_FIRST(&queue->tq_queue);
sys/kern/subr_taskqueue.c
285
STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
sys/kern/subr_taskqueue.c
286
queue->tq_hint = task;
sys/kern/subr_taskqueue.c
288
STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
sys/kern/subr_taskqueue.c
292
if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) != 0)
sys/kern/subr_taskqueue.c
293
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
294
if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
sys/kern/subr_taskqueue.c
295
queue->tq_enqueue(queue->tq_context);
sys/kern/subr_taskqueue.c
296
if ((queue->tq_flags & TQ_FLAGS_UNLOCKED_ENQUEUE) == 0)
sys/kern/subr_taskqueue.c
297
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
304
taskqueue_enqueue_flags(struct taskqueue *queue, struct task *task, int flags)
sys/kern/subr_taskqueue.c
308
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
309
res = taskqueue_enqueue_locked(queue, task, flags);
sys/kern/subr_taskqueue.c
316
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
sys/kern/subr_taskqueue.c
318
return (taskqueue_enqueue_flags(queue, task, 0));
sys/kern/subr_taskqueue.c
324
struct taskqueue *queue;
sys/kern/subr_taskqueue.c
328
queue = timeout_task->q;
sys/kern/subr_taskqueue.c
331
queue->tq_callouts--;
sys/kern/subr_taskqueue.c
337
taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
sys/kern/subr_taskqueue.c
342
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
343
KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
sys/kern/subr_taskqueue.c
345
timeout_task->q = queue;
sys/kern/subr_taskqueue.c
349
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
352
taskqueue_enqueue_locked(queue, &timeout_task->t, 0);
sys/kern/subr_taskqueue.c
358
queue->tq_callouts++;
sys/kern/subr_taskqueue.c
364
if (queue->tq_spin)
sys/kern/subr_taskqueue.c
366
if (queue->tq_spin && queue->tq_tcount == 1 &&
sys/kern/subr_taskqueue.c
367
queue->tq_threads[0] == curthread) {
sys/kern/subr_taskqueue.c
375
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
381
taskqueue_enqueue_timeout(struct taskqueue *queue,
sys/kern/subr_taskqueue.c
385
return (taskqueue_enqueue_timeout_sbt(queue, ttask, ticks * tick_sbt,
sys/kern/subr_taskqueue.c
400
taskqueue_drain_tq_queue(struct taskqueue *queue)
sys/kern/subr_taskqueue.c
404
if (STAILQ_EMPTY(&queue->tq_queue))
sys/kern/subr_taskqueue.c
416
STAILQ_INSERT_TAIL(&queue->tq_queue, &t_barrier, ta_link);
sys/kern/subr_taskqueue.c
417
queue->tq_hint = &t_barrier;
sys/kern/subr_taskqueue.c
425
TQ_SLEEP(queue, &t_barrier, "tq_qdrain");
sys/kern/subr_taskqueue.c
435
taskqueue_drain_tq_active(struct taskqueue *queue)
sys/kern/subr_taskqueue.c
440
if (LIST_EMPTY(&queue->tq_active))
sys/kern/subr_taskqueue.c
444
queue->tq_callouts++;
sys/kern/subr_taskqueue.c
447
seq = queue->tq_seq;
sys/kern/subr_taskqueue.c
449
LIST_FOREACH(tb, &queue->tq_active, tb_link) {
sys/kern/subr_taskqueue.c
451
TQ_SLEEP(queue, tb->tb_running, "tq_adrain");
sys/kern/subr_taskqueue.c
457
queue->tq_callouts--;
sys/kern/subr_taskqueue.c
458
if ((queue->tq_flags & TQ_FLAGS_ACTIVE) == 0)
sys/kern/subr_taskqueue.c
459
wakeup_one(queue->tq_threads);
sys/kern/subr_taskqueue.c
464
taskqueue_block(struct taskqueue *queue)
sys/kern/subr_taskqueue.c
467
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
468
queue->tq_flags |= TQ_FLAGS_BLOCKED;
sys/kern/subr_taskqueue.c
469
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
473
taskqueue_unblock(struct taskqueue *queue)
sys/kern/subr_taskqueue.c
476
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
477
queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
sys/kern/subr_taskqueue.c
478
if (!STAILQ_EMPTY(&queue->tq_queue))
sys/kern/subr_taskqueue.c
479
queue->tq_enqueue(queue->tq_context);
sys/kern/subr_taskqueue.c
480
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
484
taskqueue_run_locked(struct taskqueue *queue)
sys/kern/subr_taskqueue.c
492
KASSERT(queue != NULL, ("tq is NULL"));
sys/kern/subr_taskqueue.c
493
TQ_ASSERT_LOCKED(queue);
sys/kern/subr_taskqueue.c
495
LIST_INSERT_HEAD(&queue->tq_active, &tb, tb_link);
sys/kern/subr_taskqueue.c
498
while ((task = STAILQ_FIRST(&queue->tq_queue)) != NULL) {
sys/kern/subr_taskqueue.c
499
STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
sys/kern/subr_taskqueue.c
500
if (queue->tq_hint == task)
sys/kern/subr_taskqueue.c
501
queue->tq_hint = NULL;
sys/kern/subr_taskqueue.c
505
tb.tb_seq = ++queue->tq_seq;
sys/kern/subr_taskqueue.c
507
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
519
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
528
taskqueue_run(struct taskqueue *queue)
sys/kern/subr_taskqueue.c
531
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
532
taskqueue_run_locked(queue);
sys/kern/subr_taskqueue.c
533
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
542
taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task)
sys/kern/subr_taskqueue.c
546
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
547
retval = task->ta_pending > 0 || task_get_busy(queue, task) != NULL;
sys/kern/subr_taskqueue.c
548
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
554
taskqueue_cancel_locked(struct taskqueue *queue, struct task *task,
sys/kern/subr_taskqueue.c
561
STAILQ_REMOVE(&queue->tq_queue, task, task, ta_link);
sys/kern/subr_taskqueue.c
562
if (queue->tq_hint == task)
sys/kern/subr_taskqueue.c
563
queue->tq_hint = NULL;
sys/kern/subr_taskqueue.c
568
tb = task_get_busy(queue, task);
sys/kern/subr_taskqueue.c
578
taskqueue_cancel(struct taskqueue *queue, struct task *task, u_int *pendp)
sys/kern/subr_taskqueue.c
582
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
583
error = taskqueue_cancel_locked(queue, task, pendp);
sys/kern/subr_taskqueue.c
584
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
590
taskqueue_cancel_timeout(struct taskqueue *queue,
sys/kern/subr_taskqueue.c
596
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
598
error = taskqueue_cancel_locked(queue, &timeout_task->t, &pending1);
sys/kern/subr_taskqueue.c
601
queue->tq_callouts--;
sys/kern/subr_taskqueue.c
603
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
611
taskqueue_drain(struct taskqueue *queue, struct task *task)
sys/kern/subr_taskqueue.c
614
if (!queue->tq_spin)
sys/kern/subr_taskqueue.c
617
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
618
while (task->ta_pending != 0 || task_get_busy(queue, task) != NULL)
sys/kern/subr_taskqueue.c
619
TQ_SLEEP(queue, task, "tq_drain");
sys/kern/subr_taskqueue.c
620
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
624
taskqueue_drain_all(struct taskqueue *queue)
sys/kern/subr_taskqueue.c
627
if (!queue->tq_spin)
sys/kern/subr_taskqueue.c
630
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
631
(void)taskqueue_drain_tq_queue(queue);
sys/kern/subr_taskqueue.c
632
(void)taskqueue_drain_tq_active(queue);
sys/kern/subr_taskqueue.c
633
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
637
taskqueue_drain_timeout(struct taskqueue *queue,
sys/kern/subr_taskqueue.c
644
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
648
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
651
taskqueue_drain(queue, &timeout_task->t);
sys/kern/subr_taskqueue.c
656
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
658
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
662
taskqueue_quiesce(struct taskqueue *queue)
sys/kern/subr_taskqueue.c
666
TQ_LOCK(queue);
sys/kern/subr_taskqueue.c
668
ret = taskqueue_drain_tq_queue(queue);
sys/kern/subr_taskqueue.c
670
ret = taskqueue_drain_tq_active(queue);
sys/kern/subr_taskqueue.c
672
TQ_UNLOCK(queue);
sys/kern/subr_taskqueue.c
903
taskqueue_member(struct taskqueue *queue, struct thread *td)
sys/kern/subr_taskqueue.c
908
if (queue->tq_threads[i] == NULL)
sys/kern/subr_taskqueue.c
910
if (queue->tq_threads[i] == td) {
sys/kern/subr_taskqueue.c
914
if (++j >= queue->tq_tcount)
sys/kern/subr_turnstile.c
1067
turnstile_head(struct turnstile *ts, int queue)
sys/kern/subr_turnstile.c
1072
MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
sys/kern/subr_turnstile.c
1075
return (TAILQ_FIRST(&ts->ts_blocked[queue]));
sys/kern/subr_turnstile.c
1082
turnstile_empty(struct turnstile *ts, int queue)
sys/kern/subr_turnstile.c
1087
MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
sys/kern/subr_turnstile.c
1090
return (TAILQ_EMPTY(&ts->ts_blocked[queue]));
sys/kern/subr_turnstile.c
1103
print_queue(struct threadqueue *queue, const char *header, const char *prefix)
sys/kern/subr_turnstile.c
1108
if (TAILQ_EMPTY(queue)) {
sys/kern/subr_turnstile.c
1112
TAILQ_FOREACH(td, queue, td_lockq) {
sys/kern/subr_turnstile.c
316
int queue;
sys/kern/subr_turnstile.c
348
queue = td->td_tsqueue;
sys/kern/subr_turnstile.c
349
MPASS(queue == TS_EXCLUSIVE_QUEUE || queue == TS_SHARED_QUEUE);
sys/kern/subr_turnstile.c
351
TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
sys/kern/subr_turnstile.c
352
TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq) {
sys/kern/subr_turnstile.c
359
TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
sys/kern/subr_turnstile.c
739
turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
sys/kern/subr_turnstile.c
749
MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
sys/kern/subr_turnstile.c
778
TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
sys/kern/subr_turnstile.c
782
TAILQ_FOREACH(td1, &ts->ts_blocked[queue], td_lockq)
sys/kern/subr_turnstile.c
789
TAILQ_INSERT_TAIL(&ts->ts_blocked[queue], td, td_lockq);
sys/kern/subr_turnstile.c
801
td->td_tsqueue = queue;
sys/kern/subr_turnstile.c
828
turnstile_signal(struct turnstile *ts, int queue)
sys/kern/subr_turnstile.c
838
MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
sys/kern/subr_turnstile.c
844
td = TAILQ_FIRST(&ts->ts_blocked[queue]);
sys/kern/subr_turnstile.c
847
TAILQ_REMOVE(&ts->ts_blocked[queue], td, td_lockq);
sys/kern/subr_turnstile.c
879
turnstile_broadcast(struct turnstile *ts, int queue)
sys/kern/subr_turnstile.c
895
MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
sys/kern/subr_turnstile.c
901
TAILQ_CONCAT(&ts->ts_pending, &ts->ts_blocked[queue], td_lockq);
sys/net/altq/altq_fairq.c
373
qlimit(&cl->cl_buckets[i].queue) = qlimit;
sys/net/altq/altq_fairq.c
658
return rio_addq((rio_t *)cl->cl_red, &b->queue, m, cl->cl_pktattr);
sys/net/altq/altq_fairq.c
662
return red_addq(cl->cl_red, &b->queue, m, cl->cl_pktattr);
sys/net/altq/altq_fairq.c
666
return codel_addq(cl->cl_codel, &b->queue, m);
sys/net/altq/altq_fairq.c
668
if (qlen(&b->queue) >= qlimit(&b->queue)) {
sys/net/altq/altq_fairq.c
676
_addq(&b->queue, m);
sys/net/altq/altq_fairq.c
692
m = rio_getq((rio_t *)cl->cl_red, &b->queue);
sys/net/altq/altq_fairq.c
696
m = red_getq(cl->cl_red, &b->queue);
sys/net/altq/altq_fairq.c
700
m = codel_getq(cl->cl_codel, &b->queue);
sys/net/altq/altq_fairq.c
703
m = _getq(&b->queue);
sys/net/altq/altq_fairq.c
755
m = qhead(&b->queue);
sys/net/altq/altq_fairq.c
802
if (qempty(&b->queue)) {
sys/net/altq/altq_fairq.c
847
while ((m = _getq(&b->queue)) != NULL) {
sys/net/altq/altq_fairq.c
851
ASSERT(qlen(&b->queue) == 0);
sys/net/altq/altq_fairq.c
872
sp->qlength += qlen(&b->queue);
sys/net/altq/altq_fairq.h
95
class_queue_t queue; /* the actual queue */
sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c
1750
int error = 0, queue, qlen;
sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c
1816
queue = ((ng_ubt_node_qlen_ep *) (msg->data))->queue;
sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c
1819
switch (queue) {
sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c
1847
queue = ((ng_ubt_node_qlen_ep *) (msg->data))->queue;
sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c
1849
switch (queue) {
sys/netgraph/bluetooth/drivers/ubt/ng_ubt.c
1875
((ng_ubt_node_qlen_ep *) (rsp->data))->queue = queue;
sys/netgraph/bluetooth/include/ng_bluetooth.h
154
STAILQ_HEAD(, ng_item) queue; /* actually items queue */
sys/netgraph/bluetooth/include/ng_bluetooth.h
164
STAILQ_INIT(&(q)->queue); \
sys/netgraph/bluetooth/include/ng_bluetooth.h
175
#define NG_BT_ITEMQ_FIRST(q) STAILQ_FIRST(&(q)->queue)
sys/netgraph/bluetooth/include/ng_bluetooth.h
185
STAILQ_INSERT_TAIL(&(q)->queue, (i), el_next); \
sys/netgraph/bluetooth/include/ng_bluetooth.h
191
(i) = STAILQ_FIRST(&(q)->queue); \
sys/netgraph/bluetooth/include/ng_bluetooth.h
193
STAILQ_REMOVE_HEAD(&(q)->queue, el_next); \
sys/netgraph/bluetooth/include/ng_bluetooth.h
200
STAILQ_INSERT_HEAD(&(q)->queue, (i), el_next); \
sys/netgraph/bluetooth/include/ng_ubt.h
68
int32_t queue; /* queue index */
sys/netgraph/netgraph.h
1158
int ng_snd_item(item_p item, int queue);
sys/netgraph/netgraph.h
369
STAILQ_HEAD(, ng_item) queue; /* actually items queue */
sys/netgraph/ng_base.c
135
STAILQ_HEAD_INITIALIZER(ng_deadnode.nd_input_queue.queue),
sys/netgraph/ng_base.c
1921
#define HEAD_IS_READER(QP) NGI_QUEUED_READER(STAILQ_FIRST(&(QP)->queue))
sys/netgraph/ng_base.c
1922
#define HEAD_IS_WRITER(QP) NGI_QUEUED_WRITER(STAILQ_FIRST(&(QP)->queue)) /* notused */
sys/netgraph/ng_base.c
2002
item = STAILQ_FIRST(&ngq->queue);
sys/netgraph/ng_base.c
2003
STAILQ_REMOVE_HEAD(&ngq->queue, el_next);
sys/netgraph/ng_base.c
2004
if (STAILQ_EMPTY(&ngq->queue))
sys/netgraph/ng_base.c
2029
STAILQ_INSERT_TAIL(&ngq->queue, item, el_next);
sys/netgraph/ng_base.c
2139
if (STAILQ_EMPTY(&ngq->queue)) {
sys/netgraph/ng_base.c
2146
STAILQ_INSERT_HEAD(&ngq->queue, item, el_next);
sys/netgraph/ng_base.c
2182
while ((item = STAILQ_FIRST(&ngq->queue)) != NULL) {
sys/netgraph/ng_base.c
2183
STAILQ_REMOVE_HEAD(&ngq->queue, el_next);
sys/netgraph/ng_base.c
2184
if (STAILQ_EMPTY(&ngq->queue))
sys/netgraph/ng_base.c
2231
int queue, rw;
sys/netgraph/ng_base.c
2278
queue = 1;
sys/netgraph/ng_base.c
2281
queue = 1;
sys/netgraph/ng_base.c
2283
queue = 0;
sys/netgraph/ng_base.c
2298
queue = 1;
sys/netgraph/ng_base.c
2301
if (queue) {
sys/netgraph/ng_base.c
666
STAILQ_INIT(&node->nd_input_queue.queue);
sys/netpfil/pf/pf.c
1138
struct pf_overload_head queue;
sys/netpfil/pf/pf.c
1146
queue = V_pf_overloadqueue;
sys/netpfil/pf/pf.c
1151
SLIST_FOREACH(pfoe, &queue, next) {
sys/netpfil/pf/pf.c
1185
SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
sys/netpfil/pf/pf.c
1187
SLIST_REMOVE(&queue, pfoe, pf_overload_entry, next);
sys/netpfil/pf/pf.c
1194
if (SLIST_EMPTY(&queue)) {
sys/netpfil/pf/pf.c
1207
SLIST_FOREACH(pfoe, &queue, next)
sys/netpfil/pf/pf.c
1222
SLIST_FOREACH_SAFE(pfoe, &queue, next, pfoe1)
sys/netpfil/pf/pf.c
2737
struct pf_send_head queue;
sys/netpfil/pf/pf.c
2743
queue = V_pf_sendqueue;
sys/netpfil/pf/pf.c
2749
STAILQ_FOREACH_SAFE(pfse, &queue, pfse_next, next) {
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib.h
421
struct ifqueue queue;
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_cm.c
897
_IF_DEQUEUE(&p->path->queue, mb);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
394
_IF_DRAIN(&path->queue);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
548
_IF_DEQUEUE(&path->queue, mb);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
597
bzero(&path->queue, sizeof(path->queue));
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
683
if (_IF_QLEN(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE)
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
684
_IF_ENQUEUE(&path->queue, mb);
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
709
path->queue.ifq_len < IPOIB_MAX_PATH_REC_QUEUE) {
sys/ofed/drivers/infiniband/ulp/ipoib/ipoib_main.c
710
_IF_ENQUEUE(&path->queue, mb);
sys/ofed/include/rdma/rdmavt_cq.h
88
struct rvt_cq_wc *queue;
sys/powerpc/powernv/xive.c
186
struct xive_queue queue; /* We only use a single queue for now. */
sys/powerpc/powernv/xive.c
375
xive_cpud->queue.q_page = contigmalloc(PAGE_SIZE, M_XIVE,
sys/powerpc/powernv/xive.c
377
xive_cpud->queue.q_size = 1 << PAGE_SHIFT;
sys/powerpc/powernv/xive.c
378
xive_cpud->queue.q_mask =
sys/powerpc/powernv/xive.c
379
((xive_cpud->queue.q_size / sizeof(int)) - 1);
sys/powerpc/powernv/xive.c
380
xive_cpud->queue.q_toggle = 0;
sys/powerpc/powernv/xive.c
381
xive_cpud->queue.q_index = 0;
sys/powerpc/powernv/xive.c
387
XIVE_PRIORITY, vtophys(xive_cpud->queue.q_page), PAGE_SHIFT,
sys/powerpc/powernv/xive.c
529
vector = xive_read_eq(&xive_cpud->queue);
sys/powerpc/ps3/ps3disk.c
427
TAILQ_FOREACH(bp, &sc->sc_bioq.queue, bio_queue) {
sys/powerpc/pseries/phyp_vscsi.c
322
TAILQ_INSERT_TAIL(&sc->free_xferq, &sc->loginxp, queue);
sys/powerpc/pseries/phyp_vscsi.c
350
TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
sys/powerpc/pseries/phyp_vscsi.c
473
TAILQ_REMOVE(&sc->free_xferq, xp, queue);
sys/powerpc/pseries/phyp_vscsi.c
474
TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
sys/powerpc/pseries/phyp_vscsi.c
503
TAILQ_REMOVE(&sc->free_xferq, xp, queue);
sys/powerpc/pseries/phyp_vscsi.c
504
TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
sys/powerpc/pseries/phyp_vscsi.c
550
TAILQ_REMOVE(&sc->free_xferq, xp, queue);
sys/powerpc/pseries/phyp_vscsi.c
551
TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
sys/powerpc/pseries/phyp_vscsi.c
805
TAILQ_REMOVE(&sc->free_xferq, xp, queue);
sys/powerpc/pseries/phyp_vscsi.c
806
TAILQ_INSERT_TAIL(&sc->active_xferq, xp, queue);
sys/powerpc/pseries/phyp_vscsi.c
82
TAILQ_ENTRY(vscsi_xfer) queue;
sys/powerpc/pseries/phyp_vscsi.c
984
TAILQ_REMOVE(&sc->active_xferq, xp, queue);
sys/powerpc/pseries/phyp_vscsi.c
985
TAILQ_INSERT_TAIL(&sc->free_xferq, xp, queue);
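Every phyp_vscsi.c hit above is the same idiom: each vscsi_xfer carries a TAILQ_ENTRY field named queue and is shuttled between a free list and an active list. A minimal, self-contained sketch of that idiom (not from the tree; names hypothetical):

    #include <sys/queue.h>

    struct xfer {
            TAILQ_ENTRY(xfer) queue;        /* linkage shared by both lists */
    };

    TAILQ_HEAD(xferq, xfer);

    /* Move one transfer from the free list to the active list, if any. */
    static struct xfer *
    xfer_activate(struct xferq *freeq, struct xferq *activeq)
    {
            struct xfer *xp = TAILQ_FIRST(freeq);

            if (xp != NULL) {
                    TAILQ_REMOVE(freeq, xp, queue);
                    TAILQ_INSERT_TAIL(activeq, xp, queue);
            }
            return (xp);
    }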
sys/riscv/vmm/vmm_fence.c
52
struct vmm_fence *queue;
sys/riscv/vmm/vmm_fence.c
56
queue = hypctx->fence_queue;
sys/riscv/vmm/vmm_fence.c
57
fence = &queue[hypctx->fence_queue_head];
sys/riscv/vmm/vmm_fence.c
75
struct vmm_fence *queue;
sys/riscv/vmm/vmm_fence.c
79
queue = hypctx->fence_queue;
sys/riscv/vmm/vmm_fence.c
80
fence = &queue[hypctx->fence_queue_tail];
sys/sys/ata.h
127
/*075*/ u_int16_t queue;
sys/sys/bio.h
145
TAILQ_HEAD(bio_queue, bio) queue;
sys/sys/buf.h
404
TAILQ_HEAD(buf_queue, buf) queue;
sys/sys/gtaskqueue.h
61
void gtaskqueue_block(struct gtaskqueue *queue);
sys/sys/gtaskqueue.h
62
void gtaskqueue_unblock(struct gtaskqueue *queue);
sys/sys/gtaskqueue.h
64
int gtaskqueue_cancel(struct gtaskqueue *queue, struct gtask *gtask);
sys/sys/gtaskqueue.h
65
void gtaskqueue_drain(struct gtaskqueue *queue, struct gtask *task);
sys/sys/gtaskqueue.h
66
void gtaskqueue_drain_all(struct gtaskqueue *queue);
sys/sys/gtaskqueue.h
70
int grouptaskqueue_enqueue(struct gtaskqueue *queue, struct gtask *task);
sys/sys/signalvar.h
409
void sigqueue_delete(struct sigqueue *queue, int sig);
sys/sys/signalvar.h
411
void sigqueue_flush(struct sigqueue *queue);
sys/sys/signalvar.h
412
void sigqueue_init(struct sigqueue *queue, struct proc *p);
sys/sys/sleepqueue.h
100
void sleepq_signal(const void *wchan, int flags, int pri, int queue);
sys/sys/sleepqueue.h
105
u_int sleepq_sleepcnt(const void *wchan, int queue);
sys/sys/sleepqueue.h
114
int sleepq_sbuf_print_stacks(struct sbuf *sb, const void *wchan, int queue,
sys/sys/sleepqueue.h
88
const char *wmesg, int flags, int queue);
sys/sys/sleepqueue.h
90
void sleepq_broadcast(const void *wchan, int flags, int pri, int queue);
sys/sys/sleepqueue.h
97
void sleepq_remove_matching(struct sleepqueue *sq, int queue,
sys/sys/taskqueue.h
100
int taskqueue_member(struct taskqueue *queue, struct thread *td);
sys/sys/taskqueue.h
101
void taskqueue_set_callback(struct taskqueue *queue,
sys/sys/taskqueue.h
129
void _timeout_task_init(struct taskqueue *queue,
sys/sys/taskqueue.h
132
#define TIMEOUT_TASK_INIT(queue, timeout_task, priority, func, context) do { \
sys/sys/taskqueue.h
135
_timeout_task_init(queue, timeout_task, priority, func, context); \
sys/sys/taskqueue.h
78
int taskqueue_enqueue(struct taskqueue *queue, struct task *task);
sys/sys/taskqueue.h
79
int taskqueue_enqueue_flags(struct taskqueue *queue, struct task *task,
sys/sys/taskqueue.h
81
int taskqueue_enqueue_timeout(struct taskqueue *queue,
sys/sys/taskqueue.h
83
int taskqueue_enqueue_timeout_sbt(struct taskqueue *queue,
sys/sys/taskqueue.h
86
int taskqueue_poll_is_busy(struct taskqueue *queue, struct task *task);
sys/sys/taskqueue.h
87
int taskqueue_cancel(struct taskqueue *queue, struct task *task,
sys/sys/taskqueue.h
89
int taskqueue_cancel_timeout(struct taskqueue *queue,
sys/sys/taskqueue.h
91
void taskqueue_drain(struct taskqueue *queue, struct task *task);
sys/sys/taskqueue.h
92
void taskqueue_drain_timeout(struct taskqueue *queue,
sys/sys/taskqueue.h
94
void taskqueue_drain_all(struct taskqueue *queue);
sys/sys/taskqueue.h
95
void taskqueue_quiesce(struct taskqueue *queue);
sys/sys/taskqueue.h
96
void taskqueue_free(struct taskqueue *queue);
sys/sys/taskqueue.h
97
void taskqueue_run(struct taskqueue *queue);
sys/sys/taskqueue.h
98
void taskqueue_block(struct taskqueue *queue);
sys/sys/taskqueue.h
99
void taskqueue_unblock(struct taskqueue *queue);
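The sys/sys/taskqueue.h prototypes above all take the taskqueue as their first parameter; a minimal kernel-side sketch of the usual create/enqueue/drain/free cycle, with a hypothetical handler name, is:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kernel.h>
    #include <sys/malloc.h>
    #include <sys/priority.h>
    #include <sys/taskqueue.h>

    static struct taskqueue *example_tq;
    static struct task example_task;

    static void
    example_handler(void *arg, int pending)
    {
            /* Runs in the taskqueue thread context. */
    }

    static void
    example_setup(void)
    {
            example_tq = taskqueue_create("example", M_WAITOK,
                taskqueue_thread_enqueue, &example_tq);
            taskqueue_start_threads(&example_tq, 1, PRI_MIN, "example taskq");
            TASK_INIT(&example_task, 0, example_handler, NULL);
            taskqueue_enqueue(example_tq, &example_task);
    }

    static void
    example_teardown(void)
    {
            taskqueue_drain(example_tq, &example_task);
            taskqueue_free(example_tq);
    }

Repeated enqueues of an already-queued task are coalesced; the handler's pending argument reports how many enqueues were merged since it last ran.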
sys/sys/turnstile.h
92
int turnstile_empty(struct turnstile *ts, int queue);
sys/vm/memguard.c
263
KASSERT(vm_page_wired(p) && p->a.queue == PQ_NONE,
sys/vm/memguard.c
278
KASSERT(vm_page_wired(p) && p->a.queue == PQ_NONE,
sys/vm/vm_object.c
2371
uint8_t queue)
sys/vm/vm_object.c
2437
vm_page_unwire(tm, queue);
sys/vm/vm_object.c
890
vm_page_astate_load(p).queue != PQ_NONE,
sys/vm/vm_object.h
392
vm_size_t length, uint8_t queue);
sys/vm/vm_page.c
1341
m->a.queue = PQ_NONE;
sys/vm/vm_page.c
181
static void vm_page_enqueue(vm_page_t m, uint8_t queue);
sys/vm/vm_page.c
186
static void vm_page_mvqueue(vm_page_t m, const uint8_t queue,
sys/vm/vm_page.c
2714
KASSERT(m->a.queue == PQ_NONE &&
sys/vm/vm_page.c
2717
m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK)));
sys/vm/vm_page.c
3640
_vm_page_pagequeue(vm_page_t m, uint8_t queue)
sys/vm/vm_page.c
3643
return (&vm_pagequeue_domain(m)->vmd_pagequeues[queue]);
sys/vm/vm_page.c
3651
return (_vm_page_pagequeue(m, vm_page_astate_load(m).queue));
sys/vm/vm_page.c
3684
KASSERT(old->queue != PQ_NONE && new.queue != old->queue,
sys/vm/vm_page.c
3686
__func__, old->queue, new.queue));
sys/vm/vm_page.c
3724
pq = _vm_page_pagequeue(m, old->queue);
sys/vm/vm_page.c
3752
KASSERT(old->queue != PQ_NONE && new.queue == old->queue,
sys/vm/vm_page.c
3754
__func__, old->queue, new.queue));
sys/vm/vm_page.c
3790
KASSERT(old->queue == new.queue || new.queue != PQ_NONE,
sys/vm/vm_page.c
3792
__func__, new.queue, new.flags));
sys/vm/vm_page.c
3797
vm_page_pqbatch_submit(m, new.queue);
sys/vm/vm_page.c
3812
if (old->queue != PQ_NONE && new.queue != old->queue) {
sys/vm/vm_page.c
3815
if (new.queue != PQ_NONE)
sys/vm/vm_page.c
3816
vm_page_pqbatch_submit(m, new.queue);
sys/vm/vm_page.c
3820
if (new.queue != PQ_NONE &&
sys/vm/vm_page.c
3822
vm_page_pqbatch_submit(m, new.queue);
sys/vm/vm_page.c
3831
vm_pqbatch_process_page(struct vm_pagequeue *pq, vm_page_t m, uint8_t queue)
sys/vm/vm_page.c
3837
KASSERT(queue < PQ_COUNT,
sys/vm/vm_page.c
3838
("%s: invalid queue index %d", __func__, queue));
sys/vm/vm_page.c
3839
KASSERT(pq == _vm_page_pagequeue(m, queue),
sys/vm/vm_page.c
3843
if (__predict_false(old.queue != queue ||
sys/vm/vm_page.c
3854
new.queue = PQ_NONE;
sys/vm/vm_page.c
3873
uint8_t queue)
sys/vm/vm_page.c
3878
vm_pqbatch_process_page(pq, bq->bq_pa[i], queue);
sys/vm/vm_page.c
3890
vm_page_pqbatch_submit(vm_page_t m, uint8_t queue)
sys/vm/vm_page.c
3896
KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));
sys/vm/vm_page.c
3900
bq = DPCPU_PTR(pqbatch[domain][queue]);
sys/vm/vm_page.c
3908
pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
sys/vm/vm_page.c
3910
vm_pqbatch_process(pq, bq, queue);
sys/vm/vm_page.c
3920
pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
sys/vm/vm_page.c
3923
bq = DPCPU_PTR(pqbatch[domain][queue]);
sys/vm/vm_page.c
3924
vm_pqbatch_process(pq, bq, queue);
sys/vm/vm_page.c
3925
vm_pqbatch_process_page(pq, m, queue);
sys/vm/vm_page.c
3943
int cpu, domain, queue;
sys/vm/vm_page.c
3953
for (queue = 0; queue < PQ_COUNT; queue++) {
sys/vm/vm_page.c
3954
pq = &vmd->vmd_pagequeues[queue];
sys/vm/vm_page.c
3958
DPCPU_PTR(pqbatch[domain][queue]), queue);
sys/vm/vm_page.c
3983
if (old.queue == PQ_NONE) {
sys/vm/vm_page.c
4007
if (__predict_true(old.queue == PQ_NONE)) {
sys/vm/vm_page.c
4015
new.queue = PQ_NONE;
sys/vm/vm_page.c
4025
vm_page_enqueue(vm_page_t m, uint8_t queue)
sys/vm/vm_page.c
4028
KASSERT(m->a.queue == PQ_NONE &&
sys/vm/vm_page.c
4034
m->a.queue = queue;
sys/vm/vm_page.c
4037
vm_page_pqbatch_submit(m, queue);
sys/vm/vm_page.c
4087
KASSERT(m->a.queue == PQ_NONE,
sys/vm/vm_page.c
4107
KASSERT(m->a.queue == PQ_NONE,
sys/vm/vm_page.c
4395
if (old.queue == nqueue) {
sys/vm/vm_page.c
4405
new.queue = nqueue;
sys/vm/vm_page.c
445
vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
sys/vm/vm_page.c
4499
if (nflag != PGA_REQUEUE_HEAD && old.queue == PQ_ACTIVE &&
sys/vm/vm_page.c
4504
new.queue = nqueue;
sys/vm/vm_page.c
452
marker->a.queue = queue;
sys/vm/vm_page.c
525
m->a.queue = PQ_NONE;
sys/vm/vm_page.c
5944
m->a.queue, m->ref_count, m->a.flags, m->oflags,
sys/vm/vm_page.h
211
uint8_t queue;
sys/vm/vm_page.h
608
void vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags);
sys/vm/vm_page.h
624
void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
sys/vm/vm_page.h
658
void vm_page_unwire(vm_page_t m, uint8_t queue);
sys/vm/vm_page.h
781
KASSERT(new.queue == PQ_INACTIVE || (new.flags & PGA_REQUEUE_HEAD) == 0,
sys/vm/vm_page.h
783
KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
sys/vm/vm_page.h
870
return (as.queue);
sys/vm/vm_page.h
902
uint8_t queue;
sys/vm/vm_page.h
904
queue = vm_page_queue(m);
sys/vm/vm_page.h
905
return (queue == PQ_LAUNDRY || queue == PQ_UNSWAPPABLE);
sys/vm/vm_pageout.c
1315
if (old.queue != PQ_ACTIVE) {
sys/vm/vm_pageout.c
1318
new.queue = PQ_ACTIVE;
sys/vm/vm_pageout.c
1356
new.queue = nqueue;
sys/vm/vm_pageout.c
1378
if (as.queue != PQ_INACTIVE || (as.flags & PGA_ENQUEUED) != 0)
sys/vm/vm_pageout.c
1584
new.queue = PQ_ACTIVE;
sys/vm/vm_pageout.c
1591
new.queue = PQ_INACTIVE;
sys/vm/vm_pageout.c
1636
m->a.queue = PQ_NONE;
sys/vm/vm_pageout.c
351
vm_pageout_defer(vm_page_t m, const uint8_t queue, const bool enqueued)
sys/vm/vm_pageout.c
356
if (__predict_false(as.queue != queue ||
sys/vm/vm_pageout.c
360
vm_page_pqbatch_submit(m, queue);
sys/vm/vm_pageout.c
724
int act_delta, error, numpagedout, queue, refs, starting_target;
sys/vm/vm_pageout.c
742
queue = PQ_UNSWAPPABLE;
sys/vm/vm_pageout.c
744
queue = PQ_LAUNDRY;
sys/vm/vm_pageout.c
747
marker = &vmd->vmd_markers[queue];
sys/vm/vm_pageout.c
748
pq = &vmd->vmd_pagequeues[queue];
sys/vm/vm_pageout.c
761
if (vm_pageout_defer(m, queue, true))
sys/vm/vm_pageout.c
839
new.queue = PQ_ACTIVE;
sys/vm/vm_pageout.c
890
if (vm_pageout_defer(m, queue, true))
sys/vm/vm_pageout.c
939
if (launder > 0 && queue == PQ_UNSWAPPABLE) {
sys/vm/vm_pageout.c
940
queue = PQ_LAUNDRY;
sys/vm/vm_phys.c
404
if (__predict_false(vm_page_astate_load(m).queue != PQ_NONE))
sys/vm/vm_reserv.c
1277
struct vm_reserv_queue *queue;
sys/vm/vm_reserv.c
1297
queue = &vm_rvd[domain].partpop;
sys/vm/vm_reserv.c
1311
TAILQ_FOREACH_SAFE(rv, queue, partpopq, rvn) {
sys/vm/vm_reserv.c
1327
TAILQ_INSERT_AFTER(queue, rv, marker, partpopq);
sys/vm/vm_reserv.c
1335
TAILQ_REMOVE(queue, marker, partpopq);
sys/vm/vm_reserv.c
1339
TAILQ_REMOVE(queue, marker, partpopq);
tools/tools/aac/aac_checkq.c
56
int fd, retval, queue;
tools/tools/aac/aac_checkq.c
67
queue = atoi(argv[1]);
tools/tools/aac/aac_checkq.c
68
printf("Getting stats for queue %d\n", queue);
tools/tools/aac/aac_checkq.c
70
sr.as_item = queue;
tools/tools/fixwhite/fixwhite.c
121
queue[queuelen++] = '\t';
tools/tools/fixwhite/fixwhite.c
125
if (fwrite(queue, 1, queuelen, stdout) != queuelen) {
tools/tools/fixwhite/fixwhite.c
33
static char *queue = NULL;
tools/tools/fixwhite/fixwhite.c
43
queue = realloc(queue, queuesize);
tools/tools/fixwhite/fixwhite.c
44
if (queue == NULL) {
tools/tools/fixwhite/fixwhite.c
49
queue[queuelen++] = c;
tools/tools/fixwhite/fixwhite.c
68
return (queuelen >= back && queue[queuelen - back] == c);
tools/tools/mfi/mfi_checkq.c
53
int fd, retval, queue;
tools/tools/mfi/mfi_checkq.c
64
queue = atoi(argv[1]);
tools/tools/mfi/mfi_checkq.c
65
printf("Getting stats for queue %d\n", queue);
tools/tools/mfi/mfi_checkq.c
67
sr.ms_item = queue;
tools/tools/net80211/w00t/expand/expand.c
114
struct queue *q = p->q;
tools/tools/net80211/w00t/expand/expand.c
117
struct queue *last = NULL;
tools/tools/net80211/w00t/expand/expand.c
135
q = (struct queue*) malloc(sizeof(*q));
tools/tools/net80211/w00t/expand/expand.c
181
struct queue *q = p->q;
tools/tools/net80211/w00t/expand/expand.c
304
struct queue *q = p->q;
tools/tools/net80211/w00t/expand/expand.c
55
struct queue *next;
tools/tools/net80211/w00t/expand/expand.c
73
struct queue *q;
tools/tools/net80211/w00t/redir/redir.c
155
struct queue *q = p->q;
tools/tools/net80211/w00t/redir/redir.c
158
struct queue *last = NULL;
tools/tools/net80211/w00t/redir/redir.c
176
q = (struct queue*) malloc(sizeof(*q));
tools/tools/net80211/w00t/redir/redir.c
253
void send_header(struct params *p, struct queue *q)
tools/tools/net80211/w00t/redir/redir.c
321
struct queue *q = p->q;
tools/tools/net80211/w00t/redir/redir.c
334
struct queue *q = p->q;
tools/tools/net80211/w00t/redir/redir.c
477
struct queue *q = p->q;
tools/tools/net80211/w00t/redir/redir.c
478
struct queue *last = p->q;
tools/tools/net80211/w00t/redir/redir.c
57
struct queue *next;
tools/tools/net80211/w00t/redir/redir.c
82
struct queue *q;
usr.bin/at/at.c
114
static void writefile(time_t runtimer, char queue);
usr.bin/at/at.c
206
writefile(time_t runtimer, char queue)
usr.bin/at/at.c
266
sprintf(ppos, "%c%5lx%8lx", queue,
usr.bin/at/at.c
466
char queue;
usr.bin/at/at.c
498
if(sscanf(dirent->d_name, "%c%5lx%8lx", &queue, &jobno, &ctm)!=3)
usr.bin/at/at.c
505
if (atqueue && (queue != atqueue))
usr.bin/at/at.c
520
queue,
usr.bin/at/at.c
542
char queue;
usr.bin/at/at.c
578
if(sscanf(dirent->d_name, "%c%5lx%8lx", &queue, &jobno, &ctm)!=3)
usr.bin/at/at.c
734
char queue = DEFAULT_AT_QUEUE;
usr.bin/at/at.c
794
atqueue = queue = *optarg;
usr.bin/at/at.c
795
if (!(islower(queue)||isupper(queue)))
usr.bin/at/at.c
886
writefile(timer, queue);
usr.bin/at/at.c
891
queue = toupper(queue);
usr.bin/at/at.c
893
queue = DEFAULT_BATCH_QUEUE;
usr.bin/at/at.c
906
writefile(timer, queue);
usr.bin/posixmqcontrol/posixmqcontrol.c
135
sane_queue(const char *queue)
usr.bin/posixmqcontrol/posixmqcontrol.c
139
if (queue[size] != '/') {
usr.bin/posixmqcontrol/posixmqcontrol.c
140
warnx("queue name [%-.*s] must start with '/'.", NAME_MAX, queue);
usr.bin/posixmqcontrol/posixmqcontrol.c
144
for (size++; queue[size] != 0 && size < NAME_MAX; size++) {
usr.bin/posixmqcontrol/posixmqcontrol.c
145
if (queue[size] == '/') {
usr.bin/posixmqcontrol/posixmqcontrol.c
147
NAME_MAX, queue);
usr.bin/posixmqcontrol/posixmqcontrol.c
152
if (size == NAME_MAX && queue[size] != 0) {
usr.bin/posixmqcontrol/posixmqcontrol.c
154
NAME_MAX, queue, NAME_MAX);
usr.bin/posixmqcontrol/posixmqcontrol.c
242
parse_queue(const char *queue)
usr.bin/posixmqcontrol/posixmqcontrol.c
244
if (sane_queue(queue)) {
usr.bin/posixmqcontrol/posixmqcontrol.c
247
n1->text = queue;
usr.bin/posixmqcontrol/posixmqcontrol.c
253
parse_single_queue(const char *queue)
usr.bin/posixmqcontrol/posixmqcontrol.c
255
if (sane_queue(queue)) {
usr.bin/posixmqcontrol/posixmqcontrol.c
259
n1->text = queue;
usr.bin/posixmqcontrol/posixmqcontrol.c
262
warnx("ignoring extra -q queue [%s].", queue);
usr.bin/posixmqcontrol/posixmqcontrol.c
425
create(const char *queue, struct Creation q_creation)
usr.bin/posixmqcontrol/posixmqcontrol.c
440
mqd_t handle = mq_open(queue, flags);
usr.bin/posixmqcontrol/posixmqcontrol.c
452
handle = mq_open(queue, flags, q_creation.mode, &stuff);
usr.bin/posixmqcontrol/posixmqcontrol.c
522
rm(const char *queue)
usr.bin/posixmqcontrol/posixmqcontrol.c
524
int result = mq_unlink(queue);
usr.bin/posixmqcontrol/posixmqcontrol.c
558
info(const char *queue)
usr.bin/posixmqcontrol/posixmqcontrol.c
560
mqd_t handle = mq_open(queue, O_RDONLY);
usr.bin/posixmqcontrol/posixmqcontrol.c
582
queue, actual.mq_msgsize * actual.mq_curmsgs, actual.mq_msgsize,
usr.bin/posixmqcontrol/posixmqcontrol.c
615
recv(const char *queue)
usr.bin/posixmqcontrol/posixmqcontrol.c
617
mqd_t handle = mq_open(queue, O_RDONLY);
usr.bin/posixmqcontrol/posixmqcontrol.c
661
send(const char *queue, const char *text, unsigned q_priority)
usr.bin/posixmqcontrol/posixmqcontrol.c
663
mqd_t handle = mq_open(queue, O_WRONLY);
usr.bin/posixmqcontrol/posixmqcontrol.c
832
const char *queue = itq->text;
usr.bin/posixmqcontrol/posixmqcontrol.c
834
int result = create(queue, creation);
usr.bin/posixmqcontrol/posixmqcontrol.c
850
const char *queue = itq->text;
usr.bin/posixmqcontrol/posixmqcontrol.c
851
int result = info(queue);
usr.bin/posixmqcontrol/posixmqcontrol.c
868
const char *queue = itq->text;
usr.bin/posixmqcontrol/posixmqcontrol.c
873
int result = send(queue, content, priority);
usr.bin/posixmqcontrol/posixmqcontrol.c
887
const char *queue = STAILQ_FIRST(&queues)->text;
usr.bin/posixmqcontrol/posixmqcontrol.c
888
int worst = recv(queue);
usr.bin/posixmqcontrol/posixmqcontrol.c
902
const char *queue = itq->text;
usr.bin/posixmqcontrol/posixmqcontrol.c
903
int result = rm(queue);
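posixmqcontrol drives the POSIX mqueue API shown above (mq_open, mq_unlink, and friends); a minimal userland round trip, assuming a hypothetical queue name and linking against librt on FreeBSD, looks roughly like:

    #include <fcntl.h>
    #include <mqueue.h>
    #include <stdio.h>
    #include <string.h>

    #define EXAMPLE_QUEUE "/example"    /* hypothetical; must start with '/' */

    int
    example_roundtrip(void)
    {
            struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 128 };
            char buf[128];              /* must be >= mq_msgsize for mq_receive */
            mqd_t mq;

            mq = mq_open(EXAMPLE_QUEUE, O_RDWR | O_CREAT, 0600, &attr);
            if (mq == (mqd_t)-1)
                    return (-1);
            if (mq_send(mq, "hello", strlen("hello"), 0) == -1)
                    perror("mq_send");
            if (mq_receive(mq, buf, sizeof(buf), NULL) == -1)
                    perror("mq_receive");
            mq_close(mq);
            mq_unlink(EXAMPLE_QUEUE);
            return (0);
    }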
usr.sbin/bhyve/pci_ahci.c
1126
ata_ident->queue = 31;
usr.sbin/bhyve/pci_virtio_input.c
468
struct vtinput_eventqueue *queue, struct input_event *e)
usr.sbin/bhyve/pci_virtio_input.c
471
if (queue->idx >= queue->size) {
usr.sbin/bhyve/pci_virtio_input.c
473
const uint32_t newSize = queue->idx;
usr.sbin/bhyve/pci_virtio_input.c
474
void *newPtr = realloc(queue->events,
usr.sbin/bhyve/pci_virtio_input.c
475
queue->size * sizeof(struct vtinput_event_elem));
usr.sbin/bhyve/pci_virtio_input.c
481
queue->events = newPtr;
usr.sbin/bhyve/pci_virtio_input.c
482
queue->size = newSize;
usr.sbin/bhyve/pci_virtio_input.c
486
struct vtinput_event *event = &queue->events[queue->idx].event;
usr.sbin/bhyve/pci_virtio_input.c
490
queue->idx++;
usr.sbin/bhyve/pci_virtio_input.c
496
vtinput_eventqueue_clear(struct vtinput_eventqueue *queue)
usr.sbin/bhyve/pci_virtio_input.c
499
queue->idx = 0;
usr.sbin/bhyve/pci_virtio_input.c
504
struct vtinput_eventqueue *queue, struct vqueue_info *vq)
usr.sbin/bhyve/pci_virtio_input.c
510
for (uint32_t i = 0; i < queue->idx; ++i) {
usr.sbin/bhyve/pci_virtio_input.c
520
__func__, queue->idx));
usr.sbin/bhyve/pci_virtio_input.c
549
queue->events[i].iov = iov;
usr.sbin/bhyve/pci_virtio_input.c
550
queue->events[i].idx = req.idx;
usr.sbin/bhyve/pci_virtio_input.c
557
for (uint32_t i = 0; i < queue->idx; ++i) {
usr.sbin/bhyve/pci_virtio_input.c
558
struct vtinput_event_elem event = queue->events[i];
usr.sbin/bhyve/pci_virtio_input.c
565
vtinput_eventqueue_clear(queue);
usr.sbin/bhyve/pci_virtio_scsi.c
1001
pthread_cond_destroy(&queue->vsq_cv);
usr.sbin/bhyve/pci_virtio_scsi.c
1002
pthread_mutex_destroy(&queue->vsq_qmtx);
usr.sbin/bhyve/pci_virtio_scsi.c
1003
pthread_mutex_destroy(&queue->vsq_fmtx);
usr.sbin/bhyve/pci_virtio_scsi.c
1004
pthread_mutex_destroy(&queue->vsq_rmtx);
usr.sbin/bhyve/pci_virtio_scsi.c
934
struct pci_vtscsi_queue *queue, int num)
usr.sbin/bhyve/pci_virtio_scsi.c
940
queue->vsq_sc = sc;
usr.sbin/bhyve/pci_virtio_scsi.c
941
queue->vsq_vq = &sc->vss_vq[num + 2];
usr.sbin/bhyve/pci_virtio_scsi.c
943
pthread_mutex_init(&queue->vsq_rmtx, NULL);
usr.sbin/bhyve/pci_virtio_scsi.c
944
pthread_mutex_init(&queue->vsq_fmtx, NULL);
usr.sbin/bhyve/pci_virtio_scsi.c
945
pthread_mutex_init(&queue->vsq_qmtx, NULL);
usr.sbin/bhyve/pci_virtio_scsi.c
946
pthread_cond_init(&queue->vsq_cv, NULL);
usr.sbin/bhyve/pci_virtio_scsi.c
947
STAILQ_INIT(&queue->vsq_requests);
usr.sbin/bhyve/pci_virtio_scsi.c
948
STAILQ_INIT(&queue->vsq_free_requests);
usr.sbin/bhyve/pci_virtio_scsi.c
949
LIST_INIT(&queue->vsq_workers);
usr.sbin/bhyve/pci_virtio_scsi.c
958
pci_vtscsi_put_request(&queue->vsq_free_requests, req);
usr.sbin/bhyve/pci_virtio_scsi.c
966
workers[i].vsw_queue = queue;
usr.sbin/bhyve/pci_virtio_scsi.c
973
LIST_INSERT_HEAD(&queue->vsq_workers, &workers[i], vsw_link);
usr.sbin/bhyve/pci_virtio_scsi.c
979
pci_vtscsi_destroy_queue(queue);
usr.sbin/bhyve/pci_virtio_scsi.c
986
pci_vtscsi_destroy_queue(struct pci_vtscsi_queue *queue)
usr.sbin/bhyve/pci_virtio_scsi.c
988
if (queue->vsq_sc == NULL)
usr.sbin/bhyve/pci_virtio_scsi.c
994
if (STAILQ_EMPTY(&queue->vsq_free_requests))
usr.sbin/bhyve/pci_virtio_scsi.c
997
req = pci_vtscsi_get_request(&queue->vsq_free_requests);
usr.sbin/cxgbetool/cxgbetool.c
3206
op.queue = -1;
usr.sbin/cxgbetool/cxgbetool.c
3213
op.queue = (int8_t)val;
usr.sbin/jail/command.c
861
TAILQ_REMOVE(j->queue, j, tq);
usr.sbin/jail/command.c
873
j->queue = &sleeping;
usr.sbin/jail/config.c
354
j->queue = &cfjails;
usr.sbin/jail/jail.c
577
TAILQ_REMOVE(j->queue, j, tq);
usr.sbin/jail/jail.c
579
j->queue = &ready;
usr.sbin/jail/jailp.h
179
struct cfjails *queue;
usr.sbin/jail/jailp.h
240
extern void requeue(struct cfjail *j, struct cfjails *queue);
usr.sbin/jail/jailp.h
241
extern void requeue_head(struct cfjail *j, struct cfjails *queue);
usr.sbin/jail/state.c
258
if (!--dj->ndeps && dj->queue == &depend)
usr.sbin/jail/state.c
387
requeue(struct cfjail *j, struct cfjails *queue)
usr.sbin/jail/state.c
389
if (j->queue != queue) {
usr.sbin/jail/state.c
390
TAILQ_REMOVE(j->queue, j, tq);
usr.sbin/jail/state.c
391
TAILQ_INSERT_TAIL(queue, j, tq);
usr.sbin/jail/state.c
392
j->queue = queue;
usr.sbin/jail/state.c
397
requeue_head(struct cfjail *j, struct cfjails *queue)
usr.sbin/jail/state.c
399
TAILQ_REMOVE(j->queue, j, tq);
usr.sbin/jail/state.c
400
TAILQ_INSERT_HEAD(queue, j, tq);
usr.sbin/jail/state.c
401
j->queue = queue;
usr.sbin/lpr/common_source/common.c
115
register struct jobqueue *q, **queue;
usr.sbin/lpr/common_source/common.c
137
queue = (struct jobqueue **)malloc(arraysz * sizeof(struct jobqueue *));
usr.sbin/lpr/common_source/common.c
138
if (queue == NULL)
usr.sbin/lpr/common_source/common.c
164
queue = (struct jobqueue **)reallocarray((char *)queue,
usr.sbin/lpr/common_source/common.c
166
if (queue == NULL) {
usr.sbin/lpr/common_source/common.c
172
queue[nitems-1] = q;
usr.sbin/lpr/common_source/common.c
176
qsort(queue, nitems, sizeof(struct jobqueue *), compar);
usr.sbin/lpr/common_source/common.c
177
*namelist = queue;
usr.sbin/lpr/common_source/displayq.c
120
if ((nitems = getq(pp, &queue)) < 0)
usr.sbin/lpr/common_source/displayq.c
207
q = queue[i];
usr.sbin/lpr/common_source/displayq.c
211
free(queue);
usr.sbin/lpr/common_source/displayq.c
97
struct jobqueue **queue;
usr.sbin/lpr/lpc/cmds.c
1128
struct jobqueue **queue;
usr.sbin/lpr/lpc/cmds.c
1172
nitems = getq(pp, &queue);
usr.sbin/lpr/lpc/cmds.c
1176
mtime = queue[0]->job_time;
usr.sbin/lpr/lpc/cmds.c
1185
free(queue[i]);
usr.sbin/lpr/lpc/cmds.c
1186
free(queue);
usr.sbin/lpr/lpc/cmds.c
1253
for (qq = queue + nitems; --qq >= queue; ) {
usr.sbin/lpr/lpc/cmds.c
1273
for (qq = queue + nitems; --qq >= queue; ) {
usr.sbin/lpr/lpc/cmds.c
649
struct dirent **queue;
usr.sbin/lpr/lpc/cmds.c
672
nitems = scandir(pp->spool_dir, &queue, doselect, sortq);
usr.sbin/lpr/lpc/cmds.c
697
cp = queue[i]->d_name;
usr.sbin/lpr/lpc/cmds.c
704
cp = queue[i]->d_name;
usr.sbin/lpr/lpc/cmds.c
719
cp1 = queue[i + 1]->d_name;
usr.sbin/lpr/lpc/movejobs.c
215
nitems = getq(pp, &queue);
usr.sbin/lpr/lpc/movejobs.c
233
touch_info.newtime = queue[nitems - 1]->job_time + 1;
usr.sbin/lpr/lpc/movejobs.c
242
touch_info.newtime = queue[0]->job_time - nitems - 5;
usr.sbin/lpr/lpc/movejobs.c
267
free(queue[i]);
usr.sbin/lpr/lpc/movejobs.c
268
free(queue);
usr.sbin/lpr/lpc/movejobs.c
80
static struct jobqueue **queue;
usr.sbin/lpr/lpc/movejobs.c
99
matchcnt = scanq_jobspec(nitems, queue, SCQ_JSORDER, &jobs_wanted,
usr.sbin/lpr/lpd/printjob.c
150
struct jobqueue **queue;
usr.sbin/lpr/lpd/printjob.c
228
if ((nitems = getq(pp, &queue)) < 0) {
usr.sbin/lpr/lpd/printjob.c
262
for (qp = queue; nitems--; free((char *) q)) {
usr.sbin/lpr/lpd/printjob.c
334
free(queue);
usr.sbin/lpr/lpd/printjob.c
338
if ((nitems = getq(pp, &queue)) < 0) {
usr.sbin/nscd/nscd.c
199
retval->queue = kqueue();
usr.sbin/nscd/nscd.c
200
assert(retval->queue != -1);
usr.sbin/nscd/nscd.c
205
kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);
usr.sbin/nscd/nscd.c
216
close(env->queue);
usr.sbin/nscd/nscd.c
264
res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
usr.sbin/nscd/nscd.c
290
nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
usr.sbin/nscd/nscd.c
457
kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);
usr.sbin/nscd/nscd.c
497
nevents = kevent(env->queue, NULL, 0, eventlist,
usr.sbin/nscd/nscd.c
515
kevent(s_runtime_env->queue, eventlist,
usr.sbin/nscd/singletons.h
35
int queue;
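In nscd the queue field is just a kqueue(2) descriptor polled with kevent(2); a minimal sketch of that register-then-wait pattern, with a hypothetical file descriptor, is:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <unistd.h>

    /* Block until fd becomes readable; returns 0 on success, -1 on error. */
    int
    example_wait_readable(int fd)
    {
            struct kevent ev;
            int kq, n;

            if ((kq = kqueue()) == -1)
                    return (-1);
            EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
            if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1) {  /* register */
                    close(kq);
                    return (-1);
            }
            n = kevent(kq, NULL, 0, &ev, 1, NULL);          /* wait */
            close(kq);
            return (n > 0 ? 0 : -1);
    }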
usr.sbin/ppp/ipcp.c
1445
struct mqueue *queue;
usr.sbin/ppp/ipcp.c
1462
queue = ipcp->Queue + IPCP_QUEUES(ipcp) - 1;
usr.sbin/ppp/ipcp.c
1464
if (queue->top) {
usr.sbin/ppp/ipcp.c
1465
bp = m_dequeue(queue);
usr.sbin/ppp/ipcp.c
1479
} while (queue-- != ipcp->Queue);
usr.sbin/ppp/ipv6cp.c
424
struct mqueue *queue;
usr.sbin/ppp/ipv6cp.c
441
queue = ipv6cp->Queue + IPV6CP_QUEUES(ipv6cp) - 1;
usr.sbin/ppp/ipv6cp.c
443
if (queue->top) {
usr.sbin/ppp/ipv6cp.c
444
bp = m_dequeue(queue);
usr.sbin/ppp/ipv6cp.c
458
} while (queue-- != ipv6cp->Queue);
usr.sbin/ppp/link.c
102
for (queue = l->Queue; queue < highest; queue++)
usr.sbin/ppp/link.c
103
while (queue->len)
usr.sbin/ppp/link.c
104
m_enqueue(highest, m_dequeue(queue));
usr.sbin/ppp/link.c
110
struct mqueue *queue, *highest;
usr.sbin/ppp/link.c
113
for (queue = l->Queue; queue <= highest; queue++)
usr.sbin/ppp/link.c
114
while (queue->top)
usr.sbin/ppp/link.c
115
m_freem(m_dequeue(queue));
usr.sbin/ppp/link.c
153
struct mqueue *queue, *highest;
usr.sbin/ppp/link.c
170
for (queue = l->Queue; queue < highest; queue++) {
usr.sbin/ppp/link.c
171
len = queue->len;
usr.sbin/ppp/link.c
173
for (m = queue->top; len--; m = m->m_nextpkt)
usr.sbin/ppp/link.c
97
struct mqueue *queue, *highest;
usr.sbin/ppp/mbuf.c
367
m_enqueue(struct mqueue *queue, struct mbuf *bp)
usr.sbin/ppp/mbuf.c
370
if (queue->last) {
usr.sbin/ppp/mbuf.c
371
queue->last->m_nextpkt = bp;
usr.sbin/ppp/mbuf.c
372
queue->last = bp;
usr.sbin/ppp/mbuf.c
374
queue->last = queue->top = bp;
usr.sbin/ppp/mbuf.c
375
queue->len++;
usr.sbin/ppp/mbuf.c
376
log_Printf(LogDEBUG, "m_enqueue: len = %lu\n", (unsigned long)queue->len);
usr.sbin/ppp/mbuf.c
70
struct mbuf *queue;