V_ipq
head = &V_ipq[hash].head;
if (V_ipq[hash].count < V_ipreass_maxbucketsize)
V_ipq[hash].count++;
callout_reset_sbt(&V_ipq[hash].timer,
&V_ipq[hash], 0);
MPASS(callout_active(&V_ipq[hash].timer));
ipq_drop(&V_ipq[hash], fp);
ipq_drop(&V_ipq[hash], fp);
ipq_drop(&V_ipq[hash], fp);
V_ipq[hash].count--;
ipreass_reschedule(&V_ipq[hash]);
resched = !TAILQ_EMPTY(&V_ipq[i].head);
while(!TAILQ_EMPTY(&V_ipq[i].head)) {
struct ipq *fp = TAILQ_FIRST(&V_ipq[i].head);
ipq_free(&V_ipq[i], fp);
ipreass_reschedule(&V_ipq[i]);
KASSERT(V_ipq[i].count == 0,
V_ipq[i].count, V_ipq));
V_ipq = malloc(sizeof(struct ipqbucket) * V_ipq_hashsize,
TAILQ_INIT(&V_ipq[i].head);
mtx_init(&V_ipq[i].lock, "IP reassembly", NULL,
callout_init_mtx(&V_ipq[i].timer, &V_ipq[i].lock, 0);
V_ipq[i].count = 0;
V_ipq[i].vnet = curvnet;
TAILQ_FOREACH_SAFE(fp, &V_ipq[i].head, ipq_list, temp) {
mtx_destroy(&V_ipq[i].lock);
free(V_ipq, M_IPREASS_HASH);
while (V_ipq[i].count > V_ipreass_maxbucketsize &&
(fp = TAILQ_LAST(&V_ipq[i].head, ipqhead)) != NULL)
ipq_timeout(&V_ipq[i], fp);
ipreass_reschedule(&V_ipq[i]);
fp = TAILQ_LAST(&V_ipq[i].head, ipqhead);
ipq_timeout(&V_ipq[i], fp);
ipreass_reschedule(&V_ipq[i]);
/*
 * Per-bucket locking for the IP reassembly hash (V_ipq is an array of
 * struct ipqbucket, each with its own mutex — see the V_ipq allocation
 * elsewhere in this file).  'i' is a bucket index, not an ipq pointer.
 */
#define IPQ_LOCK(i) mtx_lock(&V_ipq[i].lock)       /* acquire bucket i's mutex (blocking) */
#define IPQ_TRYLOCK(i) mtx_trylock(&V_ipq[i].lock) /* non-blocking acquire; returns nonzero on success */
#define IPQ_UNLOCK(i) mtx_unlock(&V_ipq[i].lock)   /* release bucket i's mutex */
fp = TAILQ_LAST(&V_ipq[bucket].head, ipqhead);
/* Debug assertion that the current thread owns bucket i's mutex (no-op in non-INVARIANTS builds). */
#define IPQ_LOCK_ASSERT(i) mtx_assert(&V_ipq[i].lock, MA_OWNED)
TAILQ_REMOVE(&V_ipq[bucket].head, fp, ipq_list);
V_ipq[bucket].count--;
ipreass_reschedule(&V_ipq[bucket]);