#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/refcount.h>
#include <sys/sched.h>
#include <sys/queue.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <sys/time.h>
#include <sys/kthread.h>
#include <sys/kern_prefetch.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <net/route.h>
#include <net/vnet.h>
#ifdef RSS
#include <net/netisr.h>
#include <net/rss_config.h>
#endif
#define TCPSTATES
#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#include <netinet/tcp_hpts.h>
#include <netinet/tcp_hpts_internal.h>
#include <netinet/tcp_log_buf.h>
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif
struct tcp_hptsi *tcp_hptsi_pace;
const struct tcp_hptsi_funcs tcp_hptsi_default_funcs = {
.microuptime = microuptime,
.swi_add = swi_add,
.swi_remove = swi_remove,
.swi_sched = swi_sched,
.intr_event_bind = intr_event_bind,
.intr_event_bind_ithread_cpuset = intr_event_bind_ithread_cpuset,
.callout_init = callout_init,
.callout_reset_sbt_on = callout_reset_sbt_on,
._callout_stop_safe = _callout_stop_safe,
};
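/*
 * When built with TCP_HPTS_KTEST, route these primitives through the
 * pacer's function table so a test harness can substitute its own
 * implementations.  The default table above simply points at the real
 * kernel routines; callers of these names provide a local 'pace' for
 * the macros to expand against.
 */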
#ifdef TCP_HPTS_KTEST
#define microuptime pace->funcs->microuptime
#define swi_add pace->funcs->swi_add
#define swi_remove pace->funcs->swi_remove
#define swi_sched pace->funcs->swi_sched
#define intr_event_bind pace->funcs->intr_event_bind
#define intr_event_bind_ithread_cpuset pace->funcs->intr_event_bind_ithread_cpuset
#define callout_init pace->funcs->callout_init
#define callout_reset_sbt_on pace->funcs->callout_reset_sbt_on
#define _callout_stop_safe pace->funcs->_callout_stop_safe
#endif
static MALLOC_DEFINE(M_TCPHPTS, "tcp_hpts", "TCP hpts");
static void tcp_hpts_thread(void *ctx);
static volatile uint32_t __read_frequently hpts_that_need_softclock;
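/*
 * Sleep tuning constants: the *_SLEEP values are in microseconds, while
 * the SLOTS_INDICATE_* thresholds are counts of wheel slots processed in
 * one pass and drive the dynamic sleep adjustment below.
 */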
#define LOWEST_SLEEP_ALLOWED 50
#define DEFAULT_MIN_SLEEP 250
#define DYNAMIC_MIN_SLEEP DEFAULT_MIN_SLEEP
#define DYNAMIC_MAX_SLEEP 5000
#define SLOTS_INDICATE_MORE_SLEEP 100
#define SLOTS_INDICATE_LESS_SLEEP 1000
#ifdef RSS
int tcp_bind_threads = 1;
#else
int tcp_bind_threads = 2;
#endif
static int tcp_use_irq_cpu = 0;
static int hpts_does_tp_logging = 0;
static int32_t tcp_hpts_precision = 120;
int32_t tcp_min_hptsi_time = DEFAULT_MIN_SLEEP;
static int conn_cnt_thresh = DEFAULT_CONNECTION_THRESHOLD;
static int32_t dynamic_min_sleep = DYNAMIC_MIN_SLEEP;
static int32_t dynamic_max_sleep = DYNAMIC_MAX_SLEEP;
SYSCTL_NODE(_net_inet_tcp, OID_AUTO, hpts, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"TCP Hpts controls");
SYSCTL_NODE(_net_inet_tcp_hpts, OID_AUTO, stats, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"TCP Hpts statistics");
counter_u64_t hpts_hopelessly_behind;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, hopeless, CTLFLAG_RD,
&hpts_hopelessly_behind,
"Number of times hpts could not catch up and was behind hopelessly");
counter_u64_t hpts_loops;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, loops, CTLFLAG_RD,
&hpts_loops, "Number of times hpts had to loop to catch up");
counter_u64_t back_tosleep;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, no_tcbsfound, CTLFLAG_RD,
&back_tosleep, "Number of times hpts found no tcbs");
counter_u64_t combined_wheel_wrap;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, comb_wheel_wrap, CTLFLAG_RD,
&combined_wheel_wrap, "Number of times the wheel lagged enough to have an insert see wrap");
counter_u64_t wheel_wrap;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, wheel_wrap, CTLFLAG_RD,
&wheel_wrap, "Number of times the pacer itself fell a full wheel or more behind and collapsed the wheel into one pass");
counter_u64_t hpts_direct_call;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_call, CTLFLAG_RD,
&hpts_direct_call, "Number of times hpts was called by syscall/trap or other entry");
counter_u64_t hpts_wake_timeout;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, timeout_wakeup, CTLFLAG_RD,
&hpts_wake_timeout, "Number of times hpts threads woke up via the callout expiring");
counter_u64_t hpts_direct_awakening;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, direct_awakening, CTLFLAG_RD,
&hpts_direct_awakening, "Number of times hpts threads woke up without the callout expiring");
counter_u64_t hpts_back_tosleep;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, back_tosleep, CTLFLAG_RD,
&hpts_back_tosleep, "Number of times hpts threads woke up via the callout expiring and went back to sleep because there was no work to do");
counter_u64_t cpu_uses_flowid;
counter_u64_t cpu_uses_random;
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_flowid, CTLFLAG_RD,
&cpu_uses_flowid, "Number of times when setting cpuid we used the flowid field");
SYSCTL_COUNTER_U64(_net_inet_tcp_hpts_stats, OID_AUTO, cpusel_random, CTLFLAG_RD,
&cpu_uses_random, "Number of times when setting cpuid we used a random value");
TUNABLE_INT("net.inet.tcp.bind_hptss", &tcp_bind_threads);
TUNABLE_INT("net.inet.tcp.use_irq", &tcp_use_irq_cpu);
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, bind_hptss, CTLFLAG_RD,
&tcp_bind_threads, 2,
"Thread Binding tunable");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, use_irq, CTLFLAG_RD,
&tcp_use_irq_cpu, 0,
"Use of irq CPU tunable");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, precision, CTLFLAG_RW,
&tcp_hpts_precision, 120,
"Value for PRE() precision of callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, cnt_thresh, CTLFLAG_RW,
&conn_cnt_thresh, 0,
"How many connections (below) make us use the callout based mechanism");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, logging, CTLFLAG_RW,
&hpts_does_tp_logging, 0,
"Do we add to any tp that has logging on pacer logs");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_minsleep, CTLFLAG_RW,
&dynamic_min_sleep, 250,
"What is the dynamic minsleep value?");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, dyn_maxsleep, CTLFLAG_RW,
&dynamic_max_sleep, 5000,
"What is the dynamic maxsleep value?");
static int32_t max_pacer_loops = 10;
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, loopmax, CTLFLAG_RW,
&max_pacer_loops, 10,
"What is the maximum number of times the pacer will loop trying to catch up");
#define HPTS_MAX_SLEEP_ALLOWED (NUM_OF_HPTSI_SLOTS/2)
static uint32_t hpts_sleep_max = HPTS_MAX_SLEEP_ALLOWED;
static int
sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
{
int error;
uint32_t new;
new = hpts_sleep_max;
error = sysctl_handle_int(oidp, &new, 0, req);
if (error == 0 && req->newptr) {
if ((new < (dynamic_min_sleep/HPTS_USECS_PER_SLOT)) ||
(new > HPTS_MAX_SLEEP_ALLOWED))
error = EINVAL;
else
hpts_sleep_max = new;
}
return (error);
}
static int
sysctl_net_inet_tcp_hpts_min_sleep(SYSCTL_HANDLER_ARGS)
{
int error;
uint32_t new;
new = tcp_min_hptsi_time;
error = sysctl_handle_int(oidp, &new, 0, req);
if (error == 0 && req->newptr) {
if (new < LOWEST_SLEEP_ALLOWED)
error = EINVAL;
else
tcp_min_hptsi_time = new;
}
return (error);
}
SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, maxsleep,
CTLTYPE_UINT | CTLFLAG_RW,
&hpts_sleep_max, 0,
&sysctl_net_inet_tcp_hpts_max_sleep, "IU",
"Maximum time hpts will sleep in slots");
SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
CTLTYPE_UINT | CTLFLAG_RW,
&tcp_min_hptsi_time, 0,
&sysctl_net_inet_tcp_hpts_min_sleep, "IU",
"The minimum time the hpts must sleep before processing more slots");
static int slots_indicate_more_sleep = SLOTS_INDICATE_MORE_SLEEP;
static int slots_indicate_less_sleep = SLOTS_INDICATE_LESS_SLEEP;
static int tcp_hpts_no_wake_over_thresh = 1;
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
&slots_indicate_more_sleep, 0,
"If we only process this many or less on a timeout, we need longer sleep on the next callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
&slots_indicate_less_sleep, 0,
"If we process this many or more on a timeout, we need less sleep on the next callout");
SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
&tcp_hpts_no_wake_over_thresh, 0,
"When we are over the threshold on the pacer do we prohibit wakeups?");
uint16_t
tcp_hptsi_random_cpu(struct tcp_hptsi *pace)
{
uint16_t cpuid;
uint32_t ran;
ran = arc4random();
cpuid = (((ran & 0xffff) % mp_ncpus) % pace->rp_num_hptss);
return (cpuid);
}
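/*
 * Record a BBR_LOG_HPTSDIAG record capturing the pacer's wheel state for
 * this tcpcb, when hpts logging is enabled and the tp has BB logging on.
 */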
static void
tcp_hpts_log(struct tcp_hpts_entry *hpts, struct tcpcb *tp, struct timeval *tv,
int slots_to_run, int idx, bool from_callout)
{
if (hpts_does_tp_logging && tcp_bblogging_on(tp)) {
union tcp_log_stackspecific log;
memset(&log, 0, sizeof(log));
log.u_bbr.flex1 = hpts->p_nxt_slot;
log.u_bbr.flex2 = hpts->p_cur_slot;
log.u_bbr.flex3 = hpts->p_prev_slot;
log.u_bbr.flex4 = idx;
log.u_bbr.flex6 = hpts->p_on_queue_cnt;
log.u_bbr.flex7 = hpts->p_cpu;
log.u_bbr.flex8 = (uint8_t)from_callout;
log.u_bbr.inflight = slots_to_run;
log.u_bbr.applimited = hpts->overidden_sleep;
log.u_bbr.timeStamp = tcp_tv_to_usec(tv);
log.u_bbr.epoch = hpts->saved_curslot;
log.u_bbr.lt_epoch = hpts->saved_prev_slot;
log.u_bbr.pkts_out = hpts->p_delayed_by;
log.u_bbr.lost = hpts->p_hpts_sleep_time;
log.u_bbr.pacing_gain = hpts->p_cpu;
log.u_bbr.pkt_epoch = hpts->p_runningslot;
log.u_bbr.use_lt_bw = 1;
TCP_LOG_EVENTP(tp, NULL,
&tptosocket(tp)->so_rcv,
&tptosocket(tp)->so_snd,
BBR_LOG_HPTSDIAG, 0,
0, &log, false, tv);
}
}
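/* Callout handler for the hpts sleep timer: kick the hpts SWI. */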
static void
tcp_hpts_sleep_timeout(void *arg)
{
#ifdef TCP_HPTS_KTEST
struct tcp_hptsi *pace;
#endif
struct tcp_hpts_entry *hpts;
hpts = (struct tcp_hpts_entry *)arg;
#ifdef TCP_HPTS_KTEST
pace = hpts->p_hptsi;
#endif
swi_sched(hpts->ie_cookie, 0);
}
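/*
 * Arm the hpts sleep callout to fire after *tv on the hpts's own CPU,
 * remembering how long we intend to sleep.
 */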
static int
tcp_hpts_sleep(struct tcp_hpts_entry *hpts, struct timeval *tv)
{
#ifdef TCP_HPTS_KTEST
struct tcp_hptsi *pace;
#endif
sbintime_t sb;
#ifdef TCP_HPTS_KTEST
pace = hpts->p_hptsi;
#endif
hpts->sleeping = tv->tv_usec;
sb = tvtosbt(*tv);
return (callout_reset_sbt_on(
&hpts->co, sb, 0, tcp_hpts_sleep_timeout, hpts, hpts->p_cpu,
(C_DIRECT_EXEC | C_PREL(tcp_hpts_precision))));
}
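/*
 * Schedule the hpts SWI to run, unless we are over the connection
 * threshold and wakeups are suppressed, in which case the pacer is left
 * to run from its callout or the syscall (softclock) path.
 */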
void
tcp_hpts_wake(struct tcp_hpts_entry *hpts)
{
#ifdef TCP_HPTS_KTEST
struct tcp_hptsi *pace;
#endif
HPTS_MTX_ASSERT(hpts);
#ifdef TCP_HPTS_KTEST
pace = hpts->p_hptsi;
#endif
if (tcp_hpts_no_wake_over_thresh && (hpts->p_on_queue_cnt >= conn_cnt_thresh)) {
hpts->p_direct_wake = 0;
return;
}
if (hpts->p_hpts_wake_scheduled == 0) {
hpts->p_hpts_wake_scheduled = 1;
swi_sched(hpts->ie_cookie, 0);
}
}
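/*
 * Place the tcpcb on its assigned wheel slot (t_hpts_slot), taking a
 * reference on the inpcb the first time it goes onto the wheel.
 */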
static void
tcp_hpts_insert_internal(struct tcpcb *tp, struct tcp_hpts_entry *hpts)
{
struct inpcb *inp = tptoinpcb(tp);
struct hptsh *hptsh;
INP_WLOCK_ASSERT(inp);
HPTS_MTX_ASSERT(hpts);
MPASS(hpts->p_cpu == tp->t_hpts_cpu);
MPASS(!(inp->inp_flags & INP_DROPPED));
hptsh = &hpts->p_hptss[tp->t_hpts_slot];
if (tp->t_in_hpts == IHPTS_NONE) {
tp->t_in_hpts = IHPTS_ONQUEUE;
in_pcbref(inp);
} else if (tp->t_in_hpts == IHPTS_MOVING) {
tp->t_in_hpts = IHPTS_ONQUEUE;
} else
MPASS(tp->t_in_hpts == IHPTS_ONQUEUE);
tp->t_hpts_gencnt = hptsh->gencnt;
TAILQ_INSERT_TAIL(&hptsh->head, tp, t_hpts);
hptsh->count++;
hpts->p_on_queue_cnt++;
}
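/* Return, locked, the hpts entry this tcpcb is currently assigned to. */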
static struct tcp_hpts_entry *
tcp_hpts_lock(struct tcp_hptsi *pace, struct tcpcb *tp)
{
struct tcp_hpts_entry *hpts;
INP_LOCK_ASSERT(tptoinpcb(tp));
hpts = pace->rp_ent[tp->t_hpts_cpu];
HPTS_LOCK(hpts);
return (hpts);
}
static void
tcp_hpts_release(struct tcpcb *tp)
{
bool released __diagused;
tp->t_in_hpts = IHPTS_NONE;
released = in_pcbrele_wlocked(tptoinpcb(tp));
MPASS(released == false);
}
void
__tcp_hpts_init(struct tcp_hptsi *pace, struct tcpcb *tp)
{
if (__predict_true(tp->t_hpts_cpu == HPTS_CPU_NONE)) {
tp->t_hpts_cpu = tcp_hptsi_random_cpu(pace);
MPASS(!(tp->t_flags2 & TF2_HPTS_CPU_SET));
}
}
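/*
 * Remove the tcpcb from the wheel.  If the slot's generation count has
 * changed, the pacer has already detached that slot's queue, so we just
 * mark the tp IHPTS_MOVING (with no slot) and let the pacer finish the
 * removal.
 */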
void
__tcp_hpts_remove(struct tcp_hptsi *pace, struct tcpcb *tp)
{
struct tcp_hpts_entry *hpts;
struct hptsh *hptsh;
INP_WLOCK_ASSERT(tptoinpcb(tp));
hpts = tcp_hpts_lock(pace, tp);
if (tp->t_in_hpts == IHPTS_ONQUEUE) {
hptsh = &hpts->p_hptss[tp->t_hpts_slot];
tp->t_hpts_request = 0;
if (__predict_true(tp->t_hpts_gencnt == hptsh->gencnt)) {
TAILQ_REMOVE(&hptsh->head, tp, t_hpts);
MPASS(hptsh->count > 0);
hptsh->count--;
MPASS(hpts->p_on_queue_cnt > 0);
hpts->p_on_queue_cnt--;
tcp_hpts_release(tp);
} else {
#ifdef INVARIANTS
struct tcpcb *tmp;
TAILQ_FOREACH(tmp, &hptsh->head, t_hpts)
MPASS(tmp != tp);
#endif
tp->t_in_hpts = IHPTS_MOVING;
tp->t_hpts_slot = -1;
}
} else if (tp->t_in_hpts == IHPTS_MOVING) {
tp->t_hpts_slot = -1;
}
HPTS_UNLOCK(hpts);
}
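/* Advance a wheel slot by 'plus', wrapping around the wheel. */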
static inline int
hpts_slot(uint32_t wheel_slot, uint32_t plus)
{
KASSERT(wheel_slot < NUM_OF_HPTSI_SLOTS, ("Invalid slot %u not on wheel", wheel_slot));
return ((wheel_slot + plus) % NUM_OF_HPTSI_SLOTS);
}
static inline int
cts_to_wheel(uint32_t cts)
{
return ((cts / HPTS_USECS_PER_SLOT) % NUM_OF_HPTSI_SLOTS);
}
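/*
 * Distance, moving forward around the wheel, from prev_slot to slot_now.
 * When the two are equal the wheel is assumed to have come (almost) all
 * the way around, so the maximum distance is returned.
 */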
static inline int
hpts_slots_diff(int prev_slot, int slot_now)
{
if (slot_now > prev_slot)
return (slot_now - prev_slot);
else if (slot_now == prev_slot)
return (NUM_OF_HPTSI_SLOTS - 1);
else
return ((NUM_OF_HPTSI_SLOTS - prev_slot) + slot_now);
}
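/*
 * Given the wheel position for "now" (wheel_slot), return how many slots
 * are available to schedule into and, via *target_slot, the last usable
 * slot.  While the pacer is actively running we must stop short of the
 * slot it is processing; otherwise everything up to just before
 * p_prev_slot is usable.  If the pacer has lagged so far that nothing is
 * safely available, return 0 and point *target_slot at p_nxt_slot.
 */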
static inline int32_t
max_slots_available(struct tcp_hpts_entry *hpts, uint32_t wheel_slot, uint32_t *target_slot)
{
uint32_t dis_to_travel, end_slot, pacer_to_now, avail_on_wheel;
if ((hpts->p_hpts_active == 1) &&
(hpts->p_wheel_complete == 0)) {
end_slot = hpts->p_runningslot;
if (end_slot == 0)
end_slot = NUM_OF_HPTSI_SLOTS - 1;
else
end_slot--;
if (target_slot)
*target_slot = end_slot;
} else {
end_slot = hpts->p_prev_slot;
if (end_slot == 0)
end_slot = NUM_OF_HPTSI_SLOTS - 1;
else
end_slot--;
if (target_slot)
*target_slot = end_slot;
if (hpts->p_prev_slot != wheel_slot)
dis_to_travel = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
else
dis_to_travel = 1;
return (NUM_OF_HPTSI_SLOTS - dis_to_travel);
}
if (hpts->p_runningslot == hpts->p_cur_slot)
dis_to_travel = 1;
else
dis_to_travel = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
if (hpts->p_cur_slot != wheel_slot) {
pacer_to_now = hpts_slots_diff(hpts->p_cur_slot, wheel_slot);
} else {
pacer_to_now = 0;
}
avail_on_wheel = NUM_OF_HPTSI_SLOTS - dis_to_travel;
if (avail_on_wheel <= pacer_to_now) {
counter_u64_add(combined_wheel_wrap, 1);
if (target_slot)
*target_slot = hpts->p_nxt_slot;
return (0);
} else {
return (avail_on_wheel - pacer_to_now);
}
}
#ifdef INVARIANTS
static void
check_if_slot_would_be_wrong(struct tcp_hpts_entry *hpts, struct tcpcb *tp,
uint32_t hptsslot)
{
KASSERT(hptsslot < NUM_OF_HPTSI_SLOTS,
("hpts:%p tp:%p slot:%d > max", hpts, tp, hptsslot));
if ((hpts->p_hpts_active) &&
(hpts->p_wheel_complete == 0)) {
int distance, yet_to_run;
distance = hpts_slots_diff(hpts->p_runningslot, hptsslot);
if (hpts->p_runningslot != hpts->p_cur_slot)
yet_to_run = hpts_slots_diff(hpts->p_runningslot, hpts->p_cur_slot);
else
yet_to_run = 0;
KASSERT(yet_to_run <= distance, ("hpts:%p tp:%p slot:%d "
"distance:%d yet_to_run:%d rs:%d cs:%d", hpts, tp,
hptsslot, distance, yet_to_run, hpts->p_runningslot,
hpts->p_cur_slot));
}
}
#endif
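/*
 * Insert a tcpcb onto the wheel to be paced 'usecs' from now.  A request
 * that converts to zero slots is queued for the pacer's next pass.  If
 * the request reaches further than the wheel can currently hold, the
 * remainder is kept in t_hpts_request and re-inserted by the pacer as
 * the wheel advances.  An idle pacer may have its sleep shortened, or be
 * woken directly, if the new entry is due sooner than it planned to wake.
 */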
void
__tcp_hpts_insert(struct tcp_hptsi *pace, struct tcpcb *tp, uint32_t usecs,
struct hpts_diag *diag)
{
struct tcp_hpts_entry *hpts;
struct timeval tv;
uint32_t slot, wheel_cts, last_slot, need_new_to = 0;
int32_t wheel_slot, maxslots;
bool need_wakeup = false;
INP_WLOCK_ASSERT(tptoinpcb(tp));
MPASS(!(tptoinpcb(tp)->inp_flags & INP_DROPPED));
MPASS(!(tp->t_in_hpts == IHPTS_ONQUEUE));
slot = HPTS_USEC_TO_SLOTS(usecs);
hpts = tcp_hpts_lock(pace, tp);
microuptime(&tv);
if (diag) {
memset(diag, 0, sizeof(struct hpts_diag));
diag->p_hpts_active = hpts->p_hpts_active;
diag->p_prev_slot = hpts->p_prev_slot;
diag->p_runningslot = hpts->p_runningslot;
diag->p_nxt_slot = hpts->p_nxt_slot;
diag->p_cur_slot = hpts->p_cur_slot;
diag->slot_req = slot;
diag->p_on_min_sleep = hpts->p_on_min_sleep;
diag->hpts_sleep_time = hpts->p_hpts_sleep_time;
}
if (slot == 0) {
tp->t_hpts_request = 0;
if ((hpts->p_hpts_active == 0) || (hpts->p_wheel_complete)) {
tp->t_hpts_slot = hpts_slot(hpts->p_prev_slot, 1);
if ((hpts->p_on_min_sleep == 0) &&
(hpts->p_hpts_active == 0))
need_wakeup = true;
} else
tp->t_hpts_slot = hpts->p_runningslot;
if (__predict_true(tp->t_in_hpts != IHPTS_MOVING))
tcp_hpts_insert_internal(tp, hpts);
if (need_wakeup) {
hpts->p_direct_wake = 1;
tcp_hpts_wake(hpts);
}
HPTS_UNLOCK(hpts);
return;
}
wheel_cts = tcp_tv_to_usec(&tv);
wheel_slot = cts_to_wheel(wheel_cts);
maxslots = max_slots_available(hpts, wheel_slot, &last_slot);
if (diag) {
diag->wheel_slot = wheel_slot;
diag->maxslots = maxslots;
diag->wheel_cts = wheel_cts;
}
if (maxslots == 0) {
if (slot > 1) {
slot--;
}
tp->t_hpts_slot = last_slot;
tp->t_hpts_request = slot;
} else if (maxslots >= slot) {
tp->t_hpts_request = 0;
tp->t_hpts_slot = hpts_slot(wheel_slot, slot);
} else {
tp->t_hpts_request = slot - maxslots;
tp->t_hpts_slot = last_slot;
}
if (diag) {
diag->time_remaining = tp->t_hpts_request;
diag->inp_hptsslot = tp->t_hpts_slot;
}
#ifdef INVARIANTS
check_if_slot_would_be_wrong(hpts, tp, tp->t_hpts_slot);
#endif
if (__predict_true(tp->t_in_hpts != IHPTS_MOVING))
tcp_hpts_insert_internal(tp, hpts);
if ((hpts->p_hpts_active == 0) &&
(tp->t_hpts_request == 0) &&
(hpts->p_on_min_sleep == 0)) {
uint32_t have_slept, yet_to_sleep;
have_slept = hpts_slots_diff(hpts->p_prev_slot, wheel_slot);
if (have_slept < hpts->p_hpts_sleep_time)
yet_to_sleep = hpts->p_hpts_sleep_time - have_slept;
else {
yet_to_sleep = 0;
need_wakeup = true;
}
if (diag) {
diag->have_slept = have_slept;
diag->yet_to_sleep = yet_to_sleep;
}
if (yet_to_sleep &&
(yet_to_sleep > slot)) {
hpts->p_hpts_sleep_time = slot;
need_new_to = slot * HPTS_USECS_PER_SLOT;
}
}
if (need_wakeup) {
hpts->p_direct_wake = 1;
tcp_hpts_wake(hpts);
if (diag) {
diag->need_new_to = 0;
diag->co_ret = 0xffff0000;
}
} else if (need_new_to) {
int32_t co_ret;
struct timeval tv;
tv.tv_sec = 0;
tv.tv_usec = 0;
while (need_new_to > HPTS_USEC_IN_SEC) {
tv.tv_sec++;
need_new_to -= HPTS_USEC_IN_SEC;
}
tv.tv_usec = need_new_to;
co_ret = tcp_hpts_sleep(hpts, &tv);
if (diag) {
diag->need_new_to = need_new_to;
diag->co_ret = co_ret;
}
}
HPTS_UNLOCK(hpts);
}
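/*
 * Choose the hpts/CPU for a connection: honor an already-set CPU, use the
 * LRO/IRQ CPU when requested, RSS when compiled in, otherwise hash the
 * flowid (within the NUMA domain when one is known) or fall back to a
 * random CPU.
 */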
static uint16_t
hpts_cpuid(struct tcp_hptsi *pace, struct tcpcb *tp, int *failed)
{
struct inpcb *inp = tptoinpcb(tp);
u_int cpuid;
#ifdef NUMA
struct hpts_domain_info *di;
#endif
*failed = 0;
if (tp->t_flags2 & TF2_HPTS_CPU_SET) {
return (tp->t_hpts_cpu);
}
if (tcp_use_irq_cpu) {
if (tp->t_lro_cpu == HPTS_CPU_NONE) {
*failed = 1;
return (0);
}
return (tp->t_lro_cpu);
}
#ifdef RSS
cpuid = rss_hash2cpuid(inp->inp_flowid, inp->inp_flowtype);
if (cpuid == NETISR_CPUID_NONE)
return (tcp_hptsi_random_cpu(pace));
else
return (cpuid);
#endif
if (inp->inp_flowtype == M_HASHTYPE_NONE) {
counter_u64_add(cpu_uses_random, 1);
return (tcp_hptsi_random_cpu(pace));
}
#ifdef NUMA
if ((vm_ndomains == 1) ||
(inp->inp_numa_domain == M_NODOM)) {
#endif
cpuid = inp->inp_flowid % mp_ncpus;
#ifdef NUMA
} else {
di = &pace->domains[inp->inp_numa_domain];
cpuid = di->cpu[inp->inp_flowid % di->count];
}
#endif
counter_u64_add(cpu_uses_flowid, 1);
return (cpuid);
}
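/*
 * Set p_hpts_sleep_time to the distance (in slots, capped at
 * hpts_sleep_max) to the next occupied slot, or to the maximum when the
 * wheel is empty or we just processed a full wheel wrap.
 */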
static void
tcp_hpts_set_max_sleep(struct tcp_hpts_entry *hpts, int wrap_loop_cnt)
{
uint32_t t = 0, i;
if ((hpts->p_on_queue_cnt) && (wrap_loop_cnt < 2)) {
for (i = 0, t = hpts_slot(hpts->p_cur_slot, 1); i < NUM_OF_HPTSI_SLOTS; i++) {
if (TAILQ_EMPTY(&hpts->p_hptss[t].head) == 0) {
break;
}
t = (t + 1) % NUM_OF_HPTSI_SLOTS;
}
KASSERT((i != NUM_OF_HPTSI_SLOTS), ("Hpts:%p cnt:%d but none found", hpts, hpts->p_on_queue_cnt));
hpts->p_hpts_sleep_time = min((i + 1), hpts_sleep_max);
} else {
hpts->p_hpts_sleep_time = hpts_sleep_max;
}
}
static bool
tcp_hpts_different_slots(uint32_t cts, uint32_t cts_last_run)
{
return ((cts / HPTS_USECS_PER_SLOT) != (cts_last_run / HPTS_USECS_PER_SLOT));
}
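/*
 * The main pacer pass.  Walk the wheel from the slot after the last one
 * processed (p_prev_slot) up to the slot for the current time
 * (p_cur_slot), detaching each slot's queue and running every connection
 * on it via tcp_output() or the stack's queued-segment handler.  If we
 * have fallen a full wheel behind, all entries are first collapsed into
 * a single slot.  Runs with the hpts lock held, dropping it while
 * connections are processed, and loops again if time moved on while we
 * worked.  Returns a rough count of the slots covered while endpoints
 * were being processed (0 if none were found), which callers use to tune
 * the dynamic sleep time.
 */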
int32_t
tcp_hptsi(struct tcp_hpts_entry *hpts, bool from_callout)
{
struct tcp_hptsi *pace;
struct tcpcb *tp;
struct timeval tv;
int32_t slots_to_run, i, error;
int32_t loop_cnt = 0;
int32_t did_prefetch = 0;
int32_t prefetch_tp = 0;
int32_t wrap_loop_cnt = 0;
int32_t slot_pos_of_endpoint = 0;
int32_t orig_exit_slot;
uint32_t cts, cts_last_run;
bool completed_measure, seen_endpoint;
completed_measure = false;
seen_endpoint = false;
HPTS_MTX_ASSERT(hpts);
NET_EPOCH_ASSERT();
pace = hpts->p_hptsi;
MPASS(pace != NULL);
hpts->saved_curslot = hpts->p_cur_slot;
hpts->saved_prev_slot = hpts->p_prev_slot;
microuptime(&tv);
cts_last_run = pace->cts_last_ran[hpts->p_cpu];
pace->cts_last_ran[hpts->p_cpu] = cts = tcp_tv_to_usec(&tv);
orig_exit_slot = hpts->p_cur_slot = cts_to_wheel(cts);
if ((hpts->p_on_queue_cnt == 0) ||
!tcp_hpts_different_slots(cts, cts_last_run)) {
hpts->p_prev_slot = hpts->p_cur_slot;
goto no_run;
}
again:
hpts->p_wheel_complete = 0;
HPTS_MTX_ASSERT(hpts);
slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
if ((hpts->p_on_queue_cnt != 0) &&
((cts - cts_last_run) >
((NUM_OF_HPTSI_SLOTS-1) * HPTS_USECS_PER_SLOT))) {
wrap_loop_cnt++;
hpts->p_nxt_slot = hpts_slot(hpts->p_prev_slot, 1);
hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 2);
hpts->p_cur_slot = hpts->p_prev_slot;
TAILQ_FOREACH(tp, &hpts->p_hptss[hpts->p_nxt_slot].head,
t_hpts) {
MPASS(tp->t_hpts_slot == hpts->p_nxt_slot);
MPASS(tp->t_hpts_gencnt ==
hpts->p_hptss[hpts->p_nxt_slot].gencnt);
MPASS(tp->t_in_hpts == IHPTS_ONQUEUE);
tp->t_hpts_gencnt =
hpts->p_hptss[hpts->p_runningslot].gencnt;
tp->t_hpts_slot = hpts->p_runningslot;
}
TAILQ_CONCAT(&hpts->p_hptss[hpts->p_runningslot].head,
&hpts->p_hptss[hpts->p_nxt_slot].head, t_hpts);
hpts->p_hptss[hpts->p_runningslot].count +=
hpts->p_hptss[hpts->p_nxt_slot].count;
hpts->p_hptss[hpts->p_nxt_slot].count = 0;
hpts->p_hptss[hpts->p_nxt_slot].gencnt++;
slots_to_run = NUM_OF_HPTSI_SLOTS - 1;
counter_u64_add(wheel_wrap, 1);
} else {
hpts->p_nxt_slot = hpts->p_prev_slot;
hpts->p_runningslot = hpts_slot(hpts->p_prev_slot, 1);
}
if (hpts->p_on_queue_cnt == 0) {
goto no_one;
}
for (i = 0; i < slots_to_run; i++) {
struct tcpcb *tp, *ntp;
TAILQ_HEAD(, tcpcb) head = TAILQ_HEAD_INITIALIZER(head);
struct hptsh *hptsh;
uint32_t runningslot;
hpts->p_delayed_by = (slots_to_run - (i + 1)) *
HPTS_USECS_PER_SLOT;
runningslot = hpts->p_runningslot;
hptsh = &hpts->p_hptss[runningslot];
TAILQ_SWAP(&head, &hptsh->head, tcpcb, t_hpts);
hpts->p_on_queue_cnt -= hptsh->count;
hptsh->count = 0;
hptsh->gencnt++;
HPTS_UNLOCK(hpts);
TAILQ_FOREACH_SAFE(tp, &head, t_hpts, ntp) {
struct inpcb *inp = tptoinpcb(tp);
bool set_cpu;
if (ntp != NULL) {
kern_prefetch(ntp, &prefetch_tp);
prefetch_tp = 1;
}
if (!seen_endpoint) {
seen_endpoint = true;
orig_exit_slot = slot_pos_of_endpoint =
runningslot;
} else if (!completed_measure) {
orig_exit_slot = runningslot;
}
INP_WLOCK(inp);
if ((tp->t_flags2 & TF2_HPTS_CPU_SET) == 0) {
set_cpu = true;
} else {
set_cpu = false;
}
if (__predict_false(tp->t_in_hpts == IHPTS_MOVING)) {
if (tp->t_hpts_slot == -1) {
tp->t_in_hpts = IHPTS_NONE;
if (in_pcbrele_wlocked(inp) == false)
INP_WUNLOCK(inp);
} else {
HPTS_LOCK(hpts);
tcp_hpts_insert_internal(tp, hpts);
HPTS_UNLOCK(hpts);
INP_WUNLOCK(inp);
}
continue;
}
MPASS(tp->t_in_hpts == IHPTS_ONQUEUE);
MPASS(!(inp->inp_flags & INP_DROPPED));
KASSERT(runningslot == tp->t_hpts_slot,
("Hpts:%p inp:%p slot mis-aligned %u vs %u",
hpts, inp, runningslot, tp->t_hpts_slot));
if (tp->t_hpts_request) {
uint32_t maxslots, last_slot, remaining_slots;
remaining_slots = slots_to_run - (i + 1);
if (tp->t_hpts_request > remaining_slots) {
HPTS_LOCK(hpts);
maxslots = max_slots_available(hpts,
hpts->p_cur_slot, &last_slot);
if (maxslots >= tp->t_hpts_request) {
tp->t_hpts_slot = hpts_slot(
hpts->p_runningslot,
tp->t_hpts_request);
tp->t_hpts_request = 0;
} else {
tp->t_hpts_slot = last_slot;
tp->t_hpts_request -=
maxslots;
}
tcp_hpts_insert_internal(tp, hpts);
HPTS_UNLOCK(hpts);
INP_WUNLOCK(inp);
continue;
}
tp->t_hpts_request = 0;
}
tcp_hpts_release(tp);
if (set_cpu) {
__tcp_set_hpts(pace, tp);
}
CURVNET_SET(inp->inp_vnet);
tcp_hpts_log(hpts, tp, &tv, slots_to_run, i, from_callout);
if (tp->t_fb_ptr != NULL) {
kern_prefetch(tp->t_fb_ptr, &did_prefetch);
did_prefetch = 1;
}
tp->t_flags2 |= TF2_HPTS_CALLS;
if ((tp->t_flags2 & TF2_SUPPORTS_MBUFQ) &&
!STAILQ_EMPTY(&tp->t_inqueue))
error = -(*tp->t_fb->tfb_do_queued_segments)(tp,
0);
else
error = tcp_output(tp);
if (__predict_true(error >= 0))
INP_WUNLOCK(inp);
CURVNET_RESTORE();
}
if (seen_endpoint) {
completed_measure = true;
}
HPTS_LOCK(hpts);
hpts->p_runningslot++;
if (hpts->p_runningslot >= NUM_OF_HPTSI_SLOTS) {
hpts->p_runningslot = 0;
}
}
no_one:
HPTS_MTX_ASSERT(hpts);
hpts->p_delayed_by = 0;
hpts->p_prev_slot = hpts->p_cur_slot;
microuptime(&tv);
cts_last_run = cts;
cts = tcp_tv_to_usec(&tv);
if (!from_callout || (loop_cnt > max_pacer_loops)) {
if (from_callout)
counter_u64_add(hpts_hopelessly_behind, 1);
goto no_run;
}
hpts->p_cur_slot = cts_to_wheel(cts);
if (!seen_endpoint) {
orig_exit_slot = hpts->p_cur_slot;
}
if ((wrap_loop_cnt < 2) && tcp_hpts_different_slots(cts, cts_last_run)) {
counter_u64_add(hpts_loops, 1);
loop_cnt++;
goto again;
}
no_run:
pace->cts_last_ran[hpts->p_cpu] = cts;
hpts->p_wheel_complete = 1;
KASSERT((!from_callout || (hpts->p_on_queue_cnt == 0) ||
(loop_cnt > max_pacer_loops) || (wrap_loop_cnt >= 2) ||
((hpts->p_prev_slot == hpts->p_cur_slot) &&
!tcp_hpts_different_slots(cts, cts_last_run))),
("H:%p Shouldn't be done! prev_slot:%u, cur_slot:%u, "
"cts_last_run:%u, cts:%u, loop_cnt:%d, wrap_loop_cnt:%d",
hpts, hpts->p_prev_slot, hpts->p_cur_slot,
cts_last_run, cts, loop_cnt, wrap_loop_cnt));
if (from_callout && tcp_hpts_different_slots(cts, cts_last_run)) {
microuptime(&tv);
cts = tcp_tv_to_usec(&tv);
hpts->p_cur_slot = cts_to_wheel(cts);
counter_u64_add(hpts_loops, 1);
goto again;
}
if (from_callout) {
tcp_hpts_set_max_sleep(hpts, wrap_loop_cnt);
}
if (seen_endpoint)
return(hpts_slots_diff(slot_pos_of_endpoint, orig_exit_slot));
else
return (0);
}
void
__tcp_set_hpts(struct tcp_hptsi *pace, struct tcpcb *tp)
{
struct tcp_hpts_entry *hpts;
int failed;
INP_WLOCK_ASSERT(tptoinpcb(tp));
hpts = tcp_hpts_lock(pace, tp);
if (tp->t_in_hpts == IHPTS_NONE && !(tp->t_flags2 & TF2_HPTS_CPU_SET)) {
tp->t_hpts_cpu = hpts_cpuid(pace, tp, &failed);
if (failed == 0)
tp->t_flags2 |= TF2_HPTS_CPU_SET;
}
HPTS_UNLOCK(hpts);
}
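/*
 * Pick the hpts that has gone longest without running, restricted to the
 * caller's CPU group when more than one group is configured; fall back to
 * the hpts matching curcpu.
 */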
static struct tcp_hpts_entry *
tcp_choose_hpts_to_run(struct tcp_hptsi *pace)
{
struct timeval tv;
int i, oldest_idx, start, end;
uint32_t cts, time_since_ran, calc;
microuptime(&tv);
cts = tcp_tv_to_usec(&tv);
time_since_ran = 0;
start = 0;
end = pace->rp_num_hptss;
if (pace->grp_cnt > 1) {
for (i = 0; i < pace->grp_cnt; i++) {
if (CPU_ISSET(curcpu, &pace->grps[i]->cg_mask)) {
start = pace->grps[i]->cg_first;
end = (pace->grps[i]->cg_last + 1);
break;
}
}
}
oldest_idx = -1;
for (i = start; i < end; i++) {
if (TSTMP_GT(cts, pace->cts_last_ran[i]))
calc = cts - pace->cts_last_ran[i];
else
calc = 0;
if (calc > time_since_ran) {
oldest_idx = i;
time_since_ran = calc;
}
}
if (oldest_idx >= 0)
return(pace->rp_ent[oldest_idx]);
else
return(pace->rp_ent[(curcpu % pace->rp_num_hptss)]);
}
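/*
 * Opportunistic pacer entry from syscall/trap or other kernel paths: if
 * some hpts is over the connection threshold and not already active, run
 * its wheel directly and retune its dynamic sleep time.
 */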
void
_tcp_hpts_softclock(void)
{
struct epoch_tracker et;
struct tcp_hpts_entry *hpts;
int slots_ran;
if (hpts_that_need_softclock == 0)
return;
hpts = tcp_choose_hpts_to_run(tcp_hptsi_pace);
if (hpts->p_hpts_active) {
return;
}
if (!HPTS_TRYLOCK(hpts)) {
return;
}
NET_EPOCH_ENTER(et);
if (hpts->p_hpts_active)
goto out_with_mtx;
hpts->syscall_cnt++;
counter_u64_add(hpts_direct_call, 1);
hpts->p_hpts_active = 1;
slots_ran = tcp_hptsi(hpts, false);
if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
if (slots_ran > slots_indicate_less_sleep) {
struct timeval tv;
hpts->p_mysleep.tv_usec /= 2;
if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
hpts->p_mysleep.tv_usec = dynamic_min_sleep;
tcp_hpts_set_max_sleep(hpts, 0);
tv.tv_sec = 0;
tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_USECS_PER_SLOT;
if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
hpts->overidden_sleep = tv.tv_usec;
tv.tv_usec = hpts->p_mysleep.tv_usec;
} else if (tv.tv_usec > dynamic_max_sleep) {
hpts->overidden_sleep = tv.tv_usec;
tv.tv_usec = dynamic_max_sleep;
}
(void)tcp_hpts_sleep(hpts, &tv);
} else if (slots_ran < slots_indicate_more_sleep) {
hpts->p_mysleep.tv_usec *= 2;
if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
hpts->p_mysleep.tv_usec = dynamic_max_sleep;
}
hpts->p_on_min_sleep = 1;
}
hpts->p_hpts_active = 0;
out_with_mtx:
HPTS_UNLOCK(hpts);
NET_EPOCH_EXIT(et);
}
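/*
 * SWI handler for an hpts: run the wheel, adjust the dynamic sleep
 * interval based on how much work was found, and re-arm the sleep
 * callout.  If the hpts is already active (being run from the softclock
 * path) we simply go back to sleep.
 */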
static void
tcp_hpts_thread(void *ctx)
{
#ifdef TCP_HPTS_KTEST
struct tcp_hptsi *pace;
#endif
struct tcp_hpts_entry *hpts;
struct epoch_tracker et;
struct timeval tv;
int slots_ran;
hpts = (struct tcp_hpts_entry *)ctx;
#ifdef TCP_HPTS_KTEST
pace = hpts->p_hptsi;
#endif
HPTS_LOCK(hpts);
if (hpts->p_direct_wake) {
_callout_stop_safe(&hpts->co, 0);
counter_u64_add(hpts_direct_awakening, 1);
} else {
counter_u64_add(hpts_wake_timeout, 1);
if (callout_pending(&hpts->co) ||
!callout_active(&hpts->co)) {
HPTS_UNLOCK(hpts);
return;
}
}
callout_deactivate(&hpts->co);
hpts->p_hpts_wake_scheduled = 0;
NET_EPOCH_ENTER(et);
if (hpts->p_hpts_active) {
tv.tv_sec = 0;
if (hpts->p_direct_wake == 0) {
counter_u64_add(hpts_back_tosleep, 1);
if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
hpts->p_mysleep.tv_usec *= 2;
if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
hpts->p_mysleep.tv_usec = dynamic_max_sleep;
tv.tv_usec = hpts->p_mysleep.tv_usec;
hpts->p_on_min_sleep = 1;
} else {
hpts->p_on_min_sleep = 0;
tv.tv_usec = tcp_min_hptsi_time;
}
} else {
tv.tv_usec = hpts->p_mysleep.tv_usec;
}
goto back_to_sleep;
}
hpts->sleeping = 0;
hpts->p_hpts_active = 1;
slots_ran = tcp_hptsi(hpts, true);
tv.tv_sec = 0;
tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_USECS_PER_SLOT;
if ((hpts->p_on_queue_cnt > conn_cnt_thresh) && (hpts->hit_callout_thresh == 0)) {
hpts->hit_callout_thresh = 1;
atomic_add_int(&hpts_that_need_softclock, 1);
} else if ((hpts->p_on_queue_cnt <= conn_cnt_thresh) && (hpts->hit_callout_thresh == 1)) {
hpts->hit_callout_thresh = 0;
atomic_subtract_int(&hpts_that_need_softclock, 1);
}
if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
if (hpts->p_direct_wake == 0) {
if (slots_ran < slots_indicate_more_sleep) {
hpts->p_mysleep.tv_usec *= 2;
if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
hpts->p_mysleep.tv_usec = dynamic_max_sleep;
} else if (slots_ran > slots_indicate_less_sleep) {
hpts->p_mysleep.tv_usec /= 2;
if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
hpts->p_mysleep.tv_usec = dynamic_min_sleep;
}
}
if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
hpts->overidden_sleep = tv.tv_usec;
tv.tv_usec = hpts->p_mysleep.tv_usec;
} else if (tv.tv_usec > dynamic_max_sleep) {
hpts->overidden_sleep = tv.tv_usec;
tv.tv_usec = dynamic_max_sleep;
}
hpts->p_on_min_sleep = 1;
} else if (hpts->p_on_queue_cnt == 0) {
hpts->p_on_min_sleep = 0;
hpts->overidden_sleep = 0;
} else {
if (tv.tv_usec < tcp_min_hptsi_time) {
hpts->overidden_sleep = tv.tv_usec;
tv.tv_usec = tcp_min_hptsi_time;
hpts->p_on_min_sleep = 1;
} else {
hpts->overidden_sleep = 0;
hpts->p_on_min_sleep = 0;
}
}
HPTS_MTX_ASSERT(hpts);
hpts->p_hpts_active = 0;
back_to_sleep:
hpts->p_direct_wake = 0;
(void)tcp_hpts_sleep(hpts, &tv);
NET_EPOCH_EXIT(et);
HPTS_UNLOCK(hpts);
}
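/*
 * Helpers to discover the CPU groups that share an L3 cache:
 * hpts_count_level() counts them and hpts_gather_grps() collects pointers
 * to them for thread binding.
 */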
static int32_t
hpts_count_level(struct cpu_group *cg)
{
int32_t count_l3, i;
count_l3 = 0;
if (cg->cg_level == CG_SHARE_L3)
count_l3++;
for (i = 0; i < cg->cg_children; i++) {
count_l3 += hpts_count_level(&cg->cg_child[i]);
}
return (count_l3);
}
static void
hpts_gather_grps(struct cpu_group **grps, int32_t *at, int32_t max, struct cpu_group *cg)
{
int32_t idx, i;
idx = *at;
if (cg->cg_level == CG_SHARE_L3) {
grps[idx] = cg;
idx++;
if (idx == max) {
*at = idx;
return;
}
}
*at = idx;
for (i = 0; i < cg->cg_children; i++) {
hpts_gather_grps(grps, at, max, &cg->cg_child[i]);
}
}
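/*
 * Allocate and initialize a pacer instance: one tcp_hpts_entry per CPU,
 * each with its own wheel, mutex and callout, plus the CPU-group layout
 * used for binding.  The caller supplies the function table (normally
 * tcp_hptsi_default_funcs) and may request per-hpts sysctl nodes.
 */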
struct tcp_hptsi*
tcp_hptsi_create(const struct tcp_hptsi_funcs *funcs, bool enable_sysctl)
{
struct tcp_hptsi *pace;
struct cpu_group *cpu_top;
uint32_t i, j, cts;
int32_t count;
size_t sz, asz;
struct timeval tv;
struct tcp_hpts_entry *hpts;
char unit[16];
uint32_t ncpus = mp_ncpus ? mp_ncpus : MAXCPU;
KASSERT(funcs != NULL, ("funcs is NULL"));
pace = malloc(sizeof(struct tcp_hptsi), M_TCPHPTS, M_WAITOK | M_ZERO);
pace->funcs = funcs;
#ifdef SMP
cpu_top = smp_topo();
#else
cpu_top = NULL;
#endif
pace->rp_num_hptss = ncpus;
sz = (pace->rp_num_hptss * sizeof(struct tcp_hpts_entry *));
pace->rp_ent = malloc(sz, M_TCPHPTS, M_WAITOK | M_ZERO);
sz = (sizeof(uint32_t) * pace->rp_num_hptss);
pace->cts_last_ran = malloc(sz, M_TCPHPTS, M_WAITOK);
if (cpu_top == NULL) {
pace->grp_cnt = 1;
} else {
count = 0;
pace->grp_cnt = hpts_count_level(cpu_top);
if (pace->grp_cnt == 0) {
pace->grp_cnt = 1;
}
sz = (pace->grp_cnt * sizeof(struct cpu_group *));
pace->grps = malloc(sz, M_TCPHPTS, M_WAITOK);
if (pace->grp_cnt == 1) {
pace->grps[0] = cpu_top;
} else {
count = 0;
hpts_gather_grps(pace->grps, &count, pace->grp_cnt, cpu_top);
}
}
microuptime(&tv);
cts = tcp_tv_to_usec(&tv);
asz = sizeof(struct hptsh) * NUM_OF_HPTSI_SLOTS;
for (i = 0; i < pace->rp_num_hptss; i++) {
pace->rp_ent[i] = malloc(sizeof(struct tcp_hpts_entry),
M_TCPHPTS, M_WAITOK | M_ZERO);
pace->rp_ent[i]->p_hptss = malloc(asz, M_TCPHPTS,
M_WAITOK | M_ZERO);
hpts = pace->rp_ent[i];
hpts->p_hpts_sleep_time = hpts_sleep_max;
hpts->p_cpu = i;
pace->cts_last_ran[i] = cts;
hpts->p_cur_slot = cts_to_wheel(cts);
hpts->p_prev_slot = hpts->p_cur_slot;
hpts->p_nxt_slot = hpts_slot(hpts->p_cur_slot, 1);
callout_init(&hpts->co, 1);
hpts->p_hptsi = pace;
mtx_init(&hpts->p_mtx, "tcp_hpts_lck", "hpts",
MTX_DEF | MTX_DUPOK);
for (j = 0; j < NUM_OF_HPTSI_SLOTS; j++) {
TAILQ_INIT(&hpts->p_hptss[j].head);
}
if (enable_sysctl) {
sysctl_ctx_init(&hpts->hpts_ctx);
sprintf(unit, "%u", i);
hpts->hpts_root = SYSCTL_ADD_NODE(&hpts->hpts_ctx,
SYSCTL_STATIC_CHILDREN(_net_inet_tcp_hpts),
OID_AUTO,
unit,
CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"");
SYSCTL_ADD_INT(&hpts->hpts_ctx,
SYSCTL_CHILDREN(hpts->hpts_root),
OID_AUTO, "out_qcnt", CTLFLAG_RD,
&hpts->p_on_queue_cnt, 0,
"Count TCB's awaiting output processing");
SYSCTL_ADD_U16(&hpts->hpts_ctx,
SYSCTL_CHILDREN(hpts->hpts_root),
OID_AUTO, "active", CTLFLAG_RD,
&hpts->p_hpts_active, 0,
"Is the hpts active");
SYSCTL_ADD_UINT(&hpts->hpts_ctx,
SYSCTL_CHILDREN(hpts->hpts_root),
OID_AUTO, "curslot", CTLFLAG_RD,
&hpts->p_cur_slot, 0,
"What the current running pacers goal");
SYSCTL_ADD_UINT(&hpts->hpts_ctx,
SYSCTL_CHILDREN(hpts->hpts_root),
OID_AUTO, "runslot", CTLFLAG_RD,
&hpts->p_runningslot, 0,
"What the running pacers current slot is");
SYSCTL_ADD_UINT(&hpts->hpts_ctx,
SYSCTL_CHILDREN(hpts->hpts_root),
OID_AUTO, "lastran", CTLFLAG_RD,
&pace->cts_last_ran[i], 0,
"The last usec timestamp that this hpts ran");
SYSCTL_ADD_LONG(&hpts->hpts_ctx,
SYSCTL_CHILDREN(hpts->hpts_root),
OID_AUTO, "cur_min_sleep", CTLFLAG_RD,
&hpts->p_mysleep.tv_usec,
"What the running pacers is using for p_mysleep.tv_usec");
SYSCTL_ADD_U64(&hpts->hpts_ctx,
SYSCTL_CHILDREN(hpts->hpts_root),
OID_AUTO, "now_sleeping", CTLFLAG_RD,
&hpts->sleeping, 0,
"What the running pacers is actually sleeping for");
SYSCTL_ADD_U64(&hpts->hpts_ctx,
SYSCTL_CHILDREN(hpts->hpts_root),
OID_AUTO, "syscall_cnt", CTLFLAG_RD,
&hpts->syscall_cnt, 0,
"How many times we had syscalls on this hpts");
}
}
return (pace);
}
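/*
 * Start a pacer instance: create the SWI thread for each hpts, bind it
 * per-CPU or to its CPU group according to tcp_bind_threads, record the
 * NUMA domain layout, and arm the initial sleep callout.
 */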
void
tcp_hptsi_start(struct tcp_hptsi *pace)
{
struct tcp_hpts_entry *hpts;
struct pcpu *pc;
struct timeval tv;
uint32_t i, j;
int count, domain;
int error __diagused;
KASSERT(pace != NULL, ("tcp_hptsi_start: pace is NULL"));
for (i = 0; i < pace->rp_num_hptss; i++) {
hpts = pace->rp_ent[i];
KASSERT(hpts->ie_cookie == NULL,
("tcp_hptsi_start: hpts[%d]->ie_cookie is not NULL", i));
error = swi_add(&hpts->ie, "hpts",
tcp_hpts_thread, (void *)hpts,
SWI_NET, INTR_MPSAFE, &hpts->ie_cookie);
KASSERT(error == 0,
("Can't add hpts:%p i:%d err:%d", hpts, i, error));
if (tcp_bind_threads == 1) {
(void)intr_event_bind(hpts->ie, i);
} else if (tcp_bind_threads == 2) {
for (j = 0; j < pace->grp_cnt; j++) {
if (CPU_ISSET(i, &pace->grps[j]->cg_mask)) {
if (intr_event_bind_ithread_cpuset(hpts->ie,
&pace->grps[j]->cg_mask) == 0) {
pc = pcpu_find(i);
domain = pc->pc_domain;
count = pace->domains[domain].count;
pace->domains[domain].cpu[count] = i;
pace->domains[domain].count++;
break;
}
}
}
}
hpts->p_mysleep.tv_sec = 0;
hpts->p_mysleep.tv_usec = tcp_min_hptsi_time;
tv.tv_sec = 0;
tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_USECS_PER_SLOT;
(void)tcp_hpts_sleep(hpts, &tv);
}
}
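/* Stop a pacer instance: drain each hpts's callout and remove its SWI. */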
void
tcp_hptsi_stop(struct tcp_hptsi *pace)
{
struct tcp_hpts_entry *hpts;
int rv __diagused;
uint32_t i;
KASSERT(pace != NULL, ("tcp_hptsi_stop: pace is NULL"));
for (i = 0; i < pace->rp_num_hptss; i++) {
hpts = pace->rp_ent[i];
KASSERT(hpts != NULL, ("tcp_hptsi_stop: hpts[%d] is NULL", i));
KASSERT(hpts->ie_cookie != NULL,
("tcp_hptsi_stop: hpts[%d]->ie_cookie is NULL", i));
rv = _callout_stop_safe(&hpts->co, CS_DRAIN);
MPASS(rv != 0);
rv = swi_remove(hpts->ie_cookie);
MPASS(rv == 0);
hpts->ie_cookie = NULL;
}
}
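/* Free everything allocated by tcp_hptsi_create(). */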
void
tcp_hptsi_destroy(struct tcp_hptsi *pace)
{
struct tcp_hpts_entry *hpts;
uint32_t i;
KASSERT(pace != NULL, ("tcp_hptsi_destroy: pace is NULL"));
KASSERT(pace->rp_ent != NULL, ("tcp_hptsi_destroy: pace->rp_ent is NULL"));
for (i = 0; i < pace->rp_num_hptss; i++) {
hpts = pace->rp_ent[i];
if (hpts != NULL) {
if (hpts->hpts_root != NULL) {
sysctl_ctx_free(&hpts->hpts_ctx);
}
mtx_destroy(&hpts->p_mtx);
free(hpts->p_hptss, M_TCPHPTS);
free(hpts, M_TCPHPTS);
}
}
free(pace->rp_ent, M_TCPHPTS);
free(pace->cts_last_ran, M_TCPHPTS);
#ifdef SMP
free(pace->grps, M_TCPHPTS);
#endif
free(pace, M_TCPHPTS);
}
static int
tcp_hpts_mod_load(void)
{
int i;
if (vm_ndomains == 1 && tcp_bind_threads == 2)
tcp_bind_threads = 0;
tcp_hptsi_pace = tcp_hptsi_create(&tcp_hptsi_default_funcs, true);
if (tcp_hptsi_pace == NULL)
return (ENOMEM);
hpts_hopelessly_behind = counter_u64_alloc(M_WAITOK);
hpts_loops = counter_u64_alloc(M_WAITOK);
back_tosleep = counter_u64_alloc(M_WAITOK);
combined_wheel_wrap = counter_u64_alloc(M_WAITOK);
wheel_wrap = counter_u64_alloc(M_WAITOK);
hpts_wake_timeout = counter_u64_alloc(M_WAITOK);
hpts_direct_awakening = counter_u64_alloc(M_WAITOK);
hpts_back_tosleep = counter_u64_alloc(M_WAITOK);
hpts_direct_call = counter_u64_alloc(M_WAITOK);
cpu_uses_flowid = counter_u64_alloc(M_WAITOK);
cpu_uses_random = counter_u64_alloc(M_WAITOK);
tcp_hptsi_start(tcp_hptsi_pace);
tcp_lro_hpts_init();
for (i = 0; i < vm_ndomains; i++) {
if (tcp_hptsi_pace->domains[i].count == 0) {
tcp_bind_threads = 0;
break;
}
}
printf("TCP HPTS started %u (%s) swi interrupt threads\n",
tcp_hptsi_pace->rp_num_hptss, (tcp_bind_threads == 0) ?
"(unbounded)" :
(tcp_bind_threads == 1 ? "per-cpu" : "per-NUMA-domain"));
return (0);
}
static void
tcp_hpts_mod_unload(void)
{
tcp_lro_hpts_uninit();
tcp_hptsi_stop(tcp_hptsi_pace);
tcp_hptsi_destroy(tcp_hptsi_pace);
tcp_hptsi_pace = NULL;
counter_u64_free(hpts_hopelessly_behind);
counter_u64_free(hpts_loops);
counter_u64_free(back_tosleep);
counter_u64_free(combined_wheel_wrap);
counter_u64_free(wheel_wrap);
counter_u64_free(hpts_wake_timeout);
counter_u64_free(hpts_direct_awakening);
counter_u64_free(hpts_back_tosleep);
counter_u64_free(hpts_direct_call);
counter_u64_free(cpu_uses_flowid);
counter_u64_free(cpu_uses_random);
}
static int
tcp_hpts_mod_event(module_t mod, int what, void *arg)
{
switch (what) {
case MOD_LOAD:
return (tcp_hpts_mod_load());
case MOD_QUIESCE:
return (EBUSY);
case MOD_UNLOAD:
tcp_hpts_mod_unload();
return (0);
default:
return (EINVAL);
}
}
static moduledata_t tcp_hpts_module = {
.name = "tcphpts",
.evhand = tcp_hpts_mod_event,
};
DECLARE_MODULE(tcphpts, tcp_hpts_module, SI_SUB_SOFTINTR, SI_ORDER_ANY);
MODULE_VERSION(tcphpts, 1);