root/net/ipv4/tcp_fastopen.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/tcp.h>
#include <linux/rcupdate.h>
#include <net/tcp.h>
#include <net/busy_poll.h>

/*
 * This function is called to set a Fast Open socket's "fastopen_rsk" field
 * to NULL when a TFO socket no longer needs to access the request_sock.
 * This happens only after 3WHS has been either completed or aborted (e.g.,
 * RST is received).
 *
 * Before TFO, a child socket is created only after 3WHS is completed,
 * hence it never needs to access the request_sock. Things get a lot more
 * complex with TFO. A child socket, accepted or not, has to access its
 * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK pkts,
 * until 3WHS is either completed or aborted. Afterwards the req will stay
 * around until either the child socket is accepted or, in the rare case, the
 * listener is closed before the child is accepted.
 *
 * In short, a request socket is only freed after BOTH 3WHS has completed
 * (or aborted) and the child socket has been accepted (or listener closed).
 * When a child socket is accepted, its corresponding req->sk is set to
 * NULL since it's no longer needed. More importantly, "req->sk == NULL"
 * will be used by the code below to determine if a child socket has been
 * accepted or not, and the check is protected by the fastopenq->lock
 * described below.
 *
 * Note that fastopen_rsk is only accessed from the child socket's context
 * with its socket lock held. But a request_sock (req) can be accessed by
 * both its child socket through fastopen_rsk, and a listener socket through
 * icsk_accept_queue.rskq_accept_head. To protect this access a simple spin
 * lock per listener, "icsk->icsk_accept_queue.fastopenq.lock", is used.
 * Only in the rare case when both the listener and the child socket locks
 * are held, e.g., in inet_csk_listen_stop(), do we not need to acquire it.
 * The lock also protects other fields such as fastopenq->qlen, which is
 * decremented by this function when fastopen_rsk is no longer needed.
 *
 * Note that another solution was to simply use the listener's existing
 * socket lock. But first, that lock is difficult to use: it is not a
 * simple spin lock, so one must consider sock_owned_by_user() and arrange
 * to use sk_add_backlog(). What really makes it infeasible, though, is the
 * locking hierarchy violation: e.g., inet_csk_listen_stop() may try to
 * acquire a child's lock while already holding the listener's socket lock.
 *
 * This function also sets "treq->tfo_listener" to false.
 * treq->tfo_listener is used by the listener so it is protected by the
 * fastopenq->lock in this function.
 */
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
                           bool reset)
{
        struct sock *lsk = req->rsk_listener;
        struct fastopen_queue *fastopenq;

        fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;

        RCU_INIT_POINTER(tcp_sk(sk)->fastopen_rsk, NULL);
        spin_lock_bh(&fastopenq->lock);
        fastopenq->qlen--;
        tcp_rsk(req)->tfo_listener = false;
        if (req->sk)    /* the child socket hasn't been accepted yet */
                goto out;

        if (!reset || lsk->sk_state != TCP_LISTEN) {
                /* If the listener has been closed don't bother with the
                 * special RST handling below.
                 */
                spin_unlock_bh(&fastopenq->lock);
                reqsk_put(req);
                return;
        }
        /* Wait 60 seconds before removing a req that has triggered a RST.
         * This is a simple defense against a TFO spoofing attack: the req
         * keeps counting against fastopenq->max_qlen, so Fast Open is
         * refused for new connections while qlen exceeds max_qlen.
         *
         * For more details see the CoNEXT'11 "TCP Fast Open" paper.
         */
        req->rsk_timer.expires = jiffies + 60*HZ;
        if (fastopenq->rskq_rst_head == NULL)
                fastopenq->rskq_rst_head = req;
        else
                fastopenq->rskq_rst_tail->dl_next = req;

        req->dl_next = NULL;
        fastopenq->rskq_rst_tail = req;
        fastopenq->qlen++;
out:
        spin_unlock_bh(&fastopenq->lock);
}

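/* Generate and install a random per-netns Fast Open key the first time one
 * is needed, unless a key has already been configured (e.g. via the
 * net.ipv4.tcp_fastopen_key sysctl).
 */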
void tcp_fastopen_init_key_once(struct net *net)
{
        u8 key[TCP_FASTOPEN_KEY_LENGTH];
        struct tcp_fastopen_context *ctxt;

        rcu_read_lock();
        ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctxt) {
                rcu_read_unlock();
                return;
        }
        rcu_read_unlock();

        /* tcp_fastopen_reset_cipher publishes the new context
         * atomically, so we allow this race to happen here.
         *
         * All call sites of tcp_fastopen_cookie_gen also check
         * for a valid cookie, so this is an acceptable risk.
         */
        get_random_bytes(key, sizeof(key));
        tcp_fastopen_reset_cipher(net, NULL, key, NULL);
}

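/* RCU callback: the key material is zeroed before the context is freed. */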
static void tcp_fastopen_ctx_free(struct rcu_head *head)
{
        struct tcp_fastopen_context *ctx =
            container_of(head, struct tcp_fastopen_context, rcu);

        kfree_sensitive(ctx);
}

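/* Drop a listener's private key context, if one was installed; the free is
 * deferred until after an RCU grace period so concurrent readers under
 * rcu_read_lock() stay safe.
 */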
void tcp_fastopen_destroy_cipher(struct sock *sk)
{
        struct tcp_fastopen_context *ctx;

        ctx = rcu_dereference_protected(
                        inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);
        if (ctx)
                call_rcu(&ctx->rcu, tcp_fastopen_ctx_free);
}

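/* Detach the per-netns key context atomically with xchg() and free it
 * after an RCU grace period.
 */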
void tcp_fastopen_ctx_destroy(struct net *net)
{
        struct tcp_fastopen_context *ctxt;

        ctxt = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx, NULL));

        if (ctxt)
                call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free);
}

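/* Install a new primary key and an optional backup key, either for a single
 * listener (@sk != NULL) or for the whole netns. The previous context, if
 * any, is freed after an RCU grace period.
 */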
int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk,
                              void *primary_key, void *backup_key)
{
        struct tcp_fastopen_context *ctx, *octx;
        struct fastopen_queue *q;
        int err = 0;

        ctx = kmalloc_obj(*ctx, GFP_KERNEL);
        if (!ctx) {
                err = -ENOMEM;
                goto out;
        }

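        /* key[0] is always the primary key; key[1], when present, holds the
         * backup key, which is still accepted for incoming cookies so
         * existing clients keep working while keys are rotated.
         */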
        ctx->key[0].key[0] = get_unaligned_le64(primary_key);
        ctx->key[0].key[1] = get_unaligned_le64(primary_key + 8);
        if (backup_key) {
                ctx->key[1].key[0] = get_unaligned_le64(backup_key);
                ctx->key[1].key[1] = get_unaligned_le64(backup_key + 8);
                ctx->num = 2;
        } else {
                ctx->num = 1;
        }

        if (sk) {
                q = &inet_csk(sk)->icsk_accept_queue.fastopenq;
                octx = unrcu_pointer(xchg(&q->ctx, RCU_INITIALIZER(ctx)));
        } else {
                octx = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx,
                                          RCU_INITIALIZER(ctx)));
        }

        if (octx)
                call_rcu(&octx->rcu, tcp_fastopen_ctx_free);
out:
        return err;
}

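/* Copy the currently installed key(s) into @key, primary first, and return
 * how many keys were copied (0 if no context is installed).
 */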
int tcp_fastopen_get_cipher(struct net *net, struct inet_connection_sock *icsk,
                            u64 *key)
{
        struct tcp_fastopen_context *ctx;
        int n_keys = 0, i;

        rcu_read_lock();
        if (icsk)
                ctx = rcu_dereference(icsk->icsk_accept_queue.fastopenq.ctx);
        else
                ctx = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctx) {
                n_keys = tcp_fastopen_context_len(ctx);
                for (i = 0; i < n_keys; i++) {
                        put_unaligned_le64(ctx->key[i].key[0], key + (i * 2));
                        put_unaligned_le64(ctx->key[i].key[1], key + (i * 2) + 1);
                }
        }
        rcu_read_unlock();

        return n_keys;
}

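/* The cookie is the 64-bit SipHash of the connection's source and
 * destination addresses, which are contiguous in both the IPv4 and IPv6
 * headers, keyed by one of the installed Fast Open keys.
 */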
static bool __tcp_fastopen_cookie_gen_cipher(struct request_sock *req,
                                             struct sk_buff *syn,
                                             const siphash_key_t *key,
                                             struct tcp_fastopen_cookie *foc)
{
        BUILD_BUG_ON(TCP_FASTOPEN_COOKIE_SIZE != sizeof(u64));

        if (req->rsk_ops->family == AF_INET) {
                const struct iphdr *iph = ip_hdr(syn);

                foc->val[0] = cpu_to_le64(siphash(&iph->saddr,
                                          sizeof(iph->saddr) +
                                          sizeof(iph->daddr),
                                          key));
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                return true;
        }
#if IS_ENABLED(CONFIG_IPV6)
        if (req->rsk_ops->family == AF_INET6) {
                const struct ipv6hdr *ip6h = ipv6_hdr(syn);

                foc->val[0] = cpu_to_le64(siphash(&ip6h->saddr,
                                          sizeof(ip6h->saddr) +
                                          sizeof(ip6h->daddr),
                                          key));
                foc->len = TCP_FASTOPEN_COOKIE_SIZE;
                return true;
        }
#endif
        return false;
}

/* Generate the fastopen cookie by applying SipHash to both the source and
 * destination addresses.
 */
static void tcp_fastopen_cookie_gen(struct sock *sk,
                                    struct request_sock *req,
                                    struct sk_buff *syn,
                                    struct tcp_fastopen_cookie *foc)
{
        struct tcp_fastopen_context *ctx;

        rcu_read_lock();
        ctx = tcp_fastopen_get_ctx(sk);
        if (ctx)
                __tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[0], foc);
        rcu_read_unlock();
}

/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
                return;

        skb = skb_clone(skb, GFP_ATOMIC);
        if (!skb)
                return;

        tcp_cleanup_skb(skb);
        /* segs_in has been initialized to 1 in tcp_create_openreq_child().
         * Hence, reset segs_in to 0 before calling tcp_segs_in()
         * to avoid double counting.  Also, tcp_segs_in() expects
         * skb->len to include the tcp_hdrlen.  Hence, it should
         * be called before __skb_pull().
         */
        tp->segs_in = 0;
        tcp_segs_in(tp, skb);
        __skb_pull(skb, tcp_hdrlen(skb));
        sk_forced_mem_schedule(sk, skb->truesize);
        skb_set_owner_r(skb, sk);

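        /* The SYN consumes one sequence number: strip the SYN flag and
         * advance seq so the skb now covers only the payload (and FIN).
         */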
        TCP_SKB_CB(skb)->seq++;
        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

        tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
        tcp_add_receive_queue(sk, skb);
        tp->syn_data_acked = 1;

        /* u64_stats_update_begin(&tp->syncp) not needed here,
         * as we certainly are not changing upper 32bit value (0)
         */
        tp->bytes_received = skb->len;

        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
                tcp_fin(sk);
}

/* returns 0 - no key match, 1 for primary, 2 for backup */
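/* The cookie derived from the primary key is always left in *valid_foc so
 * the caller can echo it to the client, even when the client's cookie
 * matched the backup key or matched nothing at all.
 */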
static int tcp_fastopen_cookie_gen_check(struct sock *sk,
                                         struct request_sock *req,
                                         struct sk_buff *syn,
                                         struct tcp_fastopen_cookie *orig,
                                         struct tcp_fastopen_cookie *valid_foc)
{
        struct tcp_fastopen_cookie search_foc = { .len = -1 };
        struct tcp_fastopen_cookie *foc = valid_foc;
        struct tcp_fastopen_context *ctx;
        int i, ret = 0;

        rcu_read_lock();
        ctx = tcp_fastopen_get_ctx(sk);
        if (!ctx)
                goto out;
        for (i = 0; i < tcp_fastopen_context_len(ctx); i++) {
                __tcp_fastopen_cookie_gen_cipher(req, syn, &ctx->key[i], foc);
                if (tcp_fastopen_cookie_match(foc, orig)) {
                        ret = i + 1;
                        goto out;
                }
                foc = &search_foc;
        }
out:
        rcu_read_unlock();
        return ret;
}

static struct sock *tcp_fastopen_create_child(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct request_sock *req)
{
        struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
        bool own_req;

        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
                                                         NULL, &own_req, NULL);
        if (!child)
                return NULL;

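        /* Count this req against the listener's TFO queue; the counter is
         * only dropped in reqsk_fastopen_remove() once the child no longer
         * needs the request_sock.
         */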
        spin_lock(&queue->fastopenq.lock);
        queue->fastopenq.qlen++;
        spin_unlock(&queue->fastopenq.lock);

        /* Initialize the child socket. Have to fix some values to take
         * into account the child is a Fast Open socket and is created
         * only out of the bits carried in the SYN packet.
         */
        tp = tcp_sk(child);

        rcu_assign_pointer(tp->fastopen_rsk, req);
        tcp_rsk(req)->tfo_listener = true;

        /* RFC1323: The window in SYN & SYN/ACK segments is never
         * scaled. So correct it appropriately.
         */
        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
        tp->max_window = tp->snd_wnd;

        /* Activate the retrans timer so that SYNACK can be retransmitted.
         * The request socket is not added to the ehash
         * because it's been added to the accept queue directly.
         */
        req->timeout = tcp_timeout_init(child);
        tcp_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                             req->timeout, false);

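        /* Two references on the req: one for the child's fastopen_rsk
         * pointer and one for the listener's accept queue, to which
         * tcp_conn_request() will add the req (see the comment below).
         */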
        refcount_set(&req->rsk_refcnt, 2);

        sk_mark_napi_id_set(child, skb);

        /* Now finish processing the fastopen child socket. */
        tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);

        tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;

        tcp_fastopen_add_skb(child, skb);

        tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
        tp->rcv_wup = tp->rcv_nxt;
        /* tcp_conn_request() is sending the SYNACK,
         * and queues the child into listener accept queue.
         */
        return child;
}

static bool tcp_fastopen_queue_check(struct sock *sk)
{
        struct fastopen_queue *fastopenq;
        int max_qlen;

        /* Make sure the listener has enabled fastopen, and we don't
         * exceed the max # of pending TFO requests allowed, before trying
         * to validate the cookie, in order to avoid burning CPU cycles
         * unnecessarily.
         *
         * XXX (TFO) - The implication of checking the max_qlen before
         * processing a cookie request is that clients can't differentiate
         * between qlen overflow causing Fast Open to be disabled
         * temporarily vs a server not supporting Fast Open at all.
         */
        fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;
        max_qlen = READ_ONCE(fastopenq->max_qlen);
        if (max_qlen == 0)
                return false;

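        /* The TFO queue is full: try to reclaim the oldest RST-deferred req
         * (queued by reqsk_fastopen_remove()) if its 60s hold time has
         * expired; otherwise refuse Fast Open for this SYN.
         */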
        if (fastopenq->qlen >= max_qlen) {
                struct request_sock *req1;
                spin_lock(&fastopenq->lock);
                req1 = fastopenq->rskq_rst_head;
                if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
                        spin_unlock(&fastopenq->lock);
                        return false;
                }
                fastopenq->rskq_rst_head = req1->dl_next;
                fastopenq->qlen--;
                spin_unlock(&fastopenq->lock);
                reqsk_put(req1);
        }
        return true;
}

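/* Fast Open without a cookie is allowed when enabled by the sysctl flag
 * passed in @flag, by the per-socket fastopen_no_cookie flag (the
 * TCP_FASTOPEN_NO_COOKIE socket option), or by the RTAX_FASTOPEN_NO_COOKIE
 * route metric.
 */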
static bool tcp_fastopen_no_cookie(const struct sock *sk,
                                   const struct dst_entry *dst,
                                   int flag)
{
        return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
               tcp_sk(sk)->fastopen_no_cookie ||
               (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
}

/* Returns the child socket if we perform Fast Open on the SYN, otherwise NULL.
 * The cookie (foc) may be updated and returned to the client in the SYN-ACK
 * later, e.g., for a Fast Open cookie request (foc->len == 0).
 */
struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              const struct dst_entry *dst)
{
        bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
        int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
        struct tcp_fastopen_cookie valid_foc = { .len = -1 };
        struct sock *child;
        int ret = 0;

        if (foc->len == 0) /* Client requests a cookie */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);

        if (!((tcp_fastopen & TFO_SERVER_ENABLE) &&
              (syn_data || foc->len >= 0) &&
              tcp_fastopen_queue_check(sk))) {
                foc->len = -1;
                return NULL;
        }

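        /* Cookie-less Fast Open: if a cookie is not required (sysctl, socket
         * option or route metric), skip validation and accept the SYN data
         * right away.
         */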
        if (tcp_fastopen_no_cookie(sk, dst, TFO_SERVER_COOKIE_NOT_REQD))
                goto fastopen;

        if (foc->len == 0) {
                /* Client requests a cookie. */
                tcp_fastopen_cookie_gen(sk, req, skb, &valid_foc);
        } else if (foc->len > 0) {
                ret = tcp_fastopen_cookie_gen_check(sk, req, skb, foc,
                                                    &valid_foc);
                if (!ret) {
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
                } else {
                        /* Cookie is valid. Create a (full) child socket to
                         * accept the data in the SYN before returning a
                         * SYN-ACK to ack the data. If we fail to create the
                         * socket, fall back and ack only the ISN, but still
                         * include the same cookie.
                         *
                         * Note: a data-less SYN with a valid cookie is
                         * allowed to send data in SYN_RECV state.
                         */
fastopen:
                        child = tcp_fastopen_create_child(sk, skb, req);
                        if (child) {
                                if (ret == 2) {
                                        valid_foc.exp = foc->exp;
                                        *foc = valid_foc;
                                        NET_INC_STATS(sock_net(sk),
                                                      LINUX_MIB_TCPFASTOPENPASSIVEALTKEY);
                                } else {
                                        foc->len = -1;
                                }
                                NET_INC_STATS(sock_net(sk),
                                              LINUX_MIB_TCPFASTOPENPASSIVE);
                                tcp_sk(child)->syn_fastopen_child = 1;
                                return child;
                        }
                        NET_INC_STATS(sock_net(sk),
                                      LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
                }
        }
        valid_foc.exp = foc->exp;
        *foc = valid_foc;
        return NULL;
}

bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
                               struct tcp_fastopen_cookie *cookie)
{
        const struct dst_entry *dst;

        tcp_fastopen_cache_get(sk, mss, cookie);

        /* Firewall blackhole issue check */
        if (tcp_fastopen_active_should_disable(sk)) {
                cookie->len = -1;
                return false;
        }

        dst = __sk_dst_get(sk);

        if (tcp_fastopen_no_cookie(sk, dst, TFO_CLIENT_NO_COOKIE)) {
                cookie->len = -1;
                return true;
        }
        if (cookie->len > 0)
                return true;
        tcp_sk(sk)->fastopen_client_fail = TFO_COOKIE_UNAVAILABLE;
        return false;
}

/* This function checks if we want to defer sending SYN until the first
 * write().  We defer under the following conditions:
 * 1. fastopen_connect sockopt is set
 * 2. we have a valid cookie
 * Return value: return true if we want to defer until application writes data
 *               return false if we want to send out SYN immediately
 */
bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
{
        struct tcp_fastopen_cookie cookie = { .len = 0 };
        struct tcp_sock *tp = tcp_sk(sk);
        u16 mss;

        if (tp->fastopen_connect && !tp->fastopen_req) {
                if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
                        inet_set_bit(DEFER_CONNECT, sk);
                        return true;
                }

                /* Alloc fastopen_req in order for FO option to be included
                 * in SYN
                 */
                tp->fastopen_req = kzalloc_obj(*tp->fastopen_req,
                                               sk->sk_allocation);
                if (tp->fastopen_req)
                        tp->fastopen_req->cookie = cookie;
                else
                        *err = -ENOBUFS;
        }
        return false;
}
EXPORT_IPV6_MOD(tcp_fastopen_defer_connect);

/*
 * The following code block is to deal with middle box issues with TFO:
 * Middlebox firewall issues can potentially cause server's data being
 * blackholed after a successful 3WHS using TFO.
 * The proposed solution is to disable active TFO globally under the
 * following circumstances:
 *   1. client side TFO socket receives out of order FIN
 *   2. client side TFO socket receives out of order RST
 *   3. client side TFO socket has timed out three times consecutively during
 *      or after handshake
 * We disable active side TFO globally for 1hr at first. Then if it
 * happens again, we disable it for 2h, then 4h, 8h, ...
 * And we reset the timeout back to 1hr when we see a successful active
 * TFO connection with data exchanges.
 */

/* Disable active TFO and record current jiffies and
 * tfo_active_disable_times
 */
void tcp_fastopen_active_disable(struct sock *sk)
{
        struct net *net = sock_net(sk);

        if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
                return;

        /* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
        WRITE_ONCE(net->ipv4.tfo_active_disable_stamp, jiffies);

        /* Paired with smp_rmb() in tcp_fastopen_active_should_disable().
         * We want net->ipv4.tfo_active_disable_stamp to be updated first.
         */
        smp_mb__before_atomic();
        atomic_inc(&net->ipv4.tfo_active_disable_times);

        NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
}

/* Calculate the timeout for TFO active disable.
 * Return true if we are still in the active TFO disable period.
 * Return false if the timeout has already expired and we should use active TFO.
 */
bool tcp_fastopen_active_should_disable(struct sock *sk)
{
        unsigned int tfo_bh_timeout =
                READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
        unsigned long timeout;
        int tfo_da_times;
        int multiplier;

        if (!tfo_bh_timeout)
                return false;

        tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
        if (!tfo_da_times)
                return false;

        /* Paired with smp_mb__before_atomic() in tcp_fastopen_active_disable() */
        smp_rmb();

        /* Limit timeout to max: 2^6 * initial timeout */
        multiplier = 1 << min(tfo_da_times - 1, 6);

        /* Paired with the WRITE_ONCE() in tcp_fastopen_active_disable(). */
        timeout = READ_ONCE(sock_net(sk)->ipv4.tfo_active_disable_stamp) +
                  multiplier * tfo_bh_timeout * HZ;
        if (time_before(jiffies, timeout))
                return true;

        /* Mark check bit so we can check for successful active TFO
         * condition and reset tfo_active_disable_times
         */
        tcp_sk(sk)->syn_fastopen_ch = 1;
        return false;
}

/* Disable active TFO if FIN is the only packet in the ofo queue
 * and no data has been received.
 * Also check if we can reset tfo_active_disable_times if data is
 * received successfully on a marked active TFO socket opened on
 * a non-loopback interface.
 */
void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct net_device *dev;
        struct dst_entry *dst;
        struct sk_buff *skb;

        if (!tp->syn_fastopen)
                return;

        if (!tp->data_segs_in) {
                skb = skb_rb_first(&tp->out_of_order_queue);
                if (skb && !skb_rb_next(skb)) {
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) {
                                tcp_fastopen_active_disable(sk);
                                return;
                        }
                }
        } else if (tp->syn_fastopen_ch &&
                   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
                rcu_read_lock();
                dst = __sk_dst_get(sk);
                dev = dst ? dst_dev_rcu(dst) : NULL;
                if (!(dev && (dev->flags & IFF_LOOPBACK)))
                        atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
                rcu_read_unlock();
        }
}

void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired)
{
        u32 timeouts = inet_csk(sk)->icsk_retransmits;
        struct tcp_sock *tp = tcp_sk(sk);

        /* Broken middle-boxes may black-hole a Fast Open connection during or
         * even after the handshake. Be extremely conservative and pause
         * Fast Open globally after hitting the third consecutive timeout or
         * exceeding the configured timeout limit.
         */
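        /* icsk_retransmits counts timeouts already handled, so timeouts == 2
         * means this is the third consecutive timeout being processed.
         */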
        if ((tp->syn_fastopen || tp->syn_data || tp->syn_data_acked) &&
            (timeouts == 2 || (timeouts < 2 && expired))) {
                tcp_fastopen_active_disable(sk);
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
        }
}