#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/udp.h>
#include <net/addrconf.h>
#include <net/dst_cache.h>
#include <net/route.h>
#include <net/ipv6_stubs.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include "ovpnpriv.h"
#include "main.h"
#include "bind.h"
#include "io.h"
#include "peer.h"
#include "proto.h"
#include "socket.h"
#include "udp.h"
/* Resolve the ovpn_socket bound to a UDP socket.
 *
 * Must be invoked with rcu_read protection (the receive path provides it),
 * as sk_user_data is dereferenced under RCU.
 *
 * Return: the attached ovpn_socket, or NULL when @sk is not an
 *	   OVPN-in-UDP encap socket or its user data does not point
 *	   back at @sk.
 */
static struct ovpn_socket *ovpn_socket_from_udp_sock(struct sock *sk)
{
	struct ovpn_socket *sock;

	/* only sockets configured for OVPN-in-UDP encapsulation qualify */
	if (unlikely(READ_ONCE(udp_sk(sk)->encap_type) != UDP_ENCAP_OVPNINUDP))
		return NULL;

	sock = rcu_dereference_sk_user_data(sk);

	/* sanity check: user data must exist and reference this very sock */
	if (unlikely(!sock || !sock->sk || sock->sk != sk))
		return NULL;

	return sock;
}
/* ovpn_udp_encap_recv - encap receive hook installed on the UDP socket
 * @sk: socket the packet was received on
 * @skb: the incoming packet, UDP header still in place
 *
 * Return: 0 when the packet was consumed (handed to the ovpn RX path or
 *	   dropped), >0 to let the regular UDP stack deliver it to
 *	   userspace (non-data packets, i.e. control channel traffic).
 */
static int ovpn_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct ovpn_socket *ovpn_sock;
	struct ovpn_priv *ovpn;
	struct ovpn_peer *peer;
	u32 peer_id;
	u8 opcode;

	ovpn_sock = ovpn_socket_from_udp_sock(sk);
	if (unlikely(!ovpn_sock)) {
		net_err_ratelimited("ovpn: %s invoked on non ovpn socket\n",
				    __func__);
		goto drop_noovpn;
	}

	ovpn = ovpn_sock->ovpn;
	if (unlikely(!ovpn)) {
		net_err_ratelimited("ovpn: cannot obtain ovpn object from UDP socket\n");
		goto drop_noovpn;
	}

	/* make sure the UDP header plus the ovpn opcode are in the
	 * linear area before parsing them
	 */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct udphdr) +
				    OVPN_OPCODE_SIZE))) {
		net_dbg_ratelimited("%s: packet too small from UDP socket\n",
				    netdev_name(ovpn->dev));
		goto drop;
	}

	opcode = ovpn_opcode_from_skb(skb, sizeof(struct udphdr));
	if (unlikely(opcode != OVPN_DATA_V2)) {
		/* DATA_V1 is silently dropped here */
		if (opcode == OVPN_DATA_V1)
			goto drop;
		/* not a data packet: hand it back to the UDP stack so
		 * userspace can receive it
		 */
		return 1;
	}

	peer_id = ovpn_peer_id_from_skb(skb, sizeof(struct udphdr));
	/* without a peer-id in the header, fall back to a lookup by
	 * transport (source) address
	 */
	if (peer_id == OVPN_PEER_ID_UNDEF)
		peer = ovpn_peer_get_by_transp_addr(ovpn, skb);
	else
		peer = ovpn_peer_get_by_id(ovpn, peer_id);
	if (unlikely(!peer))
		goto drop;

	/* strip the UDP header before passing the payload up */
	__skb_pull(skb, sizeof(struct udphdr));
	ovpn_recv(peer, skb);
	return 0;

drop:
	/* only account the drop once we know which device it belongs to */
	dev_dstats_rx_dropped(ovpn->dev);
drop_noovpn:
	kfree_skb(skb);
	return 0;
}
/* ovpn_udp4_output - resolve an IPv4 route and transmit @skb to the peer
 * @peer: the destination peer (owns the bind and its lock)
 * @bind: current binding describing local/remote endpoint addresses
 * @cache: per-peer dst cache used to avoid repeated route lookups
 * @sk: the UDP socket used as transport
 * @skb: packet to send (ovpn payload, UDP header added here)
 *
 * Runs with BHs disabled for the whole lookup+transmit sequence.
 *
 * Return: 0 on success or a negative error code from the route lookup.
 */
static int ovpn_udp4_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
			    struct dst_cache *cache, struct sock *sk,
			    struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl = {
		.saddr = bind->local.ipv4.s_addr,
		.daddr = bind->remote.in4.sin_addr.s_addr,
		.fl4_sport = inet_sk(sk)->inet_sport,
		.fl4_dport = bind->remote.in4.sin_port,
		.flowi4_proto = sk->sk_protocol,
		.flowi4_mark = sk->sk_mark,
	};
	int ret;

	local_bh_disable();
	/* fast path: reuse a cached route if one is still valid */
	rt = dst_cache_get_ip4(cache, &fl.saddr);
	if (rt)
		goto transmit;

	/* the bound local address may no longer exist on this host
	 * (e.g. after an address change): clear it so the route lookup
	 * auto-selects a source, and invalidate bind + cache
	 */
	if (unlikely(!inet_confirm_addr(sock_net(sk), NULL, 0, fl.saddr,
					RT_SCOPE_HOST))) {
		fl.saddr = 0;
		spin_lock_bh(&peer->lock);
		bind->local.ipv4.s_addr = 0;
		spin_unlock_bh(&peer->lock);
		dst_cache_reset(cache);
	}

	rt = ip_route_output_flow(sock_net(sk), &fl, sk);
	if (IS_ERR(rt) && PTR_ERR(rt) == -EINVAL) {
		/* lookup rejected the requested source address: retry
		 * once with auto-selection after wiping the stale bind
		 */
		fl.saddr = 0;
		spin_lock_bh(&peer->lock);
		bind->local.ipv4.s_addr = 0;
		spin_unlock_bh(&peer->lock);
		dst_cache_reset(cache);

		rt = ip_route_output_flow(sock_net(sk), &fl, sk);
	}

	if (IS_ERR(rt)) {
		ret = PTR_ERR(rt);
		net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
				    netdev_name(peer->ovpn->dev),
				    &bind->remote.in4,
				    ret);
		goto err;
	}
	/* remember the route (and possibly auto-selected saddr) for next time */
	dst_cache_set_ip4(cache, &rt->dst, fl.saddr);

transmit:
	udp_tunnel_xmit_skb(rt, sk, skb, fl.saddr, fl.daddr, 0,
			    ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport,
			    fl.fl4_dport, false, sk->sk_no_check_tx, 0);
	ret = 0;
err:
	local_bh_enable();
	return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
/* ovpn_udp6_output - resolve an IPv6 route and transmit @skb to the peer
 * @peer: the destination peer (owns the bind and its lock)
 * @bind: current binding describing local/remote endpoint addresses
 * @cache: per-peer dst cache used to avoid repeated route lookups
 * @sk: the UDP socket used as transport
 * @skb: packet to send (ovpn payload, UDP header added here)
 *
 * IPv6 counterpart of ovpn_udp4_output(); runs with BHs disabled.
 *
 * Return: 0 on success or a negative error code from the route lookup.
 */
static int ovpn_udp6_output(struct ovpn_peer *peer, struct ovpn_bind *bind,
			    struct dst_cache *cache, struct sock *sk,
			    struct sk_buff *skb)
{
	struct dst_entry *dst;
	int ret;

	struct flowi6 fl = {
		.saddr = bind->local.ipv6,
		.daddr = bind->remote.in6.sin6_addr,
		.fl6_sport = inet_sk(sk)->inet_sport,
		.fl6_dport = bind->remote.in6.sin6_port,
		.flowi6_proto = sk->sk_protocol,
		.flowi6_mark = sk->sk_mark,
		.flowi6_oif = bind->remote.in6.sin6_scope_id,
	};

	local_bh_disable();
	/* fast path: reuse a cached route if one is still valid */
	dst = dst_cache_get_ip6(cache, &fl.saddr);
	if (dst)
		goto transmit;

	/* the bound local address may no longer exist on this host:
	 * clear it so the route lookup auto-selects a source, and
	 * invalidate bind + cache
	 */
	if (unlikely(!ipv6_chk_addr(sock_net(sk), &fl.saddr, NULL, 0))) {
		fl.saddr = in6addr_any;
		spin_lock_bh(&peer->lock);
		bind->local.ipv6 = in6addr_any;
		spin_unlock_bh(&peer->lock);
		dst_cache_reset(cache);
	}

	dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sk), sk, &fl, NULL);
	if (IS_ERR(dst)) {
		ret = PTR_ERR(dst);
		net_dbg_ratelimited("%s: no route to host %pISpc: %d\n",
				    netdev_name(peer->ovpn->dev),
				    &bind->remote.in6, ret);
		goto err;
	}
	/* remember the route (and possibly auto-selected saddr) for next time */
	dst_cache_set_ip6(cache, dst, &fl.saddr);

transmit:
	/* user IPv6 packets may be larger than the transport PMTU */
	skb->ignore_df = 1;
	udp_tunnel6_xmit_skb(dst, sk, skb, skb->dev, &fl.saddr, &fl.daddr, 0,
			     ip6_dst_hoplimit(dst), 0, fl.fl6_sport,
			     fl.fl6_dport, udp_get_no_check6_tx(sk), 0);
	ret = 0;
err:
	local_bh_enable();
	return ret;
}
#endif
/* ovpn_udp_output - dispatch @skb to the family-specific transmit helper
 * @peer: destination peer, whose current bind selects the address family
 * @cache: per-peer dst cache handed down to the helpers
 * @sk: UDP transport socket
 * @skb: packet to transmit
 *
 * Return: 0 on success, -ENODEV when the peer has no bind,
 *	   -EAFNOSUPPORT for an unknown address family, or the error
 *	   from the v4/v6 output helper.
 */
static int ovpn_udp_output(struct ovpn_peer *peer, struct dst_cache *cache,
			   struct sock *sk, struct sk_buff *skb)
{
	struct ovpn_bind *bind;
	int ret;

	/* set sk to NULL if skb is already orphaned */
	if (!skb->destructor)
		skb->sk = NULL;

	/* the bind is RCU-protected; hold the read lock across its use */
	rcu_read_lock();
	bind = rcu_dereference(peer->bind);
	if (unlikely(!bind)) {
		net_warn_ratelimited("%s: no bind for remote peer %u\n",
				     netdev_name(peer->ovpn->dev), peer->id);
		ret = -ENODEV;
		goto out;
	}

	switch (bind->remote.in4.sin_family) {
	case AF_INET:
		ret = ovpn_udp4_output(peer, bind, cache, sk, skb);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ret = ovpn_udp6_output(peer, bind, cache, sk, skb);
		break;
#endif
	default:
		ret = -EAFNOSUPPORT;
		break;
	}

out:
	rcu_read_unlock();
	return ret;
}
/* ovpn_udp_send_skb - prepare and send an ovpn packet over UDP
 * @peer: destination peer
 * @sk: UDP transport socket
 * @skb: fully assembled ovpn packet to transmit
 *
 * Consumes @skb in all cases: on output failure it is freed here.
 */
void ovpn_udp_send_skb(struct ovpn_peer *peer, struct sock *sk,
		       struct sk_buff *skb)
{
	skb->dev = peer->ovpn->dev;
	skb->mark = READ_ONCE(sk->sk_mark);
	/* no checksum performed at this layer */
	skb->ip_summed = CHECKSUM_NONE;

	if (unlikely(ovpn_udp_output(peer, &peer->dst_cache, sk, skb) < 0))
		kfree_skb(skb);
}
static void ovpn_udp_encap_destroy(struct sock *sk)
{
struct ovpn_socket *sock;
struct ovpn_priv *ovpn;
rcu_read_lock();
sock = rcu_dereference_sk_user_data(sk);
if (!sock || !sock->ovpn) {
rcu_read_unlock();
return;
}
ovpn = sock->ovpn;
rcu_read_unlock();
ovpn_peers_free(ovpn, sk, OVPN_DEL_PEER_REASON_TRANSPORT_DISCONNECT);
}
/* ovpn_udp_socket_attach - configure a UDP socket for ovpn encapsulation
 * @ovpn_sock: the ovpn socket wrapper to install as sk_user_data
 * @sock: the userspace-provided UDP socket to set up
 * @ovpn: the ovpn instance that wants to own the socket
 *
 * Return: 0 on success, -EALREADY if the socket is already attached to
 *	   this very interface, -EBUSY if it is in use by someone else.
 */
int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock, struct socket *sock,
			   struct ovpn_priv *ovpn)
{
	struct udp_tunnel_sock_cfg cfg = {
		.sk_user_data = ovpn_sock,
		.encap_type = UDP_ENCAP_OVPNINUDP,
		.encap_rcv = ovpn_udp_encap_recv,
		.encap_destroy = ovpn_udp_encap_destroy,
	};
	struct ovpn_socket *old_data;
	int ret;

	/* make sure no pre-existing encapsulation handler exists */
	rcu_read_lock();
	old_data = rcu_dereference_sk_user_data(ovpn_sock->sk);
	if (!old_data) {
		/* socket is free: install our encap configuration */
		rcu_read_unlock();
		setup_udp_tunnel_sock(sock_net(ovpn_sock->sk), sock, &cfg);
		return 0;
	}

	/* distinguish "already ours" from "taken by another user" */
	if ((READ_ONCE(udp_sk(ovpn_sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) &&
	    old_data->ovpn == ovpn) {
		netdev_dbg(ovpn->dev,
			   "provided socket already owned by this interface\n");
		ret = -EALREADY;
	} else {
		netdev_dbg(ovpn->dev,
			   "provided socket already taken by other user\n");
		ret = -EBUSY;
	}
	rcu_read_unlock();
	return ret;
}
/* ovpn_udp_socket_detach - restore a UDP socket to plain (non-encap) mode
 * @ovpn_sock: the ovpn socket wrapper being torn down
 *
 * Clears the encap configuration and drops the sk_user_data reference,
 * presumably reverting what setup_udp_tunnel_sock() changed at attach
 * time (MC_LOOP bit, convert_csum counter) — confirm against the
 * udp_tunnel core if those semantics change.
 */
void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock)
{
	struct sock *sk = ovpn_sock->sk;

	inet_set_bit(MC_LOOP, sk);
	inet_dec_convert_csum(sk);

	/* clear encap state before unlinking the user data, so the
	 * receive hook can no longer be entered for this socket
	 */
	WRITE_ONCE(udp_sk(sk)->encap_type, 0);
	WRITE_ONCE(udp_sk(sk)->encap_rcv, NULL);
	WRITE_ONCE(udp_sk(sk)->encap_destroy, NULL);

	rcu_assign_sk_user_data(sk, NULL);
}