// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Garbage collector for inflight AF_UNIX sockets.
 */
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>
#include "af_unix.h"
struct unix_vertex {
struct list_head edges;
struct list_head entry;
struct list_head scc_entry;
unsigned long out_degree;
unsigned long index;
unsigned long scc_index;
};
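/* One inflight reference: predecessor's fd was sent to successor. */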
struct unix_edge {
struct unix_sock *predecessor;
struct unix_sock *successor;
struct list_head vertex_entry;
struct list_head stack_entry;
};
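/* Return the unix_sock behind a file, or NULL if the file is not an
 * AF_UNIX socket.  O_PATH descriptors are skipped; they cannot be used
 * for I/O on the socket, so they don't count as inflight references.
 */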
struct unix_sock *unix_get_socket(struct file *filp)
{
struct inode *inode = file_inode(filp);
if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
struct socket *sock = SOCKET_I(inode);
const struct proto_ops *ops;
struct sock *sk = sock->sk;
ops = READ_ONCE(sock->ops);
if (sk && ops && ops->family == PF_UNIX)
return unix_sk(sk);
}
return NULL;
}
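/* If the successor is an embryo (not yet accept()ed) socket, the
 * listener indirectly holds the inflight fd's reference, so the edge
 * resolves to the listener's vertex.
 */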
static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
{
if (edge->successor->listener)
return unix_sk(edge->successor->listener)->vertex;
return edge->successor->vertex;
}
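/* NOT_CYCLIC:   no inflight cycle can exist; GC has nothing to do.
 * MAYBE_CYCLIC: the graph changed since the last full Tarjan pass.
 * CYCLIC:       the last pass found at least one cyclic SCC, and its
 *               grouping is still valid for the fast path.
 */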
enum {
UNIX_GRAPH_NOT_CYCLIC,
UNIX_GRAPH_MAYBE_CYCLIC,
UNIX_GRAPH_CYCLIC,
};
static unsigned char unix_graph_state;
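/* Called whenever an edge is added or removed.  A NULL vertex means the
 * receiver socket is not inflight, so no new cyclic reference could
 * have been formed and the graph state can stay as-is.
 */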
static void unix_update_graph(struct unix_vertex *vertex)
{
if (!vertex)
return;
WRITE_ONCE(unix_graph_state, UNIX_GRAPH_MAYBE_CYCLIC);
}
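/* Every inflight vertex sits on unix_unvisited_vertices, and is moved
 * to unix_visited_vertices while a GC pass groups it into an SCC.
 */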
static LIST_HEAD(unix_unvisited_vertices);
enum unix_vertex_index {
UNIX_VERTEX_INDEX_MARK1,
UNIX_VERTEX_INDEX_MARK2,
UNIX_VERTEX_INDEX_START,
};
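/* DFS indices start at UNIX_VERTEX_INDEX_START; MARK1/MARK2 are
 * reserved as the "unvisited" and "grouped" tags.  The two tags are
 * swapped after every full Tarjan pass, so already-grouped vertices
 * appear unvisited to the next pass without touching each one again.
 */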
static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1;
static unsigned long unix_vertex_max_scc_index = UNIX_VERTEX_INDEX_START;
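/* Link one edge into the predecessor's vertex, taking the vertex from
 * fpl's preallocated pool on first use.  Called under unix_gc_lock.
 */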
static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
struct unix_vertex *vertex = edge->predecessor->vertex;
if (!vertex) {
vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
vertex->index = unix_vertex_unvisited_index;
vertex->scc_index = ++unix_vertex_max_scc_index;
vertex->out_degree = 0;
INIT_LIST_HEAD(&vertex->edges);
INIT_LIST_HEAD(&vertex->scc_entry);
list_move_tail(&vertex->entry, &unix_unvisited_vertices);
edge->predecessor->vertex = vertex;
}
vertex->out_degree++;
list_add_tail(&edge->vertex_entry, &vertex->edges);
unix_update_graph(unix_edge_successor(edge));
}
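/* Unlink one edge; once the predecessor's out_degree drops to zero,
 * its vertex is returned to fpl's pool for freeing.  Called under
 * unix_gc_lock.
 */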
static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
struct unix_vertex *vertex = edge->predecessor->vertex;
if (!fpl->dead)
unix_update_graph(unix_edge_successor(edge));
list_del(&edge->vertex_entry);
vertex->out_degree--;
if (!vertex->out_degree) {
edge->predecessor->vertex = NULL;
list_move_tail(&vertex->entry, &fpl->vertices);
}
}
static void unix_free_vertices(struct scm_fp_list *fpl)
{
struct unix_vertex *vertex, *next_vertex;
list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
list_del(&vertex->entry);
kfree(vertex);
}
}
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(unix_gc_lock);
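/* Register every AF_UNIX fd in fpl as inflight, wiring up edges to the
 * receiver.  Vertices left unused in fpl->vertices (because the sender
 * was already inflight and had a vertex) are freed at the end.
 */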
void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{
int i = 0, j = 0;
spin_lock(&unix_gc_lock);
if (!fpl->count_unix)
goto out;
do {
struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
struct unix_edge *edge;
if (!inflight)
continue;
edge = fpl->edges + i++;
edge->predecessor = inflight;
edge->successor = receiver;
unix_add_edge(fpl, edge);
} while (i < fpl->count_unix);
receiver->scm_stat.nr_unix_fds += fpl->count_unix;
out:
WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);
spin_unlock(&unix_gc_lock);
fpl->inflight = true;
unix_free_vertices(fpl);
}
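/* Drop the inflight edges when the skb carrying fpl is received or
 * destroyed.  If fpl->dead, the GC is purging this skb and the receiver
 * may already be gone, so it must not be touched.
 */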
void unix_del_edges(struct scm_fp_list *fpl)
{
struct unix_sock *receiver;
int i = 0;
spin_lock(&unix_gc_lock);
if (!fpl->count_unix)
goto out;
do {
struct unix_edge *edge = fpl->edges + i++;
unix_del_edge(fpl, edge);
} while (i < fpl->count_unix);
if (!fpl->dead) {
receiver = fpl->edges[0].successor;
receiver->scm_stat.nr_unix_fds -= fpl->count_unix;
}
out:
WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);
spin_unlock(&unix_gc_lock);
fpl->inflight = false;
}
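/* Called when an embryo socket is accept()ed.  If no fd is inflight for
 * it, the socket is not part of the graph and the listener pointer can
 * be cleared without taking unix_gc_lock.
 */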
void unix_update_edges(struct unix_sock *receiver)
{
if (!receiver->scm_stat.nr_unix_fds) {
receiver->listener = NULL;
} else {
spin_lock(&unix_gc_lock);
unix_update_graph(unix_sk(receiver->listener)->vertex);
receiver->listener = NULL;
spin_unlock(&unix_gc_lock);
}
}
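/* Preallocate one vertex per AF_UNIX fd plus the edge array before the
 * fds go inflight.  Some vertices may turn out unneeded; leftovers are
 * freed in unix_add_edges().  Scheduling the GC here gives it a chance
 * to run before yet more fds are queued.
 */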
int unix_prepare_fpl(struct scm_fp_list *fpl)
{
struct unix_vertex *vertex;
int i;
if (!fpl->count_unix)
return 0;
for (i = 0; i < fpl->count_unix; i++) {
		vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
if (!vertex)
goto err;
list_add(&vertex->entry, &fpl->vertices);
}
	fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
				    GFP_KERNEL_ACCOUNT);
if (!fpl->edges)
goto err;
unix_schedule_gc(fpl->user);
return 0;
err:
unix_free_vertices(fpl);
return -ENOMEM;
}
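/* Undo unix_prepare_fpl() and, if still inflight, unix_add_edges(). */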
void unix_destroy_fpl(struct scm_fp_list *fpl)
{
if (fpl->inflight)
unix_del_edges(fpl);
kvfree(fpl->edges);
unix_free_vertices(fpl);
}
static bool gc_in_progress;
static seqcount_t unix_peek_seq = SEQCNT_ZERO(unix_peek_seq);
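/* MSG_PEEK clones inflight fds into a process without dequeueing the
 * skb, racing with a GC pass that may have already judged the SCC dead.
 * Bumping the seqcount forces a concurrent unix_scc_dead() to report
 * the SCC as alive for this cycle; the spinlock serialises writers.
 */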
void unix_peek_fpl(struct scm_fp_list *fpl)
{
static DEFINE_SPINLOCK(unix_peek_lock);
if (!fpl || !fpl->count_unix)
return;
if (!READ_ONCE(gc_in_progress))
return;
spin_lock(&unix_peek_lock);
raw_write_seqcount_barrier(&unix_peek_seq);
spin_unlock(&unix_peek_lock);
}
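/* A vertex is dead if every receiver of its fd is in the same SCC and
 * the file's refcount equals out_degree, i.e. all remaining references
 * come from inflight skbs within the SCC; userspace close()d the rest.
 */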
static bool unix_vertex_dead(struct unix_vertex *vertex)
{
struct unix_edge *edge;
struct unix_sock *u;
long total_ref;
list_for_each_entry(edge, &vertex->edges, vertex_entry) {
struct unix_vertex *next_vertex = unix_edge_successor(edge);
if (!next_vertex)
return false;
if (next_vertex->scc_index != vertex->scc_index)
return false;
}
edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
u = edge->predecessor;
total_ref = file_count(u->sk.sk_socket->file);
if (total_ref != vertex->out_degree)
return false;
return true;
}
static LIST_HEAD(unix_visited_vertices);
static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;
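/* Move the SCC's vertices to the visited list and test whether every
 * vertex is dead.  @fast skips re-tagging indices, which only the full
 * Tarjan pass relies on.  A concurrent unix_peek_fpl() invalidates the
 * verdict, so treat the SCC as alive when the seqcount moved.
 */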
static bool unix_scc_dead(struct list_head *scc, bool fast)
{
struct unix_vertex *vertex;
bool scc_dead = true;
unsigned int seq;
seq = read_seqcount_begin(&unix_peek_seq);
list_for_each_entry_reverse(vertex, scc, scc_entry) {
list_move_tail(&vertex->entry, &unix_visited_vertices);
if (!fast)
vertex->index = unix_vertex_grouped_index;
if (scc_dead)
scc_dead = unix_vertex_dead(vertex);
}
if (read_seqcount_retry(&unix_peek_seq, seq))
return false;
return scc_dead;
}
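/* Splice every receive queue in a dead SCC onto the hitlist.  For a
 * listener, the queued skbs hold embryo sockets, so the embryos'
 * receive queues are spliced instead; the embryo queue lock nests
 * inside the listener's queue lock, never the other way round.
 */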
static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist)
{
struct unix_vertex *vertex;
list_for_each_entry_reverse(vertex, scc, scc_entry) {
struct sk_buff_head *queue;
struct unix_edge *edge;
struct unix_sock *u;
edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
u = edge->predecessor;
queue = &u->sk.sk_receive_queue;
spin_lock(&queue->lock);
if (u->sk.sk_state == TCP_LISTEN) {
struct sk_buff *skb;
skb_queue_walk(queue, skb) {
struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;
spin_lock(&embryo_queue->lock);
skb_queue_splice_init(embryo_queue, hitlist);
spin_unlock(&embryo_queue->lock);
}
} else {
skb_queue_splice_init(queue, hitlist);
}
spin_unlock(&queue->lock);
}
}
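/* A multi-vertex SCC is cyclic by definition; a single vertex is cyclic
 * only if it has an edge to itself (its own fd inflight on itself).
 */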
static bool unix_scc_cyclic(struct list_head *scc)
{
struct unix_vertex *vertex;
struct unix_edge *edge;
if (!list_is_singular(scc))
return true;
vertex = list_first_entry(scc, typeof(*vertex), scc_entry);
list_for_each_entry(edge, &vertex->edges, vertex_entry) {
if (unix_edge_successor(edge) == vertex)
return true;
}
return false;
}
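/* Tarjan's strongly connected components algorithm, made iterative:
 * instead of recursing, a forward edge is pushed onto edge_stack and
 * the prev_vertex label resumes the parent's loop when backtracking.
 * Dead SCCs have their skbs collected into the hitlist right away;
 * the return value counts the still-alive cyclic SCCs found.
 */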
static unsigned long __unix_walk_scc(struct unix_vertex *vertex,
unsigned long *last_index,
struct sk_buff_head *hitlist)
{
unsigned long cyclic_sccs = 0;
LIST_HEAD(vertex_stack);
struct unix_edge *edge;
LIST_HEAD(edge_stack);
next_vertex:
list_add(&vertex->scc_entry, &vertex_stack);
vertex->index = *last_index;
vertex->scc_index = *last_index;
(*last_index)++;
list_for_each_entry(edge, &vertex->edges, vertex_entry) {
struct unix_vertex *next_vertex = unix_edge_successor(edge);
if (!next_vertex)
continue;
if (next_vertex->index == unix_vertex_unvisited_index) {
list_add(&edge->stack_entry, &edge_stack);
vertex = next_vertex;
goto next_vertex;
prev_vertex:
edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
list_del_init(&edge->stack_entry);
next_vertex = vertex;
vertex = edge->predecessor->vertex;
vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
		} else if (next_vertex->index != unix_vertex_grouped_index) {
			/* Back/cross edge to a vertex still on the stack:
			 * same SCC, so propagate the smaller scc_index.
			 */
			vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
		}
}
if (vertex->index == vertex->scc_index) {
struct list_head scc;
__list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);
if (unix_scc_dead(&scc, false)) {
unix_collect_skb(&scc, hitlist);
} else {
if (unix_vertex_max_scc_index < vertex->scc_index)
unix_vertex_max_scc_index = vertex->scc_index;
if (unix_scc_cyclic(&scc))
cyclic_sccs++;
}
list_del(&scc);
}
if (!list_empty(&edge_stack))
goto prev_vertex;
return cyclic_sccs;
}
static unsigned long unix_graph_cyclic_sccs;
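/* Full pass: group all vertices into SCCs, collect the dead ones, and
 * record how many live cyclic SCCs remain.  Afterwards the vertex list
 * and index tags are reset so the next pass starts clean.
 */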
static void unix_walk_scc(struct sk_buff_head *hitlist)
{
unsigned long last_index = UNIX_VERTEX_INDEX_START;
unsigned long cyclic_sccs = 0;
unix_vertex_max_scc_index = UNIX_VERTEX_INDEX_START;
while (!list_empty(&unix_unvisited_vertices)) {
struct unix_vertex *vertex;
vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
cyclic_sccs += __unix_walk_scc(vertex, &last_index, hitlist);
}
list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);
WRITE_ONCE(unix_graph_cyclic_sccs, cyclic_sccs);
WRITE_ONCE(unix_graph_state,
cyclic_sccs ? UNIX_GRAPH_CYCLIC : UNIX_GRAPH_NOT_CYCLIC);
}
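/* Fast pass while the grouping is still valid (UNIX_GRAPH_CYCLIC):
 * skip Tarjan and just re-test each previously recorded SCC for
 * deadness.  A dead SCC must have been cyclic, so the count drops.
 */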
static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
{
unsigned long cyclic_sccs = unix_graph_cyclic_sccs;
while (!list_empty(&unix_unvisited_vertices)) {
struct unix_vertex *vertex;
struct list_head scc;
vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
list_add(&scc, &vertex->scc_entry);
if (unix_scc_dead(&scc, true)) {
cyclic_sccs--;
unix_collect_skb(&scc, hitlist);
}
list_del(&scc);
}
list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
WRITE_ONCE(unix_graph_cyclic_sccs, cyclic_sccs);
WRITE_ONCE(unix_graph_state,
cyclic_sccs ? UNIX_GRAPH_CYCLIC : UNIX_GRAPH_NOT_CYCLIC);
}
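/* Work handler: one GC cycle.  Skbs queued between dead inflight
 * sockets are collected under unix_gc_lock, then their fpls are marked
 * dead (so unix_del_edges() won't touch already-freed receivers) and
 * the hitlist is purged outside the lock.
 */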
static void unix_gc(struct work_struct *work)
{
struct sk_buff_head hitlist;
struct sk_buff *skb;
spin_lock(&unix_gc_lock);
if (unix_graph_state == UNIX_GRAPH_NOT_CYCLIC) {
spin_unlock(&unix_gc_lock);
goto skip_gc;
}
__skb_queue_head_init(&hitlist);
if (unix_graph_state == UNIX_GRAPH_CYCLIC)
unix_walk_scc_fast(&hitlist);
else
unix_walk_scc(&hitlist);
spin_unlock(&unix_gc_lock);
skb_queue_walk(&hitlist, skb) {
if (UNIXCB(skb).fp)
UNIXCB(skb).fp->dead = true;
}
__skb_queue_purge_reason(&hitlist, SKB_DROP_REASON_SOCKET_CLOSE);
skip_gc:
WRITE_ONCE(gc_in_progress, false);
}
static DECLARE_WORK(unix_gc_work, unix_gc);
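/* Cap on a single user's inflight fds that have not been received yet;
 * beyond it, the sender is made to wait for the GC.
 */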
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)
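/* Kick the GC if a cycle may exist.  @user is the sending user on the
 * sendmsg() path (NULL when no sender is being accounted); on that path
 * the GC only runs once the user exceeds the sane threshold, and such a
 * sender additionally waits for the running GC while cyclic garbage is
 * known to exist.
 */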
void unix_schedule_gc(struct user_struct *user)
{
if (READ_ONCE(unix_graph_state) == UNIX_GRAPH_NOT_CYCLIC)
return;
if (user &&
READ_ONCE(user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
return;
if (!READ_ONCE(gc_in_progress)) {
WRITE_ONCE(gc_in_progress, true);
queue_work(system_dfl_wq, &unix_gc_work);
}
if (user && READ_ONCE(unix_graph_cyclic_sccs))
flush_work(&unix_gc_work);
}