#include <sys/cdefs.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/priority.h>
/*
 * Flag bits kept in task_struct::kthread_flags.  Together they implement
 * the Linux kthread stop/park handshake between a controlling thread
 * (kthread_stop()/kthread_park()/kthread_unpark()) and the kthread itself
 * (kthread_should_stop()/kthread_parkme()).
 */
enum {
KTHREAD_SHOULD_STOP_MASK = (1 << 0),	/* kthread_stop() was requested */
KTHREAD_SHOULD_PARK_MASK = (1 << 1),	/* kthread_park() was requested */
KTHREAD_IS_PARKED_MASK = (1 << 2),	/* kthread has acknowledged parking */
};
/*
 * Return true if kthread_stop() has been called for the given task.
 */
bool
linux_kthread_should_stop_task(struct task_struct *task)
{
	int flags;

	flags = atomic_read(&task->kthread_flags);
	return ((flags & KTHREAD_SHOULD_STOP_MASK) != 0);
}
bool
linux_kthread_should_stop(void)
{
return (atomic_read(¤t->kthread_flags) & KTHREAD_SHOULD_STOP_MASK);
}
/*
 * Ask the given kthread to stop, wait until it has exited and return
 * its exit code.  Drops the caller's reference on the task.
 */
int
linux_kthread_stop(struct task_struct *task)
{
	int ret;

	/*
	 * Raise the stop flag first, then unpark and wake the thread so
	 * it is guaranteed to observe the request.
	 */
	atomic_or(KTHREAD_SHOULD_STOP_MASK, &task->kthread_flags);
	kthread_unpark(task);
	wake_up_process(task);

	/* Wait for the thread to finish and collect its return value. */
	wait_for_completion(&task->exited);
	ret = task->task_ret;
	put_task_struct(task);

	return (ret);
}
/*
 * Ask the given kthread to park and wait until it has done so.
 * The thread acknowledges by completing "parked" from
 * linux_kthread_parkme().  Always returns 0.
 */
int
linux_kthread_park(struct task_struct *task)
{
/* Set the request flag, then wake the thread so it notices it. */
atomic_or(KTHREAD_SHOULD_PARK_MASK, &task->kthread_flags);
wake_up_process(task);
/* Block until the thread signals that it is parked. */
wait_for_completion(&task->parked);
return (0);
}
/*
 * Called by a kthread on itself to honor a park request.  Sleeps in
 * TASK_PARKED state until linux_kthread_unpark() clears the request
 * and wakes us.
 */
void
linux_kthread_parkme(void)
{
struct task_struct *task;

task = current;
/* Must be set before re-checking the park flag to avoid a lost wakeup. */
set_task_state(task, TASK_PARKED | TASK_UNINTERRUPTIBLE);
while (linux_kthread_should_park()) {
/*
 * Set IS_PARKED exactly once per park cycle; only the thread
 * that transitions the bit 0 -> 1 completes "parked", so the
 * waiter in linux_kthread_park() is signalled a single time.
 */
while ((atomic_fetch_or(KTHREAD_IS_PARKED_MASK,
&task->kthread_flags) & KTHREAD_IS_PARKED_MASK) == 0)
complete(&task->parked);
schedule();
/* Re-arm the sleep state before re-testing the park flag. */
set_task_state(task, TASK_PARKED | TASK_UNINTERRUPTIBLE);
}
/* No longer parked; clear the bit and go back to running. */
atomic_andnot(KTHREAD_IS_PARKED_MASK, &task->kthread_flags);
set_task_state(task, TASK_RUNNING);
}
/*
 * Return true if kthread_park() has been called for the current thread.
 */
bool
linux_kthread_should_park(void)
{
	return ((atomic_read(&current->kthread_flags) &
	    KTHREAD_SHOULD_PARK_MASK) != 0);
}
/*
 * Cancel a park request for the given kthread and, if the thread has
 * already parked, wake it out of its TASK_PARKED sleep.
 */
void
linux_kthread_unpark(struct task_struct *task)
{
/* Clear the request first so the woken thread exits its park loop. */
atomic_andnot(KTHREAD_SHOULD_PARK_MASK, &task->kthread_flags);
/* Only wake if the thread actually reached the parked state. */
if ((atomic_fetch_andnot(KTHREAD_IS_PARKED_MASK, &task->kthread_flags) &
KTHREAD_IS_PARKED_MASK) != 0)
wake_up_state(task, TASK_PARKED);
}
/*
 * Finish setup of a newly created kthread: bind the LinuxKPI current
 * pointer to the FreeBSD thread, record the thread function and its
 * argument, then place the thread on a run queue.  Returns the
 * task_struct associated with the thread.
 */
struct task_struct *
linux_kthread_setup_and_run(struct thread *td, linux_task_fn_t *task_fn, void *arg)
{
struct task_struct *task;

linux_set_current(td);
task = td->td_lkpi_task;
task->task_fn = task_fn;
task->task_data = arg;
thread_lock(td);
/* Raise the scheduling priority before making the thread runnable. */
sched_prio(td, PI_SWI(SWI_NET));
/*
 * NOTE(review): no explicit thread_unlock() here — this relies on
 * sched_add() releasing the thread lock; confirm against the
 * scheduler implementation in use.
 */
sched_add(td, SRQ_BORING);
return (task);
}
/*
 * Entry point executed by every LinuxKPI kthread.  Runs the Linux
 * thread function (unless a stop was requested before it ever ran) and
 * then terminates the kernel thread.
 */
void
linux_kthread_fn(void *arg __unused)
{
struct task_struct *task = current;

/* If kthread_stop() raced in before we started, skip the body. */
if (linux_kthread_should_stop_task(task) == 0)
task->task_ret = task->task_fn(task->task_data);
if (linux_kthread_should_stop_task(task) != 0) {
struct thread *td = curthread;

/*
 * A stopper is waiting in linux_kthread_stop(): detach the
 * task from this thread and signal "exited" so the stopper
 * can read task_ret and release the task reference.
 */
td->td_lkpi_task = NULL;
complete(&task->exited);
}
kthread_exit();
}
/*
 * Taskqueue glue: run a queued Linux kthread_work item by invoking
 * its callback with the work item itself as argument.
 */
void
lkpi_kthread_work_fn(void *context, int pending __unused)
{
	struct kthread_work *work;

	work = context;
	(work->func)(work);
}
/*
 * Taskqueue glue: record the task_struct of the thread servicing a
 * kthread_worker so later work items can reference it.
 */
void
lkpi_kthread_worker_init_fn(void *context, int pending __unused)
{
	struct kthread_worker *w = context;

	w->task = current;
}