#include <sys/param.h>
#include <sys/systm.h>
#include <sys/exec.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <sys/sched.h>
#include <sys/exec_elf.h>
#include <sys/mount.h>
#include <sys/syscallargs.h>
#include <uvm/uvm_extern.h>
#include <machine/reg.h>
#ifdef PTRACE
/*
 * Local helpers: process_checktracestate() verifies the target is traced
 * by the caller (and, when a thread is given, that it is stopped and
 * waited on); process_tprfind() resolves either a pid or a thread id
 * (tid + THREAD_PID_OFFSET) to a process and a thread within it.
 */
static inline int process_checktracestate(struct process *_curpr,
    struct process *_tr, struct proc *_t);
static inline struct process *process_tprfind(pid_t _tpid, struct proc **_tp);

/* The three request-class handlers dispatched from sys_ptrace(). */
int ptrace_ctrl(struct proc *, int, pid_t, caddr_t, int);
int ptrace_ustate(struct proc *, int, pid_t, void *, int, register_t *);
int ptrace_kstate(struct proc *, int, pid_t, void *);

/*
 * When nonzero, PT_ATTACH to a non-descendant does not require root
 * (see the inferior() check in ptrace_ctrl()).  Presumably set via
 * sysctl — TODO confirm which node toggles it.
 */
int global_ptrace;
/*
 * ptrace(2) syscall entry: validate the request, marshal the fixed-size
 * argument payload between user and kernel space, and dispatch to one of:
 *   ptrace_ctrl()   - control requests (trace-me/attach/detach/continue/
 *                     kill/step); these carry no payload buffer.
 *   ptrace_kstate() - requests on kernel-maintained tracing state
 *                     (thread iteration, event masks, process state).
 *   ptrace_ustate() - requests on a specific thread's user state
 *                     (target memory, registers, MD register sets).
 */
int
sys_ptrace(struct proc *p, void *v, register_t *retval)
{
	struct sys_ptrace_args
	    *uap = v;
	int req = SCARG(uap, req);
	pid_t pid = SCARG(uap, pid);
	caddr_t uaddr = SCARG(uap, addr);	/* userspace buffer */
	void *kaddr = NULL;			/* kernel-side buffer */
	int data = SCARG(uap, data);
	/* scratch space large enough for any fixed-size request payload */
	union {
		struct ptrace_thread_state u_pts;
		struct ptrace_io_desc u_piod;
		struct ptrace_event u_pe;
		struct ptrace_state u_ps;
		register_t u_wcookie;
		register_t u_pacmask[2];
	} u;
	int size = 0;
	/* direction of the payload and whether it lives in u or in malloc */
	enum { NONE, IN, IN_ALLOC, OUT, OUT_ALLOC, IN_OUT } mode;
	int kstate = 0;		/* 1: dispatch to ptrace_kstate() */
	int error;

	*retval = 0;

	/* Figure out what sort of copyin/copyout operations we'll need. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_ATTACH:
	case PT_DETACH:
#ifdef PT_STEP
	case PT_STEP:
#endif
		/* control operations: nothing to marshal */
		return ptrace_ctrl(p, req, pid, uaddr, data);

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
		/* uaddr is an address in the traced process, not a buffer */
		mode = NONE;
		break;
	case PT_IO:
		mode = IN_OUT;
		size = sizeof u.u_piod;
		data = size;	/* force the data == size check below to pass */
		break;
	case PT_GET_THREAD_FIRST:
		mode = OUT;
		size = sizeof u.u_pts;
		kstate = 1;
		break;
	case PT_GET_THREAD_NEXT:
		/* IN_OUT: reads the previous tid, writes the next one */
		mode = IN_OUT;
		size = sizeof u.u_pts;
		kstate = 1;
		break;
	case PT_GET_EVENT_MASK:
		mode = OUT;
		size = sizeof u.u_pe;
		kstate = 1;
		break;
	case PT_SET_EVENT_MASK:
		mode = IN;
		size = sizeof u.u_pe;
		kstate = 1;
		break;
	case PT_GET_PROCESS_STATE:
		mode = OUT;
		size = sizeof u.u_ps;
		kstate = 1;
		break;
	case PT_GETREGS:
		mode = OUT_ALLOC;	/* register sets go through malloc */
		size = sizeof(struct reg);
		break;
	case PT_SETREGS:
		mode = IN_ALLOC;
		size = sizeof(struct reg);
		break;
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		mode = OUT_ALLOC;
		size = sizeof(struct fpreg);
		break;
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		mode = IN_ALLOC;
		size = sizeof(struct fpreg);
		break;
#endif
#ifdef PT_GETXMMREGS
	case PT_GETXMMREGS:
		mode = OUT_ALLOC;
		size = sizeof(struct xmmregs);
		break;
#endif
#ifdef PT_SETXMMREGS
	case PT_SETXMMREGS:
		mode = IN_ALLOC;
		size = sizeof(struct xmmregs);
		break;
#endif
#ifdef PT_WCOOKIE
	case PT_WCOOKIE:
		mode = OUT;
		size = sizeof u.u_wcookie;
		data = size;	/* force the data == size check below to pass */
		break;
#endif
#ifdef PT_PACMASK
	case PT_PACMASK:
		mode = OUT;
		size = sizeof u.u_pacmask;
		/*
		 * NOTE(review): unlike PT_WCOOKIE, data is not forced to
		 * size here, so userland must pass data == size or get
		 * EINVAL — confirm this asymmetry is intended.
		 */
		break;
#endif
#ifdef PT_GETXSTATE_INFO
	case PT_GETXSTATE_INFO:
		mode = OUT_ALLOC;
		size = sizeof(struct ptrace_xstate_info);
		break;
#endif
#ifdef PT_GETXSTATE
	case PT_GETXSTATE:
		mode = OUT_ALLOC;
		size = fpu_save_len;	/* MD, sized at runtime */
		break;
#endif
#ifdef PT_SETXSTATE
	case PT_SETXSTATE:
		mode = IN_ALLOC;
		size = fpu_save_len;
		break;
#endif
	default:
		return EINVAL;
	}

	/* Now do any necessary copyin()s and allocations. */
	switch (mode) {
	case NONE:
		kaddr = uaddr;
		break;
	case IN:
	case IN_OUT:
	case OUT:
		KASSERT(size <= sizeof u);
		/* caller-supplied length must match the payload exactly */
		if (data != size)
			return EINVAL;
		if (mode == OUT)
			/* zero so no uninitialized stack reaches userland */
			memset(&u, 0, size);
		else {
			if ((error = copyin(uaddr, &u, size)))
				return error;
		}
		kaddr = &u;
		break;
	case IN_ALLOC:
		kaddr = malloc(size, M_TEMP, M_WAITOK);
		if ((error = copyin(uaddr, kaddr, size))) {
			free(kaddr, M_TEMP, size);
			return error;
		}
		break;
	case OUT_ALLOC:
		kaddr = malloc(size, M_TEMP, M_WAITOK | M_ZERO);
		break;
	}

	/* Dispatch to the right handler for this request class. */
	if (kstate)
		error = ptrace_kstate(p, req, pid, kaddr);
	else
		error = ptrace_ustate(p, req, pid, kaddr, data, retval);

	/* Do any necessary copyout()s on success. */
	if (error == 0) {
		switch (mode) {
		case NONE:
		case IN:
		case IN_ALLOC:
			break;
		case IN_OUT:
		case OUT:
			error = copyout(&u, uaddr, size);
			if (req == PT_IO) {
				/*
				 * PT_IO reports success even if the
				 * copyout of the descriptor failed —
				 * presumably historic ABI behavior;
				 * NOTE(review): confirm.
				 */
				error = 0;
			}
			break;
		case OUT_ALLOC:
			error = copyout(kaddr, uaddr, size);
			break;
		}
	}

	/* Release the temporary buffer on both success and failure. */
	if (mode == IN_ALLOC || mode == OUT_ALLOC)
		free(kaddr, M_TEMP, size);
	return error;
}
/*
 * Handle the control requests: PT_TRACE_ME, PT_ATTACH, PT_DETACH,
 * PT_CONTINUE, PT_KILL and (where supported) PT_STEP.  addr and data
 * are interpreted per request; for the resume-style requests, data is
 * the signal number to deliver (0 = none) and addr the new PC, with
 * (caddr_t)1 meaning "resume at the current PC".
 */
int
ptrace_ctrl(struct proc *p, int req, pid_t pid, caddr_t addr, int data)
{
	struct proc *t;			/* target thread */
	struct process *tr;		/* target process */
	int error = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* The caller marks itself traced by its current parent. */
		tr = p->p_p;
		mtx_enter(&tr->ps_mtx);
		if (ISSET(tr->ps_flags, PS_TRACED)) {
			mtx_leave(&tr->ps_mtx);
			return EBUSY;
		}
		atomic_setbits_int(&tr->ps_flags, PS_TRACED);
		tr->ps_opptr = tr->ps_pptr;	/* remember original parent */
		mtx_leave(&tr->ps_mtx);
		/* allocate ptrace status storage on first use */
		if (tr->ps_ptstat == NULL)
			tr->ps_ptstat = malloc(sizeof(*tr->ps_ptstat),
			    M_SUBPROC, M_WAITOK);
		memset(tr->ps_ptstat, 0, sizeof(*tr->ps_ptstat));
		return 0;

	/* requests that operate on whole processes only... */
	case PT_KILL:
	case PT_ATTACH:
	case PT_DETACH:
		/* ...so a thread id is not acceptable here */
		if (pid > THREAD_PID_OFFSET) {
			error = ESRCH;
			goto fail;
		}
		/* FALLTHROUGH */

	/* requests that accept either a pid or a thread id */
	case PT_CONTINUE:
#ifdef PT_STEP
	case PT_STEP:
#endif
		if ((tr = process_tprfind(pid, &t)) == NULL) {
			error = ESRCH;
			goto fail;
		}
		break;
	}

	if (req != PT_ATTACH) {
		/* the signal to deliver on resume must be a valid number */
		if (req != PT_KILL && (data < 0 || data >= NSIG)) {
			error = EINVAL;
			goto fail;
		}
		/* target must be traced by us, stopped, and waited on */
		if ((error = process_checktracestate(p->p_p, tr, t)))
			goto fail;
		FIX_SSTEP(t);	/* MD single-step state fixup */
	} else {
		/* PT_ATTACH preconditions: */
		/* can't attach to ourselves */
		if (tr == p->p_p) {
			error = EINVAL;
			goto fail;
		}
		/* nor to system (kernel) processes */
		if (ISSET(tr->ps_flags, PS_SYSTEM)) {
			error = EPERM;
			goto fail;
		}
		/* nor to a process already being traced */
		if (ISSET(tr->ps_flags, PS_TRACED)) {
			error = EBUSY;
			goto fail;
		}
		/* nor to one in the middle of exec */
		if (ISSET(tr->ps_flags, PS_INEXEC)) {
			error = EAGAIN;
			goto fail;
		}
		/* crossing real uids or touching a set-id image needs root */
		if ((tr->ps_ucred->cr_ruid != p->p_ucred->cr_ruid ||
		    ISSET(tr->ps_flags, PS_SUGIDEXEC | PS_SUGID)) &&
		    (error = suser(p)) != 0)
			goto fail;
		/* unless global_ptrace: only descendants, or be root */
		if (atomic_load_int(&global_ptrace) == 0 &&
		    !inferior(tr, p->p_p) && (error = suser(p)) != 0)
			goto fail;
		/* init is protected at securelevel > -1 */
		if ((tr->ps_pid == 1) && (securelevel > -1)) {
			error = EPERM;
			goto fail;
		}
		/* refuse to attach to our own ancestor (init excepted) */
		if (tr->ps_pid != 1 && inferior(p->p_p, tr)) {
			error = EINVAL;
			goto fail;
		}
	}

	switch (req) {
#ifdef PT_STEP
	case PT_STEP:
#endif
	case PT_CONTINUE:
		/* addr == (caddr_t)1 means "resume at the current PC" */
		if ((int *)addr != (int *)1)
			if ((error = process_set_pc(t, addr)) != 0)
				goto fail;
#ifdef PT_STEP
		/* enable single-stepping iff this is PT_STEP */
		error = process_sstep(t, req == PT_STEP);
		if (error)
			goto fail;
#endif
		goto sendsig;

	case PT_DETACH:
#ifdef PT_STEP
		/* make sure single-stepping is turned off */
		error = process_sstep(t, 0);
		if (error)
			goto fail;
#endif
		/* hand the process back to its original parent */
		mtx_enter(&tr->ps_mtx);
		process_untrace(tr);
		atomic_clearbits_int(&tr->ps_flags, PS_WAITED);
		mtx_leave(&tr->ps_mtx);

	sendsig:
		/* common resume path: clear the recorded ptrace state and
		 * restart the target, delivering signal `data' (0 = none) */
		memset(tr->ps_ptstat, 0, sizeof(*tr->ps_ptstat));
		mtx_enter(&tr->ps_mtx);
		if (tr->ps_trapped == t) {
			/* target thread is the trapped one: hand it the
			 * signal and make it runnable again */
			SCHED_LOCK();
			if (pid >= THREAD_PID_OFFSET)
				/* request addressed a single thread —
				 * presumably only it resumes; confirm
				 * P_TRACESINGLE semantics */
				atomic_setbits_int(&t->p_flag,
				    P_TRACESINGLE);
			tr->ps_xsig = data;
			unsleep(t);
			setrunnable(t);
			SCHED_UNLOCK();
			mtx_leave(&tr->ps_mtx);
		} else if (pid < THREAD_PID_OFFSET) {
			/* whole-process request with no trapped thread:
			 * just post the signal, if any */
			mtx_leave(&tr->ps_mtx);
			if (data != 0)
				ptsignal(t, data, SPROCESS);
		} else {
			/* a thread id was given but that thread is not
			 * the one currently trapped */
			mtx_leave(&tr->ps_mtx);
			error = EINVAL;
			goto fail;
		}
		break;

	case PT_KILL:
		/* resume with an unblockable SIGKILL */
		data = SIGKILL;
		goto sendsig;

	case PT_ATTACH:
		/* mark traced, remember the original parent, reparent to
		 * the tracer, then stop the target with SIGSTOP */
		mtx_enter(&tr->ps_mtx);
		atomic_setbits_int(&tr->ps_flags, PS_TRACED);
		tr->ps_opptr = tr->ps_pptr;
		process_reparent(tr, p->p_p);
		mtx_leave(&tr->ps_mtx);
		if (tr->ps_ptstat == NULL)
			tr->ps_ptstat = malloc(sizeof(*tr->ps_ptstat),
			    M_SUBPROC, M_WAITOK);
		data = SIGSTOP;
		goto sendsig;
	default:
		KASSERTMSG(0, "%s: unhandled request %d", __func__, req);
		break;
	}

fail:
	return error;
}
/*
 * Handle requests on kernel-maintained tracing state:
 * PT_GET_THREAD_FIRST/NEXT, PT_GET/SET_EVENT_MASK, PT_GET_PROCESS_STATE.
 * addr is an in-kernel payload buffer marshalled by sys_ptrace().
 */
int
ptrace_kstate(struct proc *p, int req, pid_t pid, void *addr)
{
	struct process *tr;		/* target process */
	struct ptrace_event *pe = addr;
	int error;

	KASSERT((p->p_flag & P_SYSTEM) == 0);

	/* Find the process we're supposed to be operating on. */
	if ((tr = prfind(pid)) == NULL)
		return ESRCH;

	/* Caller must be its tracer, and it must not be mid-exec. */
	if ((error = process_checktracestate(p->p_p, tr, NULL)))
		return error;

	switch (req) {
	case PT_GET_THREAD_FIRST:
	case PT_GET_THREAD_NEXT:
	  {
		struct ptrace_thread_state *pts = addr;
		struct proc *t;

		if (req == PT_GET_THREAD_NEXT) {
			/* resume iteration from the tid we handed back
			 * last time; it may have exited since then */
			t = tfind_user(pts->pts_tid, tr);
			if (t == NULL || ISSET(t->p_flag, P_WEXIT))
				return ESRCH;
			t = TAILQ_NEXT(t, p_thr_link);
		} else {
			t = TAILQ_FIRST(&tr->ps_threads);
		}

		if (t == NULL)
			pts->pts_tid = -1;	/* end of iteration */
		else {
			/* report the userland-visible thread id and name */
			pts->pts_tid = t->p_tid + THREAD_PID_OFFSET;
			CTASSERT(sizeof(pts->pts_name) >= sizeof(t->p_name));
			strlcpy(pts->pts_name, t->p_name, sizeof(pts->pts_name));
		}
		return 0;
	  }
	}

	switch (req) {
	case PT_GET_EVENT_MASK:
		pe->pe_set_event = tr->ps_ptmask;
		break;
	case PT_SET_EVENT_MASK:
		tr->ps_ptmask = pe->pe_set_event;
		break;
	case PT_GET_PROCESS_STATE:
		/* record which thread (if any) is currently trapped,
		 * then return a snapshot of the saved ptrace state */
		mtx_enter(&tr->ps_mtx);
		if (tr->ps_trapped != NULL)
			tr->ps_ptstat->pe_tid = tr->ps_trapped->p_tid +
			    THREAD_PID_OFFSET;
		else
			tr->ps_ptstat->pe_tid = 0;
		mtx_leave(&tr->ps_mtx);
		memcpy(addr, tr->ps_ptstat, sizeof *tr->ps_ptstat);
		break;
	default:
		KASSERTMSG(0, "%s: unhandled request %d", __func__, req);
		break;
	}

	return 0;
}
/*
 * Handle requests on a specific thread's user state: target memory
 * (PT_READ/WRITE/IO) and register or machine-dependent register sets.
 * For register requests, addr is a kernel buffer allocated by
 * sys_ptrace(); for PT_READ/WRITE_* it is the target-space address.
 *
 * Fix: PT_GETFPREGS and PT_GETXMMREGS were guarded by the corresponding
 * PT_SET* macros; on an arch defining only the GET variant the request
 * would fall through to the "unhandled request" assertion.  The guards
 * now match the ones used in sys_ptrace().
 */
int
ptrace_ustate(struct proc *p, int req, pid_t pid, void *addr, int data,
    register_t *retval)
{
	struct proc *t;			/* target thread */
	struct process *tr;		/* target process */
	struct uio uio;
	struct iovec iov;
	int error, write;
	int temp = 0;

	KASSERT((p->p_flag & P_SYSTEM) == 0);

	/* Accept either a process pid or a thread tid. */
	if ((tr = process_tprfind(pid, &t)) == NULL)
		return ESRCH;

	/* Target must be traced by us, stopped, and waited on. */
	if ((error = process_checktracestate(p->p_p, tr, t)))
		return error;

	FIX_SSTEP(t);	/* MD single-step state fixup */

	/* Make sure we have permission to touch this process. */
	write = 0;
	if ((error = process_checkioperm(p, tr)) != 0)
		return error;

	switch (req) {
	case PT_WRITE_I:	/* I and D variants are handled identically */
	case PT_WRITE_D:
		write = 1;
		temp = data;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		/* transfer a single int to/from the target's memory */
		iov.iov_base = (caddr_t)&temp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(vaddr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_procp = p;
		error = process_domem(p, tr, &uio, write ? PT_WRITE_I :
		    PT_READ_I);
		if (write == 0)
			*retval = temp;		/* return the word read */
		return error;

	case PT_IO:
	  {
		struct ptrace_io_desc *piod = addr;

		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(vaddr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_procp = p;

		switch (piod->piod_op) {
		case PIOD_READ_I:
			req = PT_READ_I;
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_READ_D:
			req = PT_READ_D;
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_I:
			req = PT_WRITE_I;
			uio.uio_rw = UIO_WRITE;
			break;
		case PIOD_WRITE_D:
			req = PT_WRITE_D;
			uio.uio_rw = UIO_WRITE;
			break;
		case PIOD_READ_AUXV:
			/* read from the ELF auxiliary vector; the offset
			 * is relative to ps_auxinfo and clamped to the
			 * aux area (and to ps_strings on stacks that
			 * grow down) */
			req = PT_READ_D;
			uio.uio_rw = UIO_READ;
			temp = ELF_AUX_WORDS * sizeof(char *);
			if (uio.uio_offset > temp)
				return EIO;
			if (uio.uio_resid > temp - uio.uio_offset)
				uio.uio_resid = temp - uio.uio_offset;
			piod->piod_len = iov.iov_len = uio.uio_resid;
			uio.uio_offset += tr->ps_auxinfo;
#ifdef MACHINE_STACK_GROWS_UP
			if (uio.uio_offset < (off_t)tr->ps_strings)
				return EIO;
#else
			if (uio.uio_offset > (off_t)tr->ps_strings)
				return EIO;
			if ((uio.uio_offset + uio.uio_resid) >
			    (off_t)tr->ps_strings)
				uio.uio_resid = (off_t)tr->ps_strings -
				    uio.uio_offset;
#endif
			break;
		default:
			return EINVAL;
		}
		error = process_domem(p, tr, &uio, req);
		/* report how many bytes were actually transferred */
		piod->piod_len -= uio.uio_resid;
		return error;
	  }

	case PT_SETREGS:
		return process_write_regs(t, addr);
	case PT_GETREGS:
		return process_read_regs(t, addr);

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		return process_write_fpregs(t, addr);
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		return process_read_fpregs(t, addr);
#endif
#ifdef PT_SETXMMREGS
	case PT_SETXMMREGS:
		return process_write_xmmregs(t, addr);
#endif
#ifdef PT_GETXMMREGS
	case PT_GETXMMREGS:
		return process_read_xmmregs(t, addr);
#endif
#ifdef PT_WCOOKIE
	case PT_WCOOKIE:
		*(register_t *)addr = process_get_wcookie(t);
		return 0;
#endif
#ifdef PT_PACMASK
	case PT_PACMASK:
		/* the same mask is reported in both slots */
		((register_t *)addr)[0] = process_get_pacmask(t);
		((register_t *)addr)[1] = process_get_pacmask(t);
		return 0;
#endif
#ifdef PT_GETXSTATE_INFO
	case PT_GETXSTATE_INFO:
		return process_read_xstate_info(t, addr);
#endif
#ifdef PT_GETXSTATE
	case PT_GETXSTATE:
		return process_read_xstate(t, addr);
#endif
#ifdef PT_SETXSTATE
	case PT_SETXSTATE:
		return process_write_xstate(t, addr);
#endif
	default:
		KASSERTMSG(0, "%s: unhandled request %d", __func__, req);
		break;
	}

	return 0;
}
/*
 * Resolve a pid or a thread id (tid + THREAD_PID_OFFSET) into the
 * target process, storing the chosen thread via *tp.  For a plain pid,
 * the thread is the currently trapped one if there is one, otherwise
 * the first thread of the process.  Returns NULL if nothing matches.
 */
static inline struct process *
process_tprfind(pid_t tpid, struct proc **tp)
{
	if (tpid > THREAD_PID_OFFSET) {
		/* thread id: find the thread, derive its process */
		struct proc *thr = tfind(tpid - THREAD_PID_OFFSET);

		if (thr == NULL)
			return NULL;
		*tp = thr;
		return thr->p_p;
	} else {
		/* process id: pick a representative thread */
		struct process *pr = prfind(tpid);
		struct proc *thr;

		if (pr == NULL)
			return NULL;
		mtx_enter(&pr->ps_mtx);
		thr = (pr->ps_trapped != NULL) ?
		    pr->ps_trapped : TAILQ_FIRST(&pr->ps_threads);
		mtx_leave(&pr->ps_mtx);
		*tp = thr;
		return pr;
	}
}
static inline int
process_checktracestate(struct process *curpr, struct process *tr,
struct proc *t)
{
if (!ISSET(tr->ps_flags, PS_TRACED))
return EPERM;
if (tr->ps_pptr != curpr)
return EBUSY;
if (ISSET(tr->ps_flags, PS_INEXEC))
return EAGAIN;
if (t != NULL &&
(t->p_stat != SSTOP || !ISSET(tr->ps_flags, PS_WAITED)))
return EBUSY;
return 0;
}
#endif
int
process_checkioperm(struct proc *p, struct process *tr)
{
int error;
if ((tr->ps_ucred->cr_ruid != p->p_ucred->cr_ruid ||
ISSET(tr->ps_flags, PS_SUGIDEXEC | PS_SUGID)) &&
(error = suser(p)) != 0)
return (error);
if ((tr->ps_pid == 1) && (securelevel > -1))
return (EPERM);
if (ISSET(tr->ps_flags, PS_INEXEC))
return (EAGAIN);
return (0);
}
/*
 * Transfer data described by uio to/from the address space of process
 * tr, on behalf of curp.  For instruction writes (PT_WRITE_I) the
 * target's icache is flushed over the touched range afterwards.
 */
int
process_domem(struct proc *curp, struct process *tr, struct uio *uio, int req)
{
	struct vmspace *vm;
	vaddr_t start;
	vsize_t nbytes;
	int rv;

	nbytes = uio->uio_resid;
	if (nbytes == 0)
		return 0;	/* nothing to do */

	rv = process_checkioperm(curp, tr);
	if (rv != 0)
		return rv;

	/* Refuse if the target is exiting or its vmspace is going away. */
	vm = tr->ps_vmspace;
	if ((tr->ps_flags & PS_EXITING) || (vm->vm_refcnt < 1))
		return EFAULT;
	start = uio->uio_offset;

	/* Hold a reference on the vmspace across the transfer. */
	uvmspace_addref(vm);
	rv = uvm_io(&vm->vm_map, uio, UVM_IO_FIXPROT);
	uvmspace_free(vm);

	if (rv == 0 && req == PT_WRITE_I)
		pmap_proc_iflush(tr, start, nbytes);

	return rv;
}