pchild
void pchild(int);	/* pchild used here as a SIGCHLD handler: declare it before installing it */
(void) signal(SIGCHLD, pchild);	/* while signals not ready */
struct svm_range *prange, *pchild;

/* drop one queue reference on every child range, never going below zero */
list_for_each_entry(pchild, &prange->child_list, child_list)
	atomic_add_unless(&pchild->queue_refcount, -1, 0);
svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op)
{
	pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p op %d\n",
		 pchild, pchild->start, pchild->last, prange, op);

	/* record the deferred operation and queue the child on the parent's list */
	pchild->work_item.mm = NULL;
	pchild->work_item.op = op;
	list_add_tail(&pchild->child_list, &prange->child_list);
}
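These snippets touch only a handful of svm_range members. The following trimmed-down sketch is inferred from the usages in this listing, not copied from the kernel header; the field types and comments are assumptions, and the real struct svm_range in kfd_svm.h has many more members (list_head, mutex, atomic_t come from the usual kernel headers).

/* Illustrative subset only, inferred from the code above. */
struct svm_work_list_item {
	enum svm_work_list_ops op;	/* deferred operation queued for this child */
	struct mm_struct *mm;		/* address space the operation targets, if any */
};

struct svm_range {
	unsigned long start;			/* first page number of the range */
	unsigned long last;			/* last page number, inclusive */
	struct list_head child_list;		/* parent: list head; child: link into parent */
	struct svm_work_list_item work_item;	/* pending deferred-list work for this child */
	struct mutex lock;			/* protects per-range state */
	atomic_t invalid;			/* bumped when the range must be revalidated */
	atomic_t queue_refcount;		/* user-queue references on this range */
	bool mapped_to_gpu;			/* currently mapped on at least one GPU */
};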
struct svm_range *pchild;

/* mark every overlapping child invalid; children never mapped to a GPU are skipped */
list_for_each_entry(pchild, &prange->child_list, child_list) {
	if (!pchild->mapped_to_gpu)
		continue;
	mutex_lock_nested(&pchild->lock, 1);
	if (pchild->start <= last && pchild->last >= start) {
		pr_debug("increment pchild invalid [0x%lx 0x%lx]\n",
			 pchild->start, pchild->last);
		atomic_inc(&pchild->invalid);
	}
	mutex_unlock(&pchild->lock);
}
/* clamp the unmap interval to each child and unmap only a non-empty overlap */
list_for_each_entry(pchild, &prange->child_list, child_list) {
	mutex_lock_nested(&pchild->lock, 1);
	s = max(start, pchild->start);
	l = min(last, pchild->last);
	if (l >= s)
		svm_range_unmap_from_gpus(pchild, s, l, trigger);
	mutex_unlock(&pchild->lock);
}
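The clamp above is plain interval arithmetic: intersect [start, last] with [pchild->start, pchild->last], and treat the result as empty when the upper bound falls below the lower one. A tiny standalone illustration of that arithmetic, using made-up page numbers rather than kernel code:

#include <stdio.h>

/* Intersect [start, last] with [cstart, clast]; both ends inclusive, in pages. */
static int overlap(unsigned long start, unsigned long last,
		   unsigned long cstart, unsigned long clast,
		   unsigned long *s, unsigned long *l)
{
	*s = start > cstart ? start : cstart;	/* s = max(start, cstart) */
	*l = last < clast ? last : clast;	/* l = min(last, clast)   */
	return *l >= *s;			/* non-empty overlap?     */
}

int main(void)
{
	unsigned long s, l;

	/* child [0x100, 0x1ff] vs request [0x180, 0x300]: overlap is [0x180, 0x1ff] */
	if (overlap(0x180, 0x300, 0x100, 0x1ff, &s, &l))
		printf("unmap [0x%lx 0x%lx]\n", s, l);

	/* child [0x400, 0x4ff] vs the same request: no overlap, nothing to unmap */
	if (!overlap(0x180, 0x300, 0x400, 0x4ff, &s, &l))
		printf("skip child\n");
	return 0;
}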
struct svm_range *pchild;

/* pop the first child, log its queued op, detach it, then apply the deferred op */
pchild = list_first_entry(&prange->child_list,
			  struct svm_range, child_list);
pr_debug("child prange 0x%p op %d\n", pchild,
	 pchild->work_item.op);
list_del_init(&pchild->child_list);
svm_range_handle_list_op(svms, pchild, mm);
struct svm_range *pchild;

/* unmap the overlapping part of each child, then split the child against [start, last] */
list_for_each_entry(pchild, &prange->child_list, child_list) {
	mutex_lock_nested(&pchild->lock, 1);
	s = max(start, pchild->start);
	l = min(last, pchild->last);
	if (l >= s)
		svm_range_unmap_from_gpus(pchild, s, l, trigger);
	svm_range_unmap_split(prange, pchild, start, last);
	mutex_unlock(&pchild->lock);
}
struct svm_range *pchild;

/* return the child range that contains addr, if any */
list_for_each_entry(pchild, &prange->child_list, child_list)
	if (addr >= pchild->start && addr <= pchild->last) {
		pr_debug("found address 0x%lx pchild [0x%lx 0x%lx]\n",
			 addr, pchild->start, pchild->last);
		return pchild;
	}
struct svm_range *prange, *pchild;

/* per-child size in bytes: inclusive page span shifted up by PAGE_SHIFT */
list_for_each_entry(pchild, &prange->child_list, child_list) {
	size = (pchild->last - pchild->start + 1) << PAGE_SHIFT;
}
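Taken together, the snippets describe one pattern: a parent range keeps split-off children on a list, and callers walk that list to invalidate, unmap, look up, or size each child. Below is a self-contained userspace sketch of the lookup-and-size walk, using a hand-rolled singly linked list and invented page numbers instead of the kernel's list.h and struct svm_range; names such as child_range and child_from_addr are hypothetical.

#include <stdio.h>

#define PAGE_SHIFT 12

/* Simplified stand-in for a child range: [start, last] are inclusive page numbers. */
struct child_range {
	unsigned long start, last;
	struct child_range *next;	/* takes the place of the kernel child_list link */
};

/* Return the child whose page span contains addr, or NULL if none does. */
static struct child_range *child_from_addr(struct child_range *head, unsigned long addr)
{
	for (struct child_range *c = head; c; c = c->next)
		if (addr >= c->start && addr <= c->last)
			return c;
	return NULL;
}

int main(void)
{
	struct child_range c2 = { 0x200, 0x2ff, NULL };
	struct child_range c1 = { 0x100, 0x17f, &c2 };
	struct child_range *hit = child_from_addr(&c1, 0x250);

	if (hit) {
		/* same size arithmetic as the kernel snippet above */
		unsigned long size = (hit->last - hit->start + 1) << PAGE_SHIFT;
		printf("addr 0x250 falls in child [0x%lx 0x%lx], %lu bytes\n",
		       hit->start, hit->last, size);
	}
	return 0;
}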