#include <sys/errno.h>
#include <sys/types.h>
#include <sys/zone.h>
#include <sys/proc.h>
#include <sys/project.h>
#include <sys/task.h>
#include <sys/thread.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/modhash.h>
#include <sys/modhash_impl.h>
#include <sys/shm.h>
#include <sys/swap.h>
#include <sys/synch.h>
#include <sys/systm.h>
#include <sys/var.h>
#include <sys/vm_usage.h>
#include <sys/zone.h>
#include <sys/sunddi.h>
#include <sys/avl.h>
#include <vm/anon.h>
#include <vm/as.h>
#include <vm/seg_vn.h>
#include <vm/seg_spt.h>
/* Number of buckets in each vmusage mod_hash. */
#define VMUSAGE_HASH_SIZE 512

/* Types of tracked shared objects (vmu_object_t.vmo_type). */
#define VMUSAGE_TYPE_VNODE 1
#define VMUSAGE_TYPE_AMP 2
#define VMUSAGE_TYPE_ANON 3

/* Classification of a bound (vmu_bound_t.vmb_type). */
#define VMUSAGE_BOUND_UNKNOWN 0
#define VMUSAGE_BOUND_INCORE 1
#define VMUSAGE_BOUND_NOT_INCORE 2

/*
 * Does page index `addr` fall within bound `node` (inclusive on both
 * ends)?  `addr` is now fully parenthesized so any expression may be
 * passed without operator-precedence surprises.
 */
#define ISWITHIN(node, addr) ((node)->vmb_start <= (addr) && \
    (node)->vmb_end >= (addr) ? 1 : 0)
/*
 * A bound is an inclusive range [vmb_start, vmb_end] of page indices
 * within a tracked object, classified by vmb_type (VMUSAGE_BOUND_*).
 */
typedef struct vmu_bound {
avl_node_t vmb_node;	/* linkage into a vmu_object's bounds AVL tree */
struct vmu_bound *vmb_next;	/* free-list / temporary-list linkage */
pgcnt_t vmb_start;	/* starting page index (inclusive) */
pgcnt_t vmb_end;	/* ending page index (inclusive) */
char vmb_type;	/* VMUSAGE_BOUND_{UNKNOWN,INCORE,NOT_INCORE} */
} vmu_bound_t;
/*
 * A tracked shared object: a vnode or an anon_map (per vmo_type), keyed
 * by its kernel address, with an AVL tree of bounds recording which
 * page ranges have been visited and their incore classification.
 */
typedef struct vmu_object {
struct vmu_object *vmo_next;	/* free-list linkage */
caddr_t vmo_key;	/* vnode_t * or struct anon_map * */
short vmo_type;	/* VMUSAGE_TYPE_{VNODE,AMP,ANON} */
avl_tree_t vmo_bounds;	/* bounds already examined for this object */
} vmu_object_t;
/*
 * An entity is one row of results (a zone, project, task, user, or the
 * system) plus hashes of the objects/anon slots already charged to it,
 * so shared pages are counted at most once per entity.
 */
typedef struct vmu_entity {
struct vmu_entity *vme_next;	/* all-entities list (vmu_data.vmu_entities) */
struct vmu_entity *vme_next_calc;	/* per-process calculation list */
mod_hash_t *vme_vnode_hash;	/* vnodes already seen by this entity */
mod_hash_t *vme_amp_hash;	/* anon_maps already seen by this entity */
mod_hash_t *vme_anon_hash;	/* private anon slots already counted */
vmusage_t vme_result;	/* accumulated usage for this entity */
} vmu_entity_t;
/*
 * Per-zone bookkeeping: the zone's own entity (if zone usage was
 * requested) and hashes of the project/task/ruser/euser entities that
 * live inside that zone.
 */
typedef struct vmu_zone {
struct vmu_zone *vmz_next;	/* free-list linkage */
id_t vmz_id;	/* zone id */
vmu_entity_t *vmz_zone;	/* entity for the zone itself, or NULL */
mod_hash_t *vmz_projects_hash;	/* project id -> entity */
mod_hash_t *vmz_tasks_hash;	/* task id -> entity */
mod_hash_t *vmz_rusers_hash;	/* real uid -> entity */
mod_hash_t *vmz_eusers_hash;	/* effective uid -> entity */
} vmu_zone_t;
/*
 * A refcounted snapshot of calculation results, shared by callers that
 * arrive while the snapshot is still fresh (see vm_getusage()).
 */
typedef struct vmu_cache {
vmusage_t *vmc_results;	/* array of results */
uint64_t vmc_nresults;	/* number of results */
uint64_t vmc_refcnt;	/* reference count (vmu_lock protects) */
uint_t vmc_flags;	/* flags the results were calculated with */
hrtime_t vmc_timestamp;	/* hrtime when the calculation finished */
} vmu_cache_t;
/*
 * Global vm_usage state.  vmu_lock/vmu_cv serialize calculations: one
 * thread (vmu_calc_thread) walks all processes at a time while other
 * requesters wait on vmu_cv and then share the resulting cache.  The
 * free lists recycle structures between calculation passes.
 */
typedef struct vmu_data {
kmutex_t vmu_lock;	/* protects this structure */
kcondvar_t vmu_cv;	/* signalled when a calculation finishes */
vmu_entity_t *vmu_system;	/* system-wide entity (VMUSAGE_SYSTEM) */
mod_hash_t *vmu_zones_hash;	/* zone id -> vmu_zone_t */
mod_hash_t *vmu_projects_col_hash;	/* collapsed (cross-zone) projects */
mod_hash_t *vmu_rusers_col_hash;	/* collapsed real users */
mod_hash_t *vmu_eusers_col_hash;	/* collapsed effective users */
mod_hash_t *vmu_all_vnodes_hash;	/* every vnode seen this pass */
mod_hash_t *vmu_all_amps_hash;	/* every anon_map seen this pass */
vmu_entity_t *vmu_entities;	/* all entities of this calculation */
size_t vmu_nentities;	/* length of vmu_entities list */
vmu_cache_t *vmu_cache;	/* most recent result snapshot */
kthread_t *vmu_calc_thread;	/* thread performing the calculation */
uint_t vmu_calc_flags;	/* flags of calculation in progress */
uint_t vmu_pending_flags;	/* flags requested by waiting threads */
uint_t vmu_pending_waiters;	/* number of waiting threads */
vmu_bound_t *vmu_free_bounds;	/* recycled bounds */
vmu_object_t *vmu_free_objects;	/* recycled objects */
vmu_entity_t *vmu_free_entities;	/* recycled entities */
vmu_zone_t *vmu_free_zones;	/* recycled zones */
} vmu_data_t;
/* Kernel globals and segment drivers referenced by the usage walk. */
extern struct as kas;
extern proc_t *practive;
extern zone_t *global_zone;
extern struct seg_ops segvn_ops;
extern struct seg_ops segspt_shmops;
/* Global vm_usage state, serialized by vmu_data.vmu_lock. */
static vmu_data_t vmu_data;
/* kmem caches backing vmu_bound_t / vmu_object_t allocation. */
static kmem_cache_t *vmu_bound_cache;
static kmem_cache_t *vmu_object_cache;
/*
 * AVL comparator for vmu_bound_t nodes: orders bounds by starting page
 * index.  Returns -1, 0, or 1 as avl_create() requires.
 */
static int
bounds_cmp(const void *bnd1, const void *bnd2)
{
	const vmu_bound_t *a = bnd1;
	const vmu_bound_t *b = bnd2;

	if (a->vmb_start < b->vmb_start)
		return (-1);
	if (a->vmb_start > b->vmb_start)
		return (1);
	return (0);
}
/*
 * Scrub a bound and push it onto the global free list for reuse by a
 * later vmu_alloc_bound().
 */
static void
vmu_free_bound(vmu_bound_t *bound)
{
	bound->vmb_start = 0;
	bound->vmb_end = 0;
	bound->vmb_type = 0;
	bound->vmb_next = vmu_data.vmu_free_bounds;
	vmu_data.vmu_free_bounds = bound;
}
/*
 * mod_hash value destructor for vmu_object_t: tear down the object's
 * bounds tree (recycling every bound) and push the object onto the
 * global free list.
 */
static void
vmu_free_object(mod_hash_val_t val)
{
	vmu_object_t *obj = (vmu_object_t *)val;
	avl_tree_t *tree = &(obj->vmo_bounds);
	void *cookie = NULL;
	vmu_bound_t *b;

	while ((b = avl_destroy_nodes(tree, &cookie)) != NULL)
		vmu_free_bound(b);
	avl_destroy(tree);

	obj->vmo_type = 0;
	obj->vmo_next = vmu_data.vmu_free_objects;
	vmu_data.vmu_free_objects = obj;
}
/*
 * mod_hash value destructor for vmu_entity_t: empty (but keep) the
 * per-entity hashes so the entity can be recycled, then push it onto
 * the global entity free list.
 */
static void
vmu_free_entity(mod_hash_val_t val)
{
	vmu_entity_t *e = (vmu_entity_t *)val;

	if (e->vme_vnode_hash != NULL)
		i_mod_hash_clear_nosync(e->vme_vnode_hash);
	if (e->vme_amp_hash != NULL)
		i_mod_hash_clear_nosync(e->vme_amp_hash);
	if (e->vme_anon_hash != NULL)
		i_mod_hash_clear_nosync(e->vme_anon_hash);

	e->vme_next = vmu_data.vmu_free_entities;
	vmu_data.vmu_free_entities = e;
}
/*
 * mod_hash value destructor for vmu_zone_t: release the zone's entity,
 * empty (but keep) its per-zone hashes, and push the zone onto the
 * global zone free list.
 */
static void
vmu_free_zone(mod_hash_val_t val)
{
	vmu_zone_t *z = (vmu_zone_t *)val;

	if (z->vmz_zone != NULL) {
		vmu_free_entity((mod_hash_val_t)z->vmz_zone);
		z->vmz_zone = NULL;
	}

	if (z->vmz_projects_hash != NULL)
		i_mod_hash_clear_nosync(z->vmz_projects_hash);
	if (z->vmz_tasks_hash != NULL)
		i_mod_hash_clear_nosync(z->vmz_tasks_hash);
	if (z->vmz_rusers_hash != NULL)
		i_mod_hash_clear_nosync(z->vmz_rusers_hash);
	if (z->vmz_eusers_hash != NULL)
		i_mod_hash_clear_nosync(z->vmz_eusers_hash);

	z->vmz_next = vmu_data.vmu_free_zones;
	vmu_data.vmu_free_zones = z;
}
/*
 * One-time initialization of the vm_usage subsystem: the global lock
 * and condition variable, the persistent "all vnodes"/"all amps" object
 * hashes, the collapsed (cross-zone) entity hashes, the per-zone hash,
 * and the kmem caches backing bound/object allocation.
 */
void
vm_usage_init()
{
mutex_init(&vmu_data.vmu_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&vmu_data.vmu_cv, NULL, CV_DEFAULT, NULL);
vmu_data.vmu_system = NULL;
vmu_data.vmu_zones_hash = NULL;
vmu_data.vmu_projects_col_hash = NULL;
vmu_data.vmu_rusers_col_hash = NULL;
vmu_data.vmu_eusers_col_hash = NULL;
vmu_data.vmu_free_bounds = NULL;
vmu_data.vmu_free_objects = NULL;
vmu_data.vmu_free_entities = NULL;
vmu_data.vmu_free_zones = NULL;
/* Hashes of every shared object visited during one calculation pass. */
vmu_data.vmu_all_vnodes_hash = mod_hash_create_ptrhash(
"vmusage vnode hash", VMUSAGE_HASH_SIZE, vmu_free_object,
sizeof (vnode_t));
vmu_data.vmu_all_amps_hash = mod_hash_create_ptrhash(
"vmusage amp hash", VMUSAGE_HASH_SIZE, vmu_free_object,
sizeof (struct anon_map));
/* Collapsed hashes aggregate usage across all zones (ALL_ZONES rows). */
vmu_data.vmu_projects_col_hash = mod_hash_create_idhash(
"vmusage collapsed project hash", VMUSAGE_HASH_SIZE,
vmu_free_entity);
vmu_data.vmu_rusers_col_hash = mod_hash_create_idhash(
"vmusage collapsed ruser hash", VMUSAGE_HASH_SIZE,
vmu_free_entity);
vmu_data.vmu_eusers_col_hash = mod_hash_create_idhash(
"vmusage collpased euser hash", VMUSAGE_HASH_SIZE,
vmu_free_entity);
vmu_data.vmu_zones_hash = mod_hash_create_idhash(
"vmusage zone hash", VMUSAGE_HASH_SIZE, vmu_free_zone);
vmu_bound_cache = kmem_cache_create("vmu_bound_cache",
sizeof (vmu_bound_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
vmu_object_cache = kmem_cache_create("vmu_object_cache",
sizeof (vmu_object_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
vmu_data.vmu_entities = NULL;
vmu_data.vmu_nentities = 0;
vmu_data.vmu_cache = NULL;
vmu_data.vmu_calc_thread = NULL;
vmu_data.vmu_calc_flags = 0;
vmu_data.vmu_pending_flags = 0;
vmu_data.vmu_pending_waiters = 0;
}
/*
 * Allocate an entity for (id, type, zoneid), recycling one from the
 * free list when possible.  The result record is (re)initialized, the
 * per-entity vnode/amp/anon hashes are created if the recycled entity
 * did not already have them, and the entity is linked onto the global
 * vmu_data.vmu_entities list (bumping vmu_nentities).
 */
static vmu_entity_t *
vmu_alloc_entity(id_t id, int type, id_t zoneid)
{
vmu_entity_t *entity;
if (vmu_data.vmu_free_entities != NULL) {
entity = vmu_data.vmu_free_entities;
vmu_data.vmu_free_entities =
vmu_data.vmu_free_entities->vme_next;
/* Recycled entity: only the stale result needs clearing. */
bzero(&entity->vme_result, sizeof (vmusage_t));
} else {
entity = kmem_zalloc(sizeof (vmu_entity_t), KM_SLEEP);
}
entity->vme_result.vmu_id = id;
entity->vme_result.vmu_zoneid = zoneid;
entity->vme_result.vmu_type = type;
/* Recycled entities keep their (emptied) hashes; create if missing. */
if (entity->vme_vnode_hash == NULL)
entity->vme_vnode_hash = mod_hash_create_ptrhash(
"vmusage vnode hash", VMUSAGE_HASH_SIZE, vmu_free_object,
sizeof (vnode_t));
if (entity->vme_amp_hash == NULL)
entity->vme_amp_hash = mod_hash_create_ptrhash(
"vmusage amp hash", VMUSAGE_HASH_SIZE, vmu_free_object,
sizeof (struct anon_map));
if (entity->vme_anon_hash == NULL)
entity->vme_anon_hash = mod_hash_create_ptrhash(
"vmusage anon hash", VMUSAGE_HASH_SIZE,
mod_hash_null_valdtor, sizeof (struct anon));
entity->vme_next = vmu_data.vmu_entities;
vmu_data.vmu_entities = entity;
vmu_data.vmu_nentities++;
return (entity);
}
/*
 * Allocate (or recycle) a vmu_zone_t for zone `id`, creating the
 * per-zone entity and whichever project/task/ruser/euser hashes the
 * current calculation flags require.  Recycled zones keep hashes they
 * already had.
 */
static vmu_zone_t *
vmu_alloc_zone(id_t id)
{
vmu_zone_t *zone;
if (vmu_data.vmu_free_zones != NULL) {
zone = vmu_data.vmu_free_zones;
vmu_data.vmu_free_zones =
vmu_data.vmu_free_zones->vmz_next;
zone->vmz_next = NULL;
zone->vmz_zone = NULL;
} else {
zone = kmem_zalloc(sizeof (vmu_zone_t), KM_SLEEP);
}
zone->vmz_id = id;
/* Only create what the requested flags will actually use. */
if ((vmu_data.vmu_calc_flags & (VMUSAGE_ZONE | VMUSAGE_ALL_ZONES)) != 0)
zone->vmz_zone = vmu_alloc_entity(id, VMUSAGE_ZONE, id);
if ((vmu_data.vmu_calc_flags & (VMUSAGE_PROJECTS |
VMUSAGE_ALL_PROJECTS)) != 0 && zone->vmz_projects_hash == NULL)
zone->vmz_projects_hash = mod_hash_create_idhash(
"vmusage project hash", VMUSAGE_HASH_SIZE, vmu_free_entity);
if ((vmu_data.vmu_calc_flags & (VMUSAGE_TASKS | VMUSAGE_ALL_TASKS))
!= 0 && zone->vmz_tasks_hash == NULL)
zone->vmz_tasks_hash = mod_hash_create_idhash(
"vmusage task hash", VMUSAGE_HASH_SIZE, vmu_free_entity);
if ((vmu_data.vmu_calc_flags & (VMUSAGE_RUSERS | VMUSAGE_ALL_RUSERS))
!= 0 && zone->vmz_rusers_hash == NULL)
zone->vmz_rusers_hash = mod_hash_create_idhash(
"vmusage ruser hash", VMUSAGE_HASH_SIZE, vmu_free_entity);
if ((vmu_data.vmu_calc_flags & (VMUSAGE_EUSERS | VMUSAGE_ALL_EUSERS))
!= 0 && zone->vmz_eusers_hash == NULL)
zone->vmz_eusers_hash = mod_hash_create_idhash(
"vmusage euser hash", VMUSAGE_HASH_SIZE, vmu_free_entity);
return (zone);
}
/*
 * Allocate a tracked-object structure for `key` (a vnode or anon_map
 * address) of the given VMUSAGE_TYPE_*, recycling from the free list
 * when possible, and initialize its (empty) bounds AVL tree.
 */
static vmu_object_t *
vmu_alloc_object(caddr_t key, int type)
{
	vmu_object_t *object;

	object = vmu_data.vmu_free_objects;
	if (object != NULL)
		vmu_data.vmu_free_objects = object->vmo_next;
	else
		object = kmem_cache_alloc(vmu_object_cache, KM_SLEEP);

	object->vmo_next = NULL;
	object->vmo_key = key;
	object->vmo_type = type;
	avl_create(&(object->vmo_bounds), bounds_cmp, sizeof (vmu_bound_t), 0);
	return (object);
}
/*
 * Allocate a zeroed bound, recycling from the global free list when
 * possible.
 */
static vmu_bound_t *
vmu_alloc_bound()
{
	vmu_bound_t *bound;

	bound = vmu_data.vmu_free_bounds;
	if (bound != NULL)
		vmu_data.vmu_free_bounds = bound->vmb_next;
	else
		bound = kmem_cache_alloc(vmu_bound_cache, KM_SLEEP);

	bound->vmb_next = NULL;
	bound->vmb_start = 0;
	bound->vmb_end = 0;
	bound->vmb_type = 0;
	return (bound);
}
/*
 * Look up `key` in `hash`, creating and inserting a new object of
 * `type` if it is not already present.  Returns the (existing or new)
 * object.
 */
static vmu_object_t *
vmu_find_insert_object(mod_hash_t *hash, caddr_t key, uint_t type)
{
	vmu_object_t *object;
	int ret;

	if (i_mod_hash_find_nosync(hash, (mod_hash_key_t)key,
	    (mod_hash_val_t *)&object) == 0)
		return (object);

	/* Not tracked yet: create and insert a fresh object. */
	object = vmu_alloc_object(key, type);
	ret = i_mod_hash_insert_nosync(hash, (mod_hash_key_t)key,
	    (mod_hash_val_t)object, (mod_hash_hndl_t)0);
	ASSERT(ret == 0);
	return (object);
}
/*
 * Record anon slot `key` in `hash` if not already present.  Returns 1
 * if the slot was newly inserted (first time seen), 0 if it was already
 * there.
 */
static int
vmu_find_insert_anon(mod_hash_t *hash, caddr_t key)
{
	caddr_t val;
	int ret;

	if (i_mod_hash_find_nosync(hash, (mod_hash_key_t)key,
	    (mod_hash_val_t *)&val) == 0)
		return (0);

	ret = i_mod_hash_insert_nosync(hash, (mod_hash_key_t)key,
	    (mod_hash_val_t)key, (mod_hash_hndl_t)0);
	ASSERT(ret == 0);
	return (1);
}
/*
 * Look up the entity for `id` in `hash`, creating and inserting a new
 * entity of (type, zoneid) if not already present.  Returns the
 * (existing or new) entity.
 */
static vmu_entity_t *
vmu_find_insert_entity(mod_hash_t *hash, id_t id, uint_t type, id_t zoneid)
{
	vmu_entity_t *entity;
	int ret;

	if (i_mod_hash_find_nosync(hash, (mod_hash_key_t)(uintptr_t)id,
	    (mod_hash_val_t *)&entity) == 0)
		return (entity);

	/* First time this id is seen: create and insert an entity. */
	entity = vmu_alloc_entity(id, type, zoneid);
	ret = i_mod_hash_insert_nosync(hash,
	    (mod_hash_key_t)(uintptr_t)id, (mod_hash_val_t)entity,
	    (mod_hash_hndl_t)0);
	ASSERT(ret == 0);
	return (entity);
}
/*
 * Ensure the page range [start, end] of object `ro` is fully covered by
 * a contiguous chain of bounds, inserting new bounds of `type`
 * (normally VMUSAGE_BOUND_UNKNOWN) for any sub-ranges not yet tracked.
 * On return *first/*last point at the first and last bound covering the
 * range.
 *
 * Returns the number of pages in the newly created (previously
 * untracked) bounds.
 */
static pgcnt_t
vmu_insert_lookup_object_bounds(vmu_object_t *ro, pgcnt_t start, pgcnt_t
    end, char type, vmu_bound_t **first, vmu_bound_t **last)
{
	avl_tree_t *tree = &(ro->vmo_bounds);
	avl_index_t where;
	vmu_bound_t *walker, *tmp;
	pgcnt_t ret = 0;

	ASSERT(start <= end);

	*first = *last = NULL;

	/* Tentative bound: used as a search key and, possibly, inserted. */
	tmp = vmu_alloc_bound();
	tmp->vmb_start = start;
	tmp->vmb_type = type;

	/* Idiom fix: explicit comparison instead of assignment-as-truth. */
	if ((walker = avl_find(tree, tmp, &where)) != NULL) {
		/* An existing bound starts exactly at `start`. */
		vmu_free_bound(tmp);
		*first = walker;
	}
	if (walker == NULL) {
		/* The preceding bound may still straddle `start`. */
		walker = avl_nearest(tree, where, AVL_BEFORE);
		if (walker != NULL) {
			if (ISWITHIN(walker, start)) {
				vmu_free_bound(tmp);
				*first = walker;
			}
		}
	}
	if (*first == NULL) {
		/*
		 * Nothing covers `start`: insert a new bound running up to
		 * (but not overlapping) the next existing bound, or to `end`
		 * if there is none in range.
		 */
		walker = avl_nearest(tree, where, AVL_AFTER);
		if (walker != NULL && walker->vmb_start <= end) {
			tmp->vmb_end = walker->vmb_start - 1;
			*first = tmp;
		} else {
			tmp->vmb_end = end;
			*first = *last = tmp;
		}
		ret += tmp->vmb_end - tmp->vmb_start + 1;
		avl_insert(tree, tmp, where);
	}

	ASSERT(*first != NULL);

	/* *last already set means the single new bound covers everything. */
	if (*last != NULL) {
		return (ret);
	}

	/*
	 * Walk forward from *first, plugging any gaps between consecutive
	 * bounds with new bounds of `type` until `end` is covered.
	 */
	*last = *first;
	for (;;) {
		if (ISWITHIN(*last, end)) {
			break;
		}
		walker = AVL_NEXT(tree, *last);
		if (walker == NULL || walker->vmb_start > end) {
			/* Gap extends past `end`: final bound and stop. */
			tmp = vmu_alloc_bound();
			tmp->vmb_start = (*last)->vmb_end + 1;
			tmp->vmb_end = end;
			tmp->vmb_type = type;
			ret += tmp->vmb_end - tmp->vmb_start + 1;
			avl_insert_here(tree, tmp, *last, AVL_AFTER);
			*last = tmp;
			break;
		} else {
			if ((*last)->vmb_end + 1 != walker->vmb_start) {
				/* Plug the gap up to the next bound. */
				tmp = vmu_alloc_bound();
				tmp->vmb_start = (*last)->vmb_end + 1;
				tmp->vmb_end = walker->vmb_start - 1;
				tmp->vmb_type = type;
				ret += tmp->vmb_end - tmp->vmb_start + 1;
				avl_insert_here(tree, tmp, *last, AVL_AFTER);
				*last = tmp;
			} else {
				/* Adjacent bound: simply advance. */
				*last = walker;
			}
		}
	}
	return (ret);
}
/*
 * Resolve the entity bounds [*first, *last] (in `tree`) against the
 * already-classified shared-object bounds [new_first, new_last] (in
 * `new_tree`).  Each VMUSAGE_BOUND_UNKNOWN entity bound takes its type
 * from the overlapping shared bound, and is split where the shared
 * bound ends before it does.  Returns the number of pages found to be
 * VMUSAGE_BOUND_INCORE.
 */
static pgcnt_t
vmu_update_bounds(avl_tree_t *tree, vmu_bound_t **first, vmu_bound_t **last,
avl_tree_t *new_tree, vmu_bound_t *new_first, vmu_bound_t *new_last)
{
vmu_bound_t *next, *new_next, *tmp;
pgcnt_t rss = 0;
next = *first;
new_next = new_first;
/* The shared bounds must fully cover the unknown entity bounds. */
ASSERT((*first)->vmb_type != VMUSAGE_BOUND_UNKNOWN ||
(*first)->vmb_start >= new_first->vmb_start);
ASSERT((*last)->vmb_type != VMUSAGE_BOUND_UNKNOWN ||
(*last)->vmb_end <= new_last->vmb_end);
for (;;) {
/* Already-typed bounds were counted on a previous pass; skip. */
if (next->vmb_type != VMUSAGE_BOUND_UNKNOWN) {
if (next == *last)
break;
next = AVL_NEXT(tree, next);
continue;
}
/* Advance to the shared bound overlapping `next`. */
while (new_next->vmb_end < next->vmb_start)
new_next = AVL_NEXT(new_tree, new_next);
ASSERT(new_next->vmb_type != VMUSAGE_BOUND_UNKNOWN);
next->vmb_type = new_next->vmb_type;
if (new_next->vmb_end < next->vmb_end) {
/*
 * The shared bound ends inside `next`: split off the
 * still-unknown remainder into a new bound after it.
 */
tmp = vmu_alloc_bound();
tmp->vmb_type = VMUSAGE_BOUND_UNKNOWN;
tmp->vmb_start = new_next->vmb_end + 1;
tmp->vmb_end = next->vmb_end;
avl_insert_here(tree, tmp, next, AVL_AFTER);
next->vmb_end = new_next->vmb_end;
if (*last == next)
*last = tmp;
if (next->vmb_type == VMUSAGE_BOUND_INCORE)
rss += next->vmb_end - next->vmb_start + 1;
next = tmp;
} else {
if (next->vmb_type == VMUSAGE_BOUND_INCORE)
rss += next->vmb_end - next->vmb_start + 1;
if (next == *last)
break;
next = AVL_NEXT(tree, next);
}
}
return (rss);
}
/*
 * Coalesce adjacent bounds of identical type in [*first, *last] into
 * single bounds, freeing the absorbed nodes.  *last is pulled back to
 * the surviving bound if the old *last is absorbed.
 */
static void
vmu_merge_bounds(avl_tree_t *tree, vmu_bound_t **first, vmu_bound_t **last)
{
vmu_bound_t *current;
vmu_bound_t *next;
ASSERT(tree != NULL);
ASSERT(*first != NULL);
ASSERT(*last != NULL);
current = *first;
while (current != *last) {
next = AVL_NEXT(tree, current);
if ((current->vmb_end + 1) == next->vmb_start &&
current->vmb_type == next->vmb_type) {
/* Absorb `next` into `current` and drop it from the tree. */
current->vmb_end = next->vmb_end;
avl_remove(tree, next);
vmu_free_bound(next);
if (next == *last) {
*last = current;
}
} else {
/* Not mergeable; advance (current may have grown). */
current = AVL_NEXT(tree, current);
}
}
}
/*
 * Classify each VMUSAGE_BOUND_UNKNOWN bound in [*first, *last] of an
 * anon_map object as INCORE or NOT_INCORE by looking up each anon slot
 * and probing page_exists().  If `incore` is B_TRUE (locked-down
 * segments), every bound is simply marked INCORE.  A bound is split
 * whenever the classification changes partway through it, so each
 * resulting bound is uniformly typed; large pages advance the scan by
 * the full large-page span.
 *
 * Fixes: (1) the lock macros took "&->a_rwlock" (garbled text) — now
 * correctly &amp->a_rwlock; (2) bound_type is updated once the bound's
 * type is established, so the split branch below is reachable and a
 * mid-bound type change splits the bound instead of overwriting it.
 */
static void
vmu_amp_update_incore_bounds(avl_tree_t *tree, struct anon_map *amp,
    vmu_bound_t **first, vmu_bound_t **last, boolean_t incore)
{
	vmu_bound_t *next;
	vmu_bound_t *tmp;
	pgcnt_t index;
	short bound_type;
	short page_type;
	vnode_t *vn;
	anoff_t off;
	struct anon *ap;

	next = *first;
	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
	for (;;) {
		if (incore == B_TRUE)
			next->vmb_type = VMUSAGE_BOUND_INCORE;

		if (next->vmb_type != VMUSAGE_BOUND_UNKNOWN) {
			/* Bound already classified; move on. */
			if (next == *last)
				break;
			next = AVL_NEXT(tree, next);
			continue;
		}
		bound_type = next->vmb_type;
		index = next->vmb_start;
		while (index <= next->vmb_end) {
			page_t *page;
			pgcnt_t pgcnt = 1;
			uint_t pgshft;
			pgcnt_t pgmsk;

			ap = anon_get_ptr(amp->ahp, index);
			if (ap != NULL)
				swap_xlate(ap, &vn, &off);

			if (ap != NULL && vn != NULL && vn->v_pages != NULL &&
			    (page = page_exists(vn, off)) != NULL) {
				page_type = VMUSAGE_BOUND_INCORE;
				if (page->p_szc > 0) {
					/* Large page: note span for skip. */
					pgcnt = page_get_pagecnt(page->p_szc);
					pgshft = page_get_shift(page->p_szc);
					pgmsk = (0x1 << (pgshft - PAGESHIFT))
					    - 1;
				}
			} else {
				page_type = VMUSAGE_BOUND_NOT_INCORE;
			}
			if (bound_type == VMUSAGE_BOUND_UNKNOWN) {
				next->vmb_type = page_type;
				bound_type = page_type;
			} else if (next->vmb_type != page_type) {
				/*
				 * Classification changed mid-bound: split
				 * the remainder into a new bound.
				 */
				tmp = vmu_alloc_bound();
				tmp->vmb_type = page_type;
				tmp->vmb_start = index;
				tmp->vmb_end = next->vmb_end;
				avl_insert_here(tree, tmp, next, AVL_AFTER);
				next->vmb_end = index - 1;
				if (*last == next)
					*last = tmp;
				next = tmp;
			}
			if (pgcnt > 1) {
				/* Jump past the end of the large page. */
				index = (index & ~pgmsk) + pgcnt;
			} else {
				index++;
			}
		}
		if (next == *last) {
			ASSERT(next->vmb_type != VMUSAGE_BOUND_UNKNOWN);
			break;
		} else
			next = AVL_NEXT(tree, next);
	}
	ANON_LOCK_EXIT(&amp->a_rwlock);
}
/*
 * Classify each VMUSAGE_BOUND_UNKNOWN bound in [*first, *last] of a
 * vnode object as INCORE or NOT_INCORE by probing the page cache with
 * page_exists().  A bound is split whenever the classification changes
 * partway through it, so each resulting bound is uniformly typed;
 * large pages advance the scan by the full large-page span.
 *
 * Fix: bound_type is updated once the bound's type is established, so
 * the split branch below is reachable and a mid-bound type change
 * splits the bound instead of silently overwriting its type.
 */
static void
vmu_vnode_update_incore_bounds(avl_tree_t *tree, vnode_t *vnode,
    vmu_bound_t **first, vmu_bound_t **last)
{
	vmu_bound_t *next;
	vmu_bound_t *tmp;
	pgcnt_t index;
	short bound_type;
	short page_type;

	next = *first;
	for (;;) {
		/* A vnode with no cached pages has nothing incore. */
		if (vnode->v_pages == NULL)
			next->vmb_type = VMUSAGE_BOUND_NOT_INCORE;

		if (next->vmb_type != VMUSAGE_BOUND_UNKNOWN) {
			/* Bound already classified; move on. */
			if (next == *last)
				break;
			next = AVL_NEXT(tree, next);
			continue;
		}

		bound_type = next->vmb_type;
		index = next->vmb_start;
		while (index <= next->vmb_end) {
			page_t *page;
			pgcnt_t pgcnt = 1;
			uint_t pgshft;
			pgcnt_t pgmsk;

			if (vnode->v_pages != NULL &&
			    (page = page_exists(vnode, ptob(index))) != NULL) {
				page_type = VMUSAGE_BOUND_INCORE;
				if (page->p_szc > 0) {
					/* Large page: note span for skip. */
					pgcnt = page_get_pagecnt(page->p_szc);
					pgshft = page_get_shift(page->p_szc);
					pgmsk = (0x1 << (pgshft - PAGESHIFT))
					    - 1;
				}
			} else {
				page_type = VMUSAGE_BOUND_NOT_INCORE;
			}
			if (bound_type == VMUSAGE_BOUND_UNKNOWN) {
				next->vmb_type = page_type;
				bound_type = page_type;
			} else if (next->vmb_type != page_type) {
				/*
				 * Classification changed mid-bound: split
				 * the remainder into a new bound.
				 */
				tmp = vmu_alloc_bound();
				tmp->vmb_type = page_type;
				tmp->vmb_start = index;
				tmp->vmb_end = next->vmb_end;
				avl_insert_here(tree, tmp, next, AVL_AFTER);
				next->vmb_end = index - 1;
				if (*last == next)
					*last = tmp;
				next = tmp;
			}
			if (pgcnt > 1) {
				/* Jump past the end of the large page. */
				index = (index & ~pgmsk) + pgcnt;
			} else {
				index++;
			}
		}
		if (next == *last) {
			ASSERT(next->vmb_type != VMUSAGE_BOUND_UNKNOWN);
			break;
		} else
			next = AVL_NEXT(tree, next);
	}
}
/*
 * Charge the VM usage of one address-space segment to every entity on
 * the vmu_entities calculation list.  Only segvn (mapped files / anon)
 * and segspt_shm (ISM/DISM shared memory) segments are examined.
 *
 * Private (COW'd) anon pages are charged directly as private rss/swap.
 * Pages reachable via a shared vnode or anon_map are tracked through
 * the global "all vnodes"/"all amps" object hashes so each page is
 * classified only once per pass, then charged to each entity via
 * per-entity object bounds so shared pages count once per entity.
 */
static void
vmu_calculate_seg(vmu_entity_t *vmu_entities, struct seg *seg)
{
struct segvn_data *svd;
struct shm_data *shmd;
struct spt_data *sptd;
vmu_object_t *shared_object = NULL;
vmu_object_t *entity_object = NULL;
vmu_entity_t *entity;
vmusage_t *result;
vmu_bound_t *first = NULL;
vmu_bound_t *last = NULL;
vmu_bound_t *cur = NULL;
vmu_bound_t *e_first = NULL;
vmu_bound_t *e_last = NULL;
vmu_bound_t *tmp;
pgcnt_t p_index, s_index, p_start, p_end, s_start, s_end, rss, virt;
struct anon_map *private_amp = NULL;
boolean_t incore = B_FALSE;
boolean_t shared = B_FALSE;
int file = 0;
pgcnt_t swresv = 0;
pgcnt_t panon = 0;
s_start = 0;
p_end = 0;
if (seg->s_size <= 0)
return;
/* Identify segment type and the shared object (if any) backing it. */
if (seg->s_ops == &segvn_ops) {
svd = (struct segvn_data *)seg->s_data;
if (svd->type == MAP_SHARED) {
shared = B_TRUE;
} else {
/* Private mapping: note swap reservation and any COW amp. */
swresv = svd->swresv;
if (SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock,
RW_READER) != 0) {
if (svd->tr_state == SEGVN_TR_OFF &&
svd->amp != NULL) {
private_amp = svd->amp;
p_start = svd->anon_index;
p_end = svd->anon_index +
btop(seg->s_size) - 1;
}
SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
}
}
if (svd->vp != NULL) {
/* File-backed: track pages through the vnode object. */
file = 1;
shared_object = vmu_find_insert_object(
vmu_data.vmu_all_vnodes_hash, (caddr_t)svd->vp,
VMUSAGE_TYPE_VNODE);
s_start = btop(svd->offset);
s_end = btop(svd->offset + seg->s_size) - 1;
}
if (svd->amp != NULL && svd->type == MAP_SHARED) {
/* Shared anonymous: track through the anon_map object. */
ASSERT(shared_object == NULL);
shared_object = vmu_find_insert_object(
vmu_data.vmu_all_amps_hash, (caddr_t)svd->amp,
VMUSAGE_TYPE_AMP);
s_start = svd->anon_index;
s_end = svd->anon_index + btop(seg->s_size) - 1;
if (svd->amp->swresv == 0)
incore = B_TRUE;
}
} else if (seg->s_ops == &segspt_shmops) {
/* ISM/DISM shared memory segment. */
shared = B_TRUE;
shmd = (struct shm_data *)seg->s_data;
shared_object = vmu_find_insert_object(
vmu_data.vmu_all_amps_hash, (caddr_t)shmd->shm_amp,
VMUSAGE_TYPE_AMP);
s_start = 0;
s_end = btop(seg->s_size) - 1;
sptd = shmd->shm_sptseg->s_data;
if (sptd->spt_flags & SHM_SHARE_MMU)
incore = B_TRUE;
} else {
/* Unrecognized segment driver: nothing to account. */
return;
}
/*
 * First pass: walk the private (COW) amp, charging referenced anon
 * pages.  Slots still shared with the object (refcnt > 1) are charged
 * through the per-entity anon hash so they count once per entity;
 * gaps in the amp fall through to the shared object via `cur` bounds.
 */
if (private_amp != NULL) {
ANON_LOCK_ENTER(&private_amp->a_rwlock, RW_WRITER);
p_index = p_start;
s_index = s_start;
while (p_index <= p_end) {
pgcnt_t p_index_next;
pgcnt_t p_bound_size;
int cnt;
anoff_t off;
struct vnode *vn;
struct anon *ap;
page_t *page;
pgcnt_t pgcnt = 1;
pgcnt_t pgstart;
pgcnt_t pgend;
uint_t pgshft;
pgcnt_t pgmsk;
p_index_next = p_index;
ap = anon_get_next_ptr(private_amp->ahp,
&p_index_next);
if (p_index_next > p_end) {
p_index_next = p_end + 1;
ap = NULL;
}
if (p_index_next != p_index) {
/* A hole in the amp: defer to the shared object. */
p_bound_size = p_index_next - p_index - 1;
if (shared_object != NULL) {
cur = vmu_alloc_bound();
cur->vmb_start = s_index;
cur->vmb_end = s_index + p_bound_size;
cur->vmb_type = VMUSAGE_BOUND_UNKNOWN;
if (first == NULL) {
first = cur;
last = cur;
} else {
last->vmb_next = cur;
last = cur;
}
}
p_index = p_index + p_bound_size + 1;
s_index = s_index + p_bound_size + 1;
}
if (ap == NULL)
break;
cnt = ap->an_refcnt;
swap_xlate(ap, &vn, &off);
if (vn == NULL || vn->v_pages == NULL ||
(page = page_exists(vn, off)) == NULL) {
/* Anon slot exists but page is not incore. */
p_index++;
s_index++;
continue;
}
if (page->p_szc > 0) {
/* Large page: account the whole (clipped) span. */
pgcnt = page_get_pagecnt(page->p_szc);
pgshft = page_get_shift(page->p_szc);
pgmsk = (0x1 << (pgshft - PAGESHIFT)) - 1;
pgstart = p_index & ~pgmsk;
pgend = pgstart + pgcnt - 1;
if (pgend > p_end)
pgend = p_end;
pgcnt = pgend - p_index + 1;
p_index += pgcnt;
s_index += pgcnt;
} else {
p_index++;
s_index++;
}
if (cnt == 1) {
/* Truly private page: charge once, below. */
panon += pgcnt;
continue;
}
/* Still-shared COW page: charge each entity at most once. */
for (entity = vmu_entities; entity != NULL;
entity = entity->vme_next_calc) {
result = &entity->vme_result;
if (vmu_find_insert_anon(entity->vme_anon_hash,
(caddr_t)ap) == 0)
continue;
result->vmu_rss_all += (pgcnt << PAGESHIFT);
result->vmu_rss_private +=
(pgcnt << PAGESHIFT);
}
}
ANON_LOCK_EXIT(&private_amp->a_rwlock);
}
/* Charge private swap reservation and private anon rss. */
if (swresv > 0 || panon > 0) {
for (entity = vmu_entities; entity != NULL;
entity = entity->vme_next_calc) {
result = &entity->vme_result;
result->vmu_swap_all += swresv;
result->vmu_swap_private += swresv;
result->vmu_rss_all += (panon << PAGESHIFT);
result->vmu_rss_private += (panon << PAGESHIFT);
}
}
/*
 * Second pass: resolve each deferred range against the shared object
 * (classifying its pages once, globally), then charge each entity via
 * its own bounds copy.
 */
if (shared_object != NULL) {
avl_tree_t *tree = &(shared_object->vmo_bounds);
if (first == NULL) {
/* No private amp walk; the whole range is one bound. */
first = vmu_alloc_bound();
first->vmb_start = s_start;
first->vmb_end = s_end;
first->vmb_type = VMUSAGE_BOUND_UNKNOWN;
}
cur = first;
while (cur != NULL) {
if (vmu_insert_lookup_object_bounds(shared_object,
cur->vmb_start, cur->vmb_end, VMUSAGE_BOUND_UNKNOWN,
&first, &last) > 0) {
/* New pages in the global object: classify them. */
if (shared_object->vmo_type ==
VMUSAGE_TYPE_VNODE) {
vmu_vnode_update_incore_bounds(
tree,
(vnode_t *)
shared_object->vmo_key, &first,
&last);
} else {
vmu_amp_update_incore_bounds(
tree,
(struct anon_map *)
shared_object->vmo_key, &first,
&last, incore);
}
vmu_merge_bounds(tree, &first, &last);
}
for (entity = vmu_entities; entity != NULL;
entity = entity->vme_next_calc) {
avl_tree_t *e_tree;
result = &entity->vme_result;
entity_object = vmu_find_insert_object(
shared_object->vmo_type ==
VMUSAGE_TYPE_VNODE ? entity->vme_vnode_hash:
entity->vme_amp_hash,
shared_object->vmo_key,
shared_object->vmo_type);
/* virt == 0 means this entity already saw the range. */
virt = vmu_insert_lookup_object_bounds(
entity_object, cur->vmb_start, cur->vmb_end,
VMUSAGE_BOUND_UNKNOWN, &e_first, &e_last);
if (virt == 0)
continue;
e_tree = &(entity_object->vmo_bounds);
rss = vmu_update_bounds(e_tree, &e_first,
&e_last, tree, first, last);
result->vmu_rss_all += (rss << PAGESHIFT);
if (shared == B_TRUE && file == B_FALSE) {
/* Shared anonymous memory. */
result->vmu_swap_all +=
(virt << PAGESHIFT);
result->vmu_swap_shared +=
(virt << PAGESHIFT);
result->vmu_rss_shared +=
(rss << PAGESHIFT);
} else if (shared == B_TRUE && file == B_TRUE) {
/* Shared file mapping. */
result->vmu_rss_shared +=
(rss << PAGESHIFT);
} else if (shared == B_FALSE &&
file == B_TRUE) {
/* Private file mapping. */
result->vmu_rss_private +=
(rss << PAGESHIFT);
}
vmu_merge_bounds(e_tree, &e_first, &e_last);
}
tmp = cur;
cur = cur->vmb_next;
vmu_free_bound(tmp);
}
}
}
/*
 * Build the list of entities process `p` belongs to (system, its zone,
 * project, task, real/effective user, plus the cross-zone "collapsed"
 * variants), as selected by vmu_calc_flags, then walk the process's
 * address space charging each segment to every entity on the list.
 */
static void
vmu_calculate_proc(proc_t *p)
{
vmu_entity_t *entities = NULL;
vmu_zone_t *zone;
vmu_entity_t *tmp;
struct as *as;
struct seg *seg;
int ret;
if ((vmu_data.vmu_system) != NULL) {
tmp = vmu_data.vmu_system;
tmp->vme_next_calc = entities;
entities = tmp;
}
/* Per-zone entities are only needed for zone-scoped flags. */
if (vmu_data.vmu_calc_flags &
(VMUSAGE_ZONE | VMUSAGE_ALL_ZONES | VMUSAGE_PROJECTS |
VMUSAGE_ALL_PROJECTS | VMUSAGE_TASKS | VMUSAGE_ALL_TASKS |
VMUSAGE_RUSERS | VMUSAGE_ALL_RUSERS | VMUSAGE_EUSERS |
VMUSAGE_ALL_EUSERS)) {
ret = i_mod_hash_find_nosync(vmu_data.vmu_zones_hash,
(mod_hash_key_t)(uintptr_t)p->p_zone->zone_id,
(mod_hash_val_t *)&zone);
if (ret != 0) {
/* First process seen in this zone: create its record. */
zone = vmu_alloc_zone(p->p_zone->zone_id);
ret = i_mod_hash_insert_nosync(vmu_data.vmu_zones_hash,
(mod_hash_key_t)(uintptr_t)p->p_zone->zone_id,
(mod_hash_val_t)zone, (mod_hash_hndl_t)0);
ASSERT(ret == 0);
}
if (zone->vmz_zone != NULL) {
tmp = zone->vmz_zone;
tmp->vme_next_calc = entities;
entities = tmp;
}
if (vmu_data.vmu_calc_flags &
(VMUSAGE_PROJECTS | VMUSAGE_ALL_PROJECTS)) {
tmp = vmu_find_insert_entity(zone->vmz_projects_hash,
p->p_task->tk_proj->kpj_id, VMUSAGE_PROJECTS,
zone->vmz_id);
tmp->vme_next_calc = entities;
entities = tmp;
}
if (vmu_data.vmu_calc_flags &
(VMUSAGE_TASKS | VMUSAGE_ALL_TASKS)) {
tmp = vmu_find_insert_entity(zone->vmz_tasks_hash,
p->p_task->tk_tkid, VMUSAGE_TASKS, zone->vmz_id);
tmp->vme_next_calc = entities;
entities = tmp;
}
if (vmu_data.vmu_calc_flags &
(VMUSAGE_RUSERS | VMUSAGE_ALL_RUSERS)) {
tmp = vmu_find_insert_entity(zone->vmz_rusers_hash,
crgetruid(p->p_cred), VMUSAGE_RUSERS, zone->vmz_id);
tmp->vme_next_calc = entities;
entities = tmp;
}
if (vmu_data.vmu_calc_flags &
(VMUSAGE_EUSERS | VMUSAGE_ALL_EUSERS)) {
tmp = vmu_find_insert_entity(zone->vmz_eusers_hash,
crgetuid(p->p_cred), VMUSAGE_EUSERS, zone->vmz_id);
tmp->vme_next_calc = entities;
entities = tmp;
}
}
/* Collapsed entities aggregate the same id across all zones. */
if (vmu_data.vmu_calc_flags & VMUSAGE_COL_PROJECTS) {
tmp = vmu_find_insert_entity(vmu_data.vmu_projects_col_hash,
p->p_task->tk_proj->kpj_id, VMUSAGE_PROJECTS, ALL_ZONES);
tmp->vme_next_calc = entities;
entities = tmp;
}
if (vmu_data.vmu_calc_flags & VMUSAGE_COL_RUSERS) {
tmp = vmu_find_insert_entity(vmu_data.vmu_rusers_col_hash,
crgetruid(p->p_cred), VMUSAGE_RUSERS, ALL_ZONES);
tmp->vme_next_calc = entities;
entities = tmp;
}
if (vmu_data.vmu_calc_flags & VMUSAGE_COL_EUSERS) {
tmp = vmu_find_insert_entity(vmu_data.vmu_eusers_col_hash,
crgetuid(p->p_cred), VMUSAGE_EUSERS, ALL_ZONES);
tmp->vme_next_calc = entities;
entities = tmp;
}
ASSERT(entities != NULL);
/* Charge every segment of the process's address space. */
as = p->p_as;
AS_LOCK_ENTER(as, RW_READER);
for (seg = AS_SEGFIRST(as); seg != NULL;
seg = AS_SEGNEXT(as, seg)) {
vmu_calculate_seg(entities, seg);
}
AS_LOCK_EXIT(as);
}
static void
vmu_clear_calc()
{
if (vmu_data.vmu_system != NULL) {
vmu_free_entity(vmu_data.vmu_system);
vmu_data.vmu_system = NULL;
}
if (vmu_data.vmu_zones_hash != NULL)
i_mod_hash_clear_nosync(vmu_data.vmu_zones_hash);
if (vmu_data.vmu_projects_col_hash != NULL)
i_mod_hash_clear_nosync(vmu_data.vmu_projects_col_hash);
if (vmu_data.vmu_rusers_col_hash != NULL)
i_mod_hash_clear_nosync(vmu_data.vmu_rusers_col_hash);
if (vmu_data.vmu_eusers_col_hash != NULL)
i_mod_hash_clear_nosync(vmu_data.vmu_eusers_col_hash);
i_mod_hash_clear_nosync(vmu_data.vmu_all_vnodes_hash);
i_mod_hash_clear_nosync(vmu_data.vmu_all_amps_hash);
}
static void
vmu_free_extra()
{
vmu_bound_t *tb;
vmu_object_t *to;
vmu_entity_t *te;
vmu_zone_t *tz;
while (vmu_data.vmu_free_bounds != NULL) {
tb = vmu_data.vmu_free_bounds;
vmu_data.vmu_free_bounds = vmu_data.vmu_free_bounds->vmb_next;
kmem_cache_free(vmu_bound_cache, tb);
}
while (vmu_data.vmu_free_objects != NULL) {
to = vmu_data.vmu_free_objects;
vmu_data.vmu_free_objects =
vmu_data.vmu_free_objects->vmo_next;
kmem_cache_free(vmu_object_cache, to);
}
while (vmu_data.vmu_free_entities != NULL) {
te = vmu_data.vmu_free_entities;
vmu_data.vmu_free_entities =
vmu_data.vmu_free_entities->vme_next;
if (te->vme_vnode_hash != NULL)
mod_hash_destroy_hash(te->vme_vnode_hash);
if (te->vme_amp_hash != NULL)
mod_hash_destroy_hash(te->vme_amp_hash);
if (te->vme_anon_hash != NULL)
mod_hash_destroy_hash(te->vme_anon_hash);
kmem_free(te, sizeof (vmu_entity_t));
}
while (vmu_data.vmu_free_zones != NULL) {
tz = vmu_data.vmu_free_zones;
vmu_data.vmu_free_zones =
vmu_data.vmu_free_zones->vmz_next;
if (tz->vmz_projects_hash != NULL)
mod_hash_destroy_hash(tz->vmz_projects_hash);
if (tz->vmz_tasks_hash != NULL)
mod_hash_destroy_hash(tz->vmz_tasks_hash);
if (tz->vmz_rusers_hash != NULL)
mod_hash_destroy_hash(tz->vmz_rusers_hash);
if (tz->vmz_eusers_hash != NULL)
mod_hash_destroy_hash(tz->vmz_eusers_hash);
kmem_free(tz, sizeof (vmu_zone_t));
}
}
extern kcondvar_t *pr_pid_cv;
/*
 * Perform one full usage calculation: reset per-calculation state,
 * then walk every process slot, /proc-locking each process so its
 * address space stays stable while vmu_calculate_proc() charges it.
 * pidlock is dropped while a single process is examined and re-taken
 * to advance the walk.
 */
static void
vmu_calculate()
{
int i = 0;
int ret;
proc_t *p;
vmu_clear_calc();
if (vmu_data.vmu_calc_flags & VMUSAGE_SYSTEM)
vmu_data.vmu_system = vmu_alloc_entity(0, VMUSAGE_SYSTEM,
ALL_ZONES);
mutex_enter(&pidlock);
for (i = 0; i < v.v_proc; i++) {
again:
p = pid_entry(i);
if (p == NULL)
continue;
mutex_enter(&p->p_lock);
mutex_exit(&pidlock);
if (panicstr) {
/* System is panicking; abandon the walk. */
mutex_exit(&p->p_lock);
return;
}
ret = sprtrylock_proc(p);
if (ret == -1) {
/* Process is exiting/unusable: skip it. */
mutex_exit(&p->p_lock);
mutex_enter(&pidlock);
continue;
} else if (ret == 1) {
/* Proc is busy: wait, then re-fetch this slot. */
sprwaitlock_proc(p);
mutex_enter(&pidlock);
goto again;
}
mutex_exit(&p->p_lock);
vmu_calculate_proc(p);
mutex_enter(&p->p_lock);
sprunlock(p);
mutex_enter(&pidlock);
}
mutex_exit(&pidlock);
/* Drain the free lists accumulated during the walk. */
vmu_free_extra();
}
/*
 * Allocate a result cache holding `nres` zeroed vmusage_t records,
 * tagged with the flags used to compute them, with an initial
 * reference held by the caller.
 */
vmu_cache_t *
vmu_cache_alloc(size_t nres, uint_t flags)
{
	vmu_cache_t *cache;

	cache = kmem_zalloc(sizeof (vmu_cache_t), KM_SLEEP);
	cache->vmc_results = kmem_zalloc(nres * sizeof (vmusage_t), KM_SLEEP);
	cache->vmc_nresults = nres;
	cache->vmc_flags = flags;
	cache->vmc_refcnt = 1;
	return (cache);
}
/*
 * Take an additional reference on a result cache.  Caller must hold
 * vmu_data.vmu_lock.
 */
static void
vmu_cache_hold(vmu_cache_t *cache)
{
ASSERT(MUTEX_HELD(&vmu_data.vmu_lock));
cache->vmc_refcnt++;
}
/*
 * Drop a reference on a result cache, freeing it (results array and
 * header) when the last reference goes away.  Caller must hold
 * vmu_data.vmu_lock.
 */
static void
vmu_cache_rele(vmu_cache_t *cache)
{
	ASSERT(MUTEX_HELD(&vmu_data.vmu_lock));
	ASSERT(cache->vmc_refcnt > 0);

	if (--cache->vmc_refcnt != 0)
		return;

	kmem_free(cache->vmc_results,
	    sizeof (vmusage_t) * cache->vmc_nresults);
	kmem_free(cache, sizeof (vmu_cache_t));
}
/*
 * Copy cached results out to the user's buffer, filtering by the
 * caller's flags and zone visibility.  *nres is copied in as the
 * buffer capacity and copied back out as the number of matching
 * results; if the buffer is too small the copy stops short and
 * EOVERFLOW is returned (count still reflects the full total).
 * `cpflg` selects the ddi_copyin/ddi_copyout data model.
 */
static int
vmu_copyout_results(vmu_cache_t *cache, vmusage_t *buf, size_t *nres,
uint_t flags, int cpflg)
{
vmusage_t *result, *out_result;
vmusage_t dummy;
size_t i, count = 0;
size_t bufsize;
int ret = 0;
uint_t types = 0;
if (nres != NULL) {
if (ddi_copyin((caddr_t)nres, &bufsize, sizeof (size_t), cpflg))
return (set_errno(EFAULT));
} else {
bufsize = 0;
}
/* Map the request flags onto the result types to be emitted. */
if ((flags & VMUSAGE_SYSTEM) && curproc->p_zone == global_zone)
types |= VMUSAGE_SYSTEM;
if (flags & (VMUSAGE_ZONE | VMUSAGE_ALL_ZONES))
types |= VMUSAGE_ZONE;
if (flags & (VMUSAGE_PROJECTS | VMUSAGE_ALL_PROJECTS |
VMUSAGE_COL_PROJECTS))
types |= VMUSAGE_PROJECTS;
if (flags & (VMUSAGE_TASKS | VMUSAGE_ALL_TASKS))
types |= VMUSAGE_TASKS;
if (flags & (VMUSAGE_RUSERS | VMUSAGE_ALL_RUSERS | VMUSAGE_COL_RUSERS))
types |= VMUSAGE_RUSERS;
if (flags & (VMUSAGE_EUSERS | VMUSAGE_ALL_EUSERS | VMUSAGE_COL_EUSERS))
types |= VMUSAGE_EUSERS;
out_result = buf;
for (result = cache->vmc_results, i = 0;
i < cache->vmc_nresults; result++, i++) {
/* Non-global zones only ever see their own zone's rows. */
if (curproc->p_zone != global_zone &&
curproc->p_zone->zone_id != result->vmu_zoneid)
continue;
/*
 * A non-global zone asking for SYSTEM usage gets its own
 * zone's row re-labelled as a SYSTEM result.
 */
if (curproc->p_zone != global_zone &&
(flags & VMUSAGE_SYSTEM) != 0 &&
result->vmu_type == VMUSAGE_ZONE) {
count++;
if (out_result != NULL) {
if (bufsize < count) {
ret = set_errno(EOVERFLOW);
} else {
dummy = *result;
dummy.vmu_zoneid = ALL_ZONES;
dummy.vmu_id = 0;
dummy.vmu_type = VMUSAGE_SYSTEM;
if (ddi_copyout(&dummy, out_result,
sizeof (vmusage_t), cpflg))
return (set_errno(EFAULT));
out_result++;
}
}
}
if ((result->vmu_type & types) == 0)
continue;
/* Collapsed (ALL_ZONES) rows only when explicitly asked for. */
if (result->vmu_zoneid == ALL_ZONES) {
if (result->vmu_type == VMUSAGE_PROJECTS &&
(flags & VMUSAGE_COL_PROJECTS) == 0)
continue;
if (result->vmu_type == VMUSAGE_EUSERS &&
(flags & VMUSAGE_COL_EUSERS) == 0)
continue;
if (result->vmu_type == VMUSAGE_RUSERS &&
(flags & VMUSAGE_COL_RUSERS) == 0)
continue;
}
/* Other zones' rows only under the corresponding ALL_*/COL_* flag. */
if (result->vmu_zoneid != curproc->p_zone->zone_id) {
if (result->vmu_type == VMUSAGE_ZONE &&
(flags & VMUSAGE_ALL_ZONES) == 0)
continue;
if (result->vmu_type == VMUSAGE_PROJECTS &&
(flags & (VMUSAGE_ALL_PROJECTS |
VMUSAGE_COL_PROJECTS)) == 0)
continue;
if (result->vmu_type == VMUSAGE_TASKS &&
(flags & VMUSAGE_ALL_TASKS) == 0)
continue;
if (result->vmu_type == VMUSAGE_RUSERS &&
(flags & (VMUSAGE_ALL_RUSERS |
VMUSAGE_COL_RUSERS)) == 0)
continue;
if (result->vmu_type == VMUSAGE_EUSERS &&
(flags & (VMUSAGE_ALL_EUSERS |
VMUSAGE_COL_EUSERS)) == 0)
continue;
}
count++;
if (out_result != NULL) {
if (bufsize < count) {
ret = set_errno(EOVERFLOW);
} else {
if (ddi_copyout(result, out_result,
sizeof (vmusage_t), cpflg))
return (set_errno(EFAULT));
out_result++;
}
}
}
/* Report the total number of matching results. */
if (nres != NULL)
if (ddi_copyout(&count, (void *)nres, sizeof (size_t), cpflg))
return (set_errno(EFAULT));
return (ret);
}
/*
 * Entry point for the getvmusage(2) system call.  Returns usage data
 * for the entity types in `flags`; results younger than `age` seconds
 * are served from the cache.  Only one thread calculates at a time:
 * later requesters fold their flags into the pending set and wait on
 * vmu_cv, then retry against the fresh cache.
 *
 * Returns 0, or sets errno to EINVAL (bad flags), EINTR (signal while
 * waiting), EFAULT/EOVERFLOW (from result copyout).
 */
int
vm_getusage(uint_t flags, time_t age, vmusage_t *buf, size_t *nres, int cpflg)
{
vmu_entity_t *entity;
vmusage_t *result;
int ret = 0;
int cacherecent = 0;
hrtime_t now;
uint_t flags_orig;
/*
 * Keep the caller's flags for copyout filtering; the working flags
 * may be rewritten below for non-global zones.
 */
flags_orig = flags;
/* Non-global zones may not see other zones: narrow their requests. */
if (curproc->p_zone != global_zone) {
if (flags & (VMUSAGE_ALL_PROJECTS | VMUSAGE_COL_PROJECTS)) {
flags &= ~(VMUSAGE_ALL_PROJECTS | VMUSAGE_COL_PROJECTS);
flags |= VMUSAGE_PROJECTS;
}
if (flags & (VMUSAGE_ALL_RUSERS | VMUSAGE_COL_RUSERS)) {
flags &= ~(VMUSAGE_ALL_RUSERS | VMUSAGE_COL_RUSERS);
flags |= VMUSAGE_RUSERS;
}
if (flags & (VMUSAGE_ALL_EUSERS | VMUSAGE_COL_EUSERS)) {
flags &= ~(VMUSAGE_ALL_EUSERS | VMUSAGE_COL_EUSERS);
flags |= VMUSAGE_EUSERS;
}
if (flags & VMUSAGE_SYSTEM) {
/* "System" inside a zone means the zone itself. */
flags &= ~VMUSAGE_SYSTEM;
flags |= VMUSAGE_ZONE;
}
}
/* Reject unknown flags and empty requests. */
if ((flags & (~VMUSAGE_MASK)) != 0)
return (set_errno(EINVAL));
if ((flags & VMUSAGE_MASK) == 0)
return (set_errno(EINVAL));
mutex_enter(&vmu_data.vmu_lock);
now = gethrtime();
start:
if (vmu_data.vmu_cache != NULL) {
vmu_cache_t *cache;
if ((vmu_data.vmu_cache->vmc_timestamp +
((hrtime_t)age * NANOSEC)) > now)
cacherecent = 1;
/* A fresh cache covering all requested flags can be reused. */
if ((vmu_data.vmu_cache->vmc_flags & flags) == flags &&
cacherecent == 1) {
cache = vmu_data.vmu_cache;
vmu_cache_hold(cache);
mutex_exit(&vmu_data.vmu_lock);
ret = vmu_copyout_results(cache, buf, nres, flags_orig,
cpflg);
mutex_enter(&vmu_data.vmu_lock);
vmu_cache_rele(cache);
if (vmu_data.vmu_pending_waiters > 0)
cv_broadcast(&vmu_data.vmu_cv);
mutex_exit(&vmu_data.vmu_lock);
return (ret);
}
/*
 * Cache is fresh but lacks some flags: recalculate with the
 * union so the new cache also satisfies recent requesters.
 */
if (cacherecent == 1)
flags = vmu_data.vmu_cache->vmc_flags | flags;
}
/* Become the calculating thread if nobody else is. */
if (vmu_data.vmu_calc_thread == NULL) {
vmu_cache_t *cache;
vmu_data.vmu_calc_thread = curthread;
vmu_data.vmu_calc_flags = flags;
vmu_data.vmu_entities = NULL;
vmu_data.vmu_nentities = 0;
if (vmu_data.vmu_pending_waiters > 0)
vmu_data.vmu_calc_flags |=
vmu_data.vmu_pending_flags;
vmu_data.vmu_pending_flags = 0;
mutex_exit(&vmu_data.vmu_lock);
vmu_calculate();
mutex_enter(&vmu_data.vmu_lock);
/* Publish the new cache, replacing (releasing) the old one. */
if (vmu_data.vmu_cache != NULL)
vmu_cache_rele(vmu_data.vmu_cache);
cache = vmu_data.vmu_cache =
vmu_cache_alloc(vmu_data.vmu_nentities,
vmu_data.vmu_calc_flags);
result = cache->vmc_results;
for (entity = vmu_data.vmu_entities; entity != NULL;
entity = entity->vme_next) {
*result = entity->vme_result;
result++;
}
cache->vmc_timestamp = gethrtime();
vmu_cache_hold(cache);
vmu_data.vmu_calc_flags = 0;
vmu_data.vmu_calc_thread = NULL;
if (vmu_data.vmu_pending_waiters > 0)
cv_broadcast(&vmu_data.vmu_cv);
mutex_exit(&vmu_data.vmu_lock);
/* Copy out with the lock dropped; the hold keeps the cache alive. */
ret = vmu_copyout_results(cache, buf, nres, flags_orig, cpflg);
mutex_enter(&vmu_data.vmu_lock);
vmu_cache_rele(cache);
mutex_exit(&vmu_data.vmu_lock);
return (ret);
}
/* Another thread is calculating: record our flags and wait. */
vmu_data.vmu_pending_flags |= flags;
vmu_data.vmu_pending_waiters++;
while (vmu_data.vmu_calc_thread != NULL) {
if (cv_wait_sig(&vmu_data.vmu_cv,
&vmu_data.vmu_lock) == 0) {
/* Interrupted by a signal. */
vmu_data.vmu_pending_waiters--;
mutex_exit(&vmu_data.vmu_lock);
return (set_errno(EINTR));
}
}
vmu_data.vmu_pending_waiters--;
goto start;
}