#include <sys/systm.h>
#include <sys/cmn_err.h>
#include <sys/sysmacros.h>
#include <sys/memlist.h>
#include <sys/mem_config.h>
#include <sys/kmem.h>
#include <sys/cpuvar.h>
#include <sys/debug.h>
#include <sys/machsystm.h>
#include <sys/cpu_module.h>
#include <sys/kstat.h>
#include <sys/atomic.h>
#include <vm/hat.h>
#include <vm/seg_kmem.h>
#include <vm/hat_sfmmu.h>
#include <sys/time.h>
#include <sys/callb.h>
#include <sys/ontrap.h>
/* Physical address as handled by the scrubber. */
typedef uint64_t ms_paddr_t;

/* Public interface, called from machine-dependent startup and error code. */
int memscrub_add_span(pfn_t pfn, pgcnt_t pages);
int memscrub_delete_span(pfn_t pfn, pgcnt_t pages);
int memscrub_init(void);
void memscrub_induced_error(void);
/* Minimum installed memory (in pages) for the scrubber to run at all. */
#define	MEMSCRUB_MIN_PAGES	(32 * 1024 * 1024 / PAGESIZE)

/* Default period for one complete pass over memory: 12 hours. */
#define	MEMSCRUB_DFL_PERIOD_SEC	(12 * 60 * 60)

/* Default number of pages scanned per wakeup. */
#define	MEMSCRUB_DFL_SPAN_PAGES	((32 * 1024 * 1024) / PAGESIZE)

/* Default scheduling priority of the scrubber thread. */
#define	MEMSCRUB_DFL_THREAD_PRI	MINCLSYSPRI

/* Size of one scrub read, in bytes, and its log2. */
#define	MEMSCRUB_BLOCK_SIZE		256
#define	MEMSCRUB_BLOCK_SIZE_SHIFT	8
#define	MEMSCRUB_BLOCKS_PER_PAGE	(PAGESIZE >> MEMSCRUB_BLOCK_SIZE_SHIFT)

/*
 * Scrub blocks per page for each supported MMU page size.  The expansions
 * are parenthesized so the macros remain correct when used inside larger
 * expressions (the originals were bare `SIZE >> SHIFT' expansions).
 */
#define	MEMSCRUB_BPP4M		(MMU_PAGESIZE4M >> MEMSCRUB_BLOCK_SIZE_SHIFT)
#define	MEMSCRUB_BPP512K	(MMU_PAGESIZE512K >> MEMSCRUB_BLOCK_SIZE_SHIFT)
#define	MEMSCRUB_BPP64K		(MMU_PAGESIZE64K >> MEMSCRUB_BLOCK_SIZE_SHIFT)
#define	MEMSCRUB_BPP		(MMU_PAGESIZE >> MEMSCRUB_BLOCK_SIZE_SHIFT)

/*
 * Warning issued when the computed sleep interval drops to zero.  In
 * non-DEBUG builds the leading '!' keeps the message off the console
 * (log only); DEBUG builds print it everywhere.
 */
#ifdef DEBUG
#define	MEMSCRUB_OVERRIDE_MSG	"Memory scrubber sleep time is zero " \
	"seconds, consuming entire CPU."
#else
#define	MEMSCRUB_OVERRIDE_MSG	"!Memory scrubber sleep time is zero " \
	"seconds, consuming entire CPU."
#endif
/*
 * Tunables (settable via /etc/system or a debugger).
 */
uint_t disable_memscrub = 0;	/* nonzero: scrubber thread exits */
uint_t pause_memscrub = 0;	/* nonzero: thread runs but skips scanning */
uint_t read_all_memscrub = 0;	/* request one full scan of all memory */
uint_t memscrub_verbose = 0;	/* log progress/pause/resume messages */
uint_t memscrub_all_idle = 0;	/* not referenced in this file — TODO confirm use elsewhere */
uint_t memscrub_span_pages = MEMSCRUB_DFL_SPAN_PAGES;	/* pages per scan */
uint_t memscrub_period_sec = MEMSCRUB_DFL_PERIOD_SEC;	/* full-pass period */
uint_t memscrub_thread_pri = MEMSCRUB_DFL_THREAD_PRI;	/* thread priority */
uint_t memscrub_delay_start_sec = 5 * 60;	/* boot delay before first pass */
uint_t memscrub_override_ticks = 1;	/* sleep used when interval computes to 0 */
/* Internal helpers. */
static void memscrubber(void);
static void memscrub_cleanup(void);
static int memscrub_add_span_gen(pfn_t, pgcnt_t, struct memlist **, uint_t *);
static int memscrub_verify_span(ms_paddr_t *addrp, pgcnt_t *pagesp);
static void memscrub_scan(uint_t blks, ms_paddr_t src);

/* Protected by memscrub_lock. */
static struct memlist *memscrub_memlist;	/* spans of memory to scrub */
static uint_t memscrub_phys_pages;		/* total pages in the list */

static kcondvar_t memscrub_cv;
static kmutex_t memscrub_lock;

static void memscrub_init_mem_config(void);
static void memscrub_uninit_mem_config(void);

/* Node recording the start address of a span containing retired pages. */
typedef struct memscrub_page_retire_span {
	ms_paddr_t address;
	struct memscrub_page_retire_span *next;
} memscrub_page_retire_span_t;

static memscrub_page_retire_span_t *memscrub_page_retire_span_list = NULL;

static void memscrub_page_retire_span_add(ms_paddr_t);
static void memscrub_page_retire_span_delete(ms_paddr_t);
static int memscrub_page_retire_span_search(ms_paddr_t);
static void memscrub_page_retire_span_list_update(void);

/* Set by memscrub_induced_error(); consumed by memscrub_scan(). */
static int add_to_page_retire_list = 0;

/* Statistics exported through the "memscrub_kstat" kstat. */
static struct memscrub_kstats {
	kstat_named_t done_early;	/* passes finished before deadline */
	kstat_named_t early_sec;	/* cumulative seconds finished early */
	kstat_named_t done_late;	/* passes finished after deadline */
	kstat_named_t late_sec;		/* cumulative seconds finished late */
	kstat_named_t interval_ticks;	/* last computed sleep interval */
	kstat_named_t force_run;	/* memscrub_run() invocations */
	kstat_named_t errors_found;	/* traps taken during scrub reads */
} memscrub_counts = {
	{ "done_early", KSTAT_DATA_UINT32 },
	{ "early_sec", KSTAT_DATA_UINT32 },
	{ "done_late", KSTAT_DATA_UINT32 },
	{ "late_sec", KSTAT_DATA_UINT32 },
	{ "interval_ticks", KSTAT_DATA_UINT32 },
	{ "force_run", KSTAT_DATA_UINT32 },
	{ "errors_found", KSTAT_DATA_UINT32 },
};

/* Update helpers for the ui32 counters above. */
#define	MEMSCRUB_STAT_INC(stat)		memscrub_counts.stat.value.ui32++
#define	MEMSCRUB_STAT_SET(stat, val)	memscrub_counts.stat.value.ui32 = (val)
#define	MEMSCRUB_STAT_NINC(stat, val)	memscrub_counts.stat.value.ui32 += (val)

static struct kstat *memscrub_ksp = (struct kstat *)NULL;

/* Outstanding wakeup timeout id; 0 when no timeout is pending. */
static timeout_id_t memscrub_tid = 0;
/*
 * Called at startup: if enough physical memory is installed, mirror
 * phys_install into the scrub list, create the kstats, start the
 * scrubber thread, and register for DR memory-config callbacks.
 * Returns 0 on success (including the "too little memory" no-op case),
 * -1 if a span could not be added (all state is torn down again).
 */
int
memscrub_init(void)
{
	struct memlist *src;

	/* Only scrub systems with at least MEMSCRUB_MIN_PAGES installed. */
	if (physinstalled >= MEMSCRUB_MIN_PAGES) {
		mutex_init(&memscrub_lock, NULL, MUTEX_DRIVER, NULL);
		cv_init(&memscrub_cv, NULL, CV_DRIVER, NULL);

		/* Copy every span of installed physical memory. */
		for (src = phys_install; src; src = src->ml_next) {
			if (memscrub_add_span(
			    (pfn_t)(src->ml_address >> PAGESHIFT),
			    (pgcnt_t)(src->ml_size >> PAGESHIFT))) {
				memscrub_cleanup();
				return (-1);
			}
		}

		memscrub_ksp = kstat_create("unix", 0, "memscrub_kstat",
		    "misc", KSTAT_TYPE_NAMED,
		    sizeof (memscrub_counts) / sizeof (kstat_named_t),
		    KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_WRITABLE);
		if (memscrub_ksp) {
			memscrub_ksp->ks_data = (void *)&memscrub_counts;
			kstat_install(memscrub_ksp);
		} else {
			/* Non-fatal: scrubbing works without statistics. */
			cmn_err(CE_NOTE, "Memscrubber cannot create kstats\n");
		}

		/* Start the scrubber thread in the kernel process p0. */
		(void) thread_create(NULL, 0, (void (*)())memscrubber,
		    NULL, 0, &p0, TS_RUN, memscrub_thread_pri);

		/* Track future DR memory add/delete operations. */
		memscrub_init_mem_config();
	}

	return (0);
}
/*
 * Tear down all scrubber state: unregister the mem-config callbacks,
 * drain the scrub memlist, delete the kstats, and destroy the lock
 * and condition variable.  Called on init failure and on thread exit.
 */
static void
memscrub_cleanup(void)
{
	memscrub_uninit_mem_config();
	/* Deleting the head span each iteration drains the whole list. */
	while (memscrub_memlist) {
		(void) memscrub_delete_span(
		    (pfn_t)(memscrub_memlist->ml_address >> PAGESHIFT),
		    (pgcnt_t)(memscrub_memlist->ml_size >> PAGESHIFT));
	}
	if (memscrub_ksp)
		kstat_delete(memscrub_ksp);
	cv_destroy(&memscrub_cv);
	mutex_destroy(&memscrub_lock);
}
#ifdef MEMSCRUB_DEBUG
/* Debug helper: print every span of a memlist. */
static void
memscrub_printmemlist(char *title, struct memlist *listp)
{
	struct memlist *list;

	cmn_err(CE_CONT, "%s:\n", title);
	for (list = listp; list; list = list->ml_next) {
		cmn_err(CE_CONT, "addr = 0x%llx, size = 0x%llx\n",
		    list->ml_address, list->ml_size);
	}
}
#endif
/* timeout(9F) callback: wake the scrubber thread. */
static void
memscrub_wakeup(void *c)
{
	/*
	 * Take the lock so the signal cannot race ahead of the scrubber
	 * entering cv_wait() and be lost.
	 */
	mutex_enter(&memscrub_lock);
	cv_signal(&memscrub_cv);
	mutex_exit(&memscrub_lock);
}
/*
 * External request to run a scrub pass now: cancel the pending sleep
 * timeout (if any) and wake the scrubber thread directly.
 */
void
memscrub_run(void)
{
	MEMSCRUB_STAT_INC(force_run);
	if (memscrub_tid) {
		(void) untimeout(memscrub_tid);
		memscrub_wakeup((void *)NULL);
	}
}
/*
 * Compute how long (in clock ticks) the scrubber should sleep between
 * scans so that all of memory is covered once per memscrub_period_sec:
 * roughly period / (total pages / pages per scan).  Returns the period
 * itself when memory fits in a single span, and 0 is possible when the
 * ratio overwhelms the period (caller overrides that case).
 */
static uint_t
compute_interval_ticks(void)
{
	/*
	 * Snapshot the tunables into "safe" copies so a concurrent write
	 * to memscrub_span_pages or memscrub_phys_pages cannot zero an
	 * operand between the test below and the division.
	 */
	static uint_t msp_safe, mpp_safe;
	static uint_t interval_ticks, period_ticks;
	msp_safe = memscrub_span_pages;
	mpp_safe = memscrub_phys_pages;
	period_ticks = memscrub_period_sec * hz;
	interval_ticks = period_ticks;
	ASSERT(mutex_owned(&memscrub_lock));
	if ((msp_safe != 0) && (mpp_safe != 0)) {
		/*
		 * Use the mpp_safe snapshot here too (the original read
		 * the live memscrub_phys_pages, defeating the snapshot's
		 * purpose); under memscrub_lock the values are identical.
		 */
		if (mpp_safe <= msp_safe) {
			interval_ticks = period_ticks;
		} else {
			interval_ticks = (period_ticks /
			    (mpp_safe / msp_safe));
		}
	}
	return (interval_ticks);
}
/*
 * Scrubber thread main loop.  Each iteration: compute a sleep interval,
 * sleep (CPR-safe) until a timeout or memscrub_run() wakes us, then scan
 * the next memscrub_span_pages pages.  When a full pass over memory
 * completes, account for finishing early or late against the deadline
 * and schedule the next pass.  Exits (and tears everything down) when
 * disable_memscrub is set or no pages remain.
 */
void
memscrubber(void)
{
	ms_paddr_t address, addr;
	time_t deadline;
	pgcnt_t pages;
	uint_t reached_end = 1;
	uint_t paused_message = 0;
	uint_t interval_ticks = 0;
	uint_t sleep_warn_printed = 0;
	callb_cpr_t cprinfo;

	/* Register with the CPR (suspend/resume) framework. */
	CALLB_CPR_INIT(&cprinfo, &memscrub_lock, callb_generic_cpr, "memscrub");
	mutex_enter(&memscrub_lock);

	if (memscrub_memlist == NULL) {
		cmn_err(CE_WARN, "memscrub_memlist not initialized.");
		goto memscrub_exit;
	}

	address = memscrub_memlist->ml_address;

	/* Delay the first pass so boot-time activity settles down. */
	deadline = gethrestime_sec() + memscrub_delay_start_sec;

	for (;;) {
		if (disable_memscrub)
			break;

		interval_ticks = compute_interval_ticks();

		/* While paused, wake once a second just to recheck flags. */
		if (interval_ticks == 0 && pause_memscrub) {
			interval_ticks = hz;
		}

		/*
		 * A zero interval would consume the entire CPU; fall back
		 * to the override value and warn once per pass.
		 */
		if (interval_ticks == 0) {
			interval_ticks = memscrub_override_ticks;
			if (!sleep_warn_printed) {
				cmn_err(CE_NOTE, MEMSCRUB_OVERRIDE_MSG);
				sleep_warn_printed = 1;
			}
		}

		MEMSCRUB_STAT_SET(interval_ticks, interval_ticks);

		/*
		 * End of a full pass: record early/late statistics and
		 * establish the deadline for the next pass.
		 */
		if (reached_end && !pause_memscrub) {
			time_t now = gethrestime_sec();

			if (now >= deadline) {
				MEMSCRUB_STAT_INC(done_late);
				MEMSCRUB_STAT_NINC(late_sec, now - deadline);
				/* Behind schedule: start again immediately. */
				interval_ticks = 0;
				deadline = now + memscrub_period_sec;
			} else {
				/* Ahead of schedule: sleep to the deadline. */
				interval_ticks = (deadline - now) * hz;
				MEMSCRUB_STAT_INC(done_early);
				MEMSCRUB_STAT_NINC(early_sec, deadline - now);
				deadline += memscrub_period_sec;
			}

			reached_end = 0;
			sleep_warn_printed = 0;
		}

		if (interval_ticks != 0) {
			/*
			 * Sleep (CPR-safe) until the timeout fires or
			 * memscrub_run() signals us directly.
			 */
			CALLB_CPR_SAFE_BEGIN(&cprinfo);
			memscrub_tid = timeout(memscrub_wakeup, NULL,
			    interval_ticks);
			cv_wait(&memscrub_cv, &memscrub_lock);
			memscrub_tid = 0;
			CALLB_CPR_SAFE_END(&cprinfo, &memscrub_lock);
		}

		if (memscrub_phys_pages == 0) {
			cmn_err(CE_WARN, "Memory scrubber has 0 pages to read");
			goto memscrub_exit;
		}

		if (!pause_memscrub) {
			if (paused_message) {
				paused_message = 0;
				if (memscrub_verbose)
					cmn_err(CE_NOTE, "Memory scrubber "
					    "resuming");
			}

			if (read_all_memscrub) {
				/* One-shot request: scan all of memory now. */
				if (memscrub_verbose)
					cmn_err(CE_NOTE, "Memory scrubber "
					    "reading all memory per request");

				addr = memscrub_memlist->ml_address;
				reached_end = 0;
				while (!reached_end) {
					if (disable_memscrub)
						break;
					pages = memscrub_phys_pages;
					reached_end = memscrub_verify_span(
					    &addr, &pages);
					memscrub_scan(pages *
					    MEMSCRUB_BLOCKS_PER_PAGE, addr);
					addr += ((uint64_t)pages * PAGESIZE);
				}
				read_all_memscrub = 0;
			}

			/* Scan the next span of the regular pass. */
			pages = memscrub_span_pages;

			if (disable_memscrub)
				break;

			reached_end = memscrub_verify_span(&address,
			    &pages);
			memscrub_scan(pages * MEMSCRUB_BLOCKS_PER_PAGE,
			    address);
			address += ((uint64_t)pages * PAGESIZE);
		}

		if (pause_memscrub && !paused_message) {
			paused_message = 1;
			if (memscrub_verbose)
				cmn_err(CE_NOTE, "Memory scrubber paused");
		}
	}
memscrub_exit:
	cmn_err(CE_NOTE, "Memory scrubber exiting");
	/* CALLB_CPR_EXIT releases memscrub_lock. */
	CALLB_CPR_EXIT(&cprinfo);
	memscrub_cleanup();
	thread_exit();
}
/*
 * Normalize a (start address, page count) scan request against the
 * current scrub memlist:
 *  - if the address fell into a hole (e.g. after a DR delete), snap it
 *    forward to the next span, wrapping to the first span when past
 *    the end of the list;
 *  - clip the page count so the request stays inside its span.
 * Returns 1 when the clipped request ends exactly at the end of the
 * last span (a full pass is complete), 0 otherwise.  The normalized
 * values are stored back through addrp/pagesp.
 */
static int
memscrub_verify_span(ms_paddr_t *addrp, pgcnt_t *pagesp)
{
	struct memlist *mlp;
	ms_paddr_t address = *addrp;
	uint64_t bytes = (uint64_t)*pagesp * PAGESIZE;
	uint64_t bytes_remaining;
	int reached_end = 0;

	ASSERT(mutex_owned(&memscrub_lock));

	/* Find the span containing, or the first span after, the address. */
	for (mlp = memscrub_memlist; mlp != NULL; mlp = mlp->ml_next) {
		if (address < mlp->ml_address) {
			/* Address is in a hole: snap to this span's start. */
			address = mlp->ml_address;
			break;
		}
		if (address < (mlp->ml_address + mlp->ml_size))
			break;
	}

	/* Past the last span: wrap back to the first one. */
	if (mlp == NULL) {
		mlp = memscrub_memlist;
		address = mlp->ml_address;
	}

	/* Clip the request to the end of the containing span. */
	bytes_remaining = (mlp->ml_address + mlp->ml_size) - address;
	if (bytes > bytes_remaining)
		bytes = bytes_remaining;

	if ((mlp->ml_next == NULL) &&
	    ((mlp->ml_address + mlp->ml_size) == (address + bytes)))
		reached_end = 1;

	*addrp = address;
	*pagesp = bytes / PAGESIZE;

	return (reached_end);
}
/*
 * Public entry point: add the span [pfn, pfn + pages) to the scrub
 * list under memscrub_lock.  Returns 0 on success, -1 on overlap or
 * allocation failure (see memscrub_add_span_gen()).
 */
int
memscrub_add_span(pfn_t pfn, pgcnt_t pages)
{
#ifdef MEMSCRUB_DEBUG
	ms_paddr_t address = (ms_paddr_t)pfn << PAGESHIFT;
	uint64_t bytes = (uint64_t)pages << PAGESHIFT;
#endif
	int retval;

	mutex_enter(&memscrub_lock);

#ifdef MEMSCRUB_DEBUG
	memscrub_printmemlist("memscrub_memlist before", memscrub_memlist);
	cmn_err(CE_CONT, "memscrub_phys_pages: 0x%x\n", memscrub_phys_pages);
	cmn_err(CE_CONT, "memscrub_add_span: address: 0x%llx"
	    " size: 0x%llx\n", address, bytes);
#endif

	retval = memscrub_add_span_gen(pfn, pages, &memscrub_memlist,
	    &memscrub_phys_pages);

#ifdef MEMSCRUB_DEBUG
	memscrub_printmemlist("memscrub_memlist after", memscrub_memlist);
	cmn_err(CE_CONT, "memscrub_phys_pages: 0x%x\n", memscrub_phys_pages);
#endif

	mutex_exit(&memscrub_lock);

	return (retval);
}
/*
 * Insert the span [pfn, pfn + pages) into the sorted, doubly linked
 * memlist *list, coalescing with adjacent spans where possible, and
 * add `pages' to *npgs on success.  Returns 0 on success, -1 when the
 * new span overlaps an existing one or a node cannot be allocated.
 */
static int
memscrub_add_span_gen(
	pfn_t pfn,
	pgcnt_t pages,
	struct memlist **list,
	uint_t *npgs)
{
	ms_paddr_t address = (ms_paddr_t)pfn << PAGESHIFT;
	uint64_t bytes = (uint64_t)pages << PAGESHIFT;
	struct memlist *dst;
	struct memlist *prev, *next;
	int retval = 0;

	/* Allocate a node up front; it is freed again if we coalesce. */
	dst = (struct memlist *)
	    kmem_alloc(sizeof (struct memlist), KM_NOSLEEP);
	if (dst == NULL) {
		retval = -1;
		goto add_done;
	}
	dst->ml_address = address;
	dst->ml_size = bytes;

	/* Empty list: the new span becomes the entire list. */
	if (*list == NULL) {
		dst->ml_prev = NULL;
		dst->ml_next = NULL;
		*list = dst;
		goto add_done;
	}

	/* Walk the address-sorted list to find the insertion point. */
	for (prev = NULL, next = *list;
	    next != NULL;
	    prev = next, next = next->ml_next) {

		/* New span lies entirely beyond this one: keep walking. */
		if (address > (next->ml_address + next->ml_size))
			continue;

		/* New span abuts the front of `next': extend it downward. */
		if ((address + bytes) == next->ml_address) {
			kmem_free(dst, sizeof (struct memlist));
			next->ml_address = address;
			next->ml_size += bytes;
			goto add_done;
		}

		/* New span abuts the end of `next': extend it upward. */
		if (address == (next->ml_address + next->ml_size)) {
			kmem_free(dst, sizeof (struct memlist));

			if (next->ml_next) {
				/* Would run into the following span. */
				if ((address + bytes) >
				    next->ml_next->ml_address) {
					retval = -1;
					goto add_done;
				}

				/*
				 * Exactly bridges the gap: fold `next'
				 * into its successor and free `next'.
				 */
				if ((address + bytes) ==
				    next->ml_next->ml_address) {
					struct memlist *mlp = next->ml_next;

					if (next == *list)
						*list = next->ml_next;
					mlp->ml_address = next->ml_address;
					mlp->ml_size += next->ml_size;
					mlp->ml_size += bytes;
					if (next->ml_prev)
						next->ml_prev->ml_next = mlp;
					mlp->ml_prev = next->ml_prev;
					kmem_free(next,
					    sizeof (struct memlist));
					goto add_done;
				}
			}

			next->ml_size += bytes;
			goto add_done;
		}

		/* Any other intersection with `next' is an overlap. */
		if ((address + bytes) > next->ml_address) {
			retval = -1;
			kmem_free(dst, sizeof (struct memlist));
			goto add_done;
		}

		/* No adjacency: link the new node in before `next'. */
		dst->ml_prev = prev;
		dst->ml_next = next;
		next->ml_prev = dst;
		if (prev == NULL) {
			*list = dst;
		} else {
			prev->ml_next = dst;
		}
		goto add_done;
	}

	/* Ran off the end of the list: append. */
	prev->ml_next = dst;
	dst->ml_prev = prev;
	dst->ml_next = NULL;

add_done:
	if (retval != -1)
		*npgs += pages;
	return (retval);
}
/*
 * Remove the span [pfn, pfn + pages) from the scrub list.  The range
 * must lie entirely within one existing span; deleting from the middle
 * splits that span in two.  On success the global page count drops,
 * and the scrubber is disabled when no pages remain.  Returns 0 on
 * success, -1 when the range is not contained in a single span or the
 * split node cannot be allocated.
 */
int
memscrub_delete_span(pfn_t pfn, pgcnt_t pages)
{
	ms_paddr_t address = (ms_paddr_t)pfn << PAGESHIFT;
	uint64_t bytes = (uint64_t)pages << PAGESHIFT;
	struct memlist *dst, *next;
	int retval = 0;

	mutex_enter(&memscrub_lock);

#ifdef MEMSCRUB_DEBUG
	memscrub_printmemlist("memscrub_memlist Before", memscrub_memlist);
	cmn_err(CE_CONT, "memscrub_phys_pages: 0x%x\n", memscrub_phys_pages);
	cmn_err(CE_CONT, "memscrub_delete_span: 0x%llx 0x%llx\n",
	    address, bytes);
#endif

	/* Find the span containing the start address. */
	for (next = memscrub_memlist; next != NULL; next = next->ml_next) {
		if ((address >= next->ml_address) &&
		    (address < next->ml_address + next->ml_size))
			break;
	}

	/* Start address is not inside any span. */
	if (next == NULL) {
		retval = -1;
		goto delete_done;
	}

	/* Range must not extend past the containing span. */
	if (address + bytes > next->ml_address + next->ml_size) {
		retval = -1;
		goto delete_done;
	}

	/* Case 1: deletion starts at the front of the span. */
	if (address == next->ml_address) {
		if (bytes == next->ml_size) {
			/* Entire span goes away: unlink and free it. */
			if (next == memscrub_memlist)
				memscrub_memlist = next->ml_next;

			if (next->ml_prev != NULL)
				next->ml_prev->ml_next = next->ml_next;
			if (next->ml_next != NULL)
				next->ml_next->ml_prev = next->ml_prev;

			kmem_free(next, sizeof (struct memlist));
		} else {
			/* Trim the front of the span. */
			next->ml_address += bytes;
			next->ml_size -= bytes;
		}
		goto delete_done;
	}

	/* Case 2: deletion ends at the end of the span: trim the tail. */
	if (address + bytes == next->ml_address + next->ml_size) {
		next->ml_size -= bytes;
		goto delete_done;
	}

	/* Case 3: deletion from the middle: split the span in two. */
	{
		dst = (struct memlist *)
		    kmem_alloc(sizeof (struct memlist), KM_NOSLEEP);
		if (dst == NULL) {
			retval = -1;
			goto delete_done;
		}

		/* New node holds the part after the deleted range. */
		dst->ml_address = address + bytes;
		dst->ml_size =
		    (next->ml_address + next->ml_size) - dst->ml_address;

		/* Existing node keeps the part before it. */
		next->ml_size = address - next->ml_address;

		dst->ml_next = next->ml_next;
		dst->ml_prev = next;
		if (next->ml_next != NULL)
			next->ml_next->ml_prev = dst;
		next->ml_next = dst;
	}

delete_done:
	if (retval != -1) {
		memscrub_phys_pages -= pages;
		/* Nothing left to scrub: tell the thread to exit. */
		if (memscrub_phys_pages == 0)
			disable_memscrub = 1;
	}

#ifdef MEMSCRUB_DEBUG
	memscrub_printmemlist("memscrub_memlist After", memscrub_memlist);
	cmn_err(CE_CONT, "memscrub_phys_pages: 0x%x\n", memscrub_phys_pages);
#endif

	mutex_exit(&memscrub_lock);

	return (retval);
}
/*
 * Scan `blks' MEMSCRUB_BLOCK_SIZE-byte blocks of physical memory
 * starting at `src'.  Each chunk is mapped read-only at MEMSCRUBBASE
 * using the largest MMU page size that alignment and the remaining
 * count allow, read under on_trap() protection, and checked for
 * correctable errors with cpu_check_ce().  Spans recorded in the
 * page-retire span list are scanned one MMU page at a time so retired
 * pages can be skipped.  Called with memscrub_lock held.
 *
 * Fix vs. original: the MEMSCRUB_DEBUG cmn_err() below was missing the
 * comma between the format string and scan_mmu_pagesize, which broke
 * compilation with MEMSCRUB_DEBUG defined.
 */
static void
memscrub_scan(uint_t blks, ms_paddr_t src)
{
	uint_t psz, bpp, pgsread;
	pfn_t pfn;
	ms_paddr_t pa;
	caddr_t va;
	on_trap_data_t otd;
	int scan_mmu_pagesize = 0;
	int retired_pages = 0;

	extern void memscrub_read(caddr_t src, uint_t blks);

	ASSERT(mutex_owned(&memscrub_lock));

	pgsread = 0;
	pa = src;

	/*
	 * If this span is known to contain retired pages, force the
	 * page-by-page path below so they can be skipped individually.
	 */
	if (memscrub_page_retire_span_list != NULL) {
		if (memscrub_page_retire_span_search(src)) {
			scan_mmu_pagesize = 1;
		}
	}

#ifdef MEMSCRUB_DEBUG
	cmn_err(CE_NOTE, "scan_mmu_pagesize = %d\n", scan_mmu_pagesize);
#endif

	while (blks != 0) {
		/* Pick the largest page size alignment and count allow. */
		if (((pa & MMU_PAGEMASK4M) == pa) &&
		    (blks >= MEMSCRUB_BPP4M)) {
			psz = MMU_PAGESIZE4M;
			bpp = MEMSCRUB_BPP4M;
		} else if (((pa & MMU_PAGEMASK512K) == pa) &&
		    (blks >= MEMSCRUB_BPP512K)) {
			psz = MMU_PAGESIZE512K;
			bpp = MEMSCRUB_BPP512K;
		} else if (((pa & MMU_PAGEMASK64K) == pa) &&
		    (blks >= MEMSCRUB_BPP64K)) {
			psz = MMU_PAGESIZE64K;
			bpp = MEMSCRUB_BPP64K;
		} else if ((pa & MMU_PAGEMASK) == pa) {
			psz = MMU_PAGESIZE;
			bpp = MEMSCRUB_BPP;
		} else {
			if (memscrub_verbose) {
				cmn_err(CE_NOTE, "Memory scrubber ignoring "
				    "non-page aligned block starting at 0x%"
				    PRIx64, src);
			}
			return;
		}

		/* Partial chunk at the end of the span. */
		if (blks < bpp)
			bpp = blks;

#ifdef MEMSCRUB_DEBUG
		cmn_err(CE_NOTE, "Going to run psz=%x, "
		    "bpp=%x pa=%llx\n", psz, bpp, pa);
#endif

		/* Map the chunk read-only at the scrubber's fixed VA. */
		pfn = mmu_btop(pa);
		va = (caddr_t)MEMSCRUBBASE;
		hat_devload(kas.a_hat, va, psz, pfn, PROT_READ,
		    HAT_LOAD_NOCONSIST | HAT_LOAD_LOCK);

		/* Stay on one CPU across the read and the CE check. */
		thread_affinity_set(curthread, CPU_CURRENT);
		if (!on_trap(&otd, OT_DATA_EC) && !scan_mmu_pagesize) {
			/* Fast path: read the whole mapping at once. */
			memscrub_read(va, bpp);
			cpu_check_ce(SCRUBBER_CEEN_CHECK,
			    (uint64_t)pa, va, psz);
			no_trap();
			thread_affinity_clear(curthread);
		} else {
			/* Trap taken, or span contains retired pages. */
			no_trap();
			thread_affinity_clear(curthread);

			if (psz > MMU_PAGESIZE || scan_mmu_pagesize) {
				caddr_t vaddr = va;
				ms_paddr_t paddr = pa;
				int tmp = 0;

				/* Re-scan one MMU page at a time. */
				for (; tmp < bpp; tmp += MEMSCRUB_BPP) {
					/* Skip pages already retired. */
					if (page_retire_check(paddr, NULL)
					    == 0) {
						vaddr += MMU_PAGESIZE;
						paddr += MMU_PAGESIZE;
						retired_pages++;
						continue;
					}

					thread_affinity_set(curthread,
					    CPU_CURRENT);
					if (!on_trap(&otd, OT_DATA_EC)) {
						memscrub_read(vaddr,
						    MEMSCRUB_BPP);
						cpu_check_ce(
						    SCRUBBER_CEEN_CHECK,
						    (uint64_t)paddr, vaddr,
						    MMU_PAGESIZE);
						no_trap();
					} else {
						no_trap();
						MEMSCRUB_STAT_INC(errors_found);
					}
					thread_affinity_clear(curthread);
					vaddr += MMU_PAGESIZE;
					paddr += MMU_PAGESIZE;
				}
			}
		}
		hat_unload(kas.a_hat, va, psz, HAT_UNLOAD_UNLOCK);

		blks -= bpp;
		pa += psz;
		pgsread++;
	}

	/*
	 * No retired pages were seen in this tracked span: stop
	 * tracking it so future scans take the fast path again.
	 */
	if (scan_mmu_pagesize && retired_pages == 0)
		memscrub_page_retire_span_delete(src);

	/* A scrub-induced error retired a page: start tracking this span. */
	if (add_to_page_retire_list) {
		if (!memscrub_page_retire_span_search(src))
			memscrub_page_retire_span_add(src);
		add_to_page_retire_list = 0;
	}

	if (memscrub_verbose) {
		cmn_err(CE_NOTE, "Memory scrubber read 0x%x pages starting "
		    "at 0x%" PRIx64, pgsread, src);
	}
}
/*
 * Called by the error-handling code when a scrub read induced an error;
 * memscrub_scan() will then record the current span in the page-retire
 * span list at the end of its pass.
 */
void
memscrub_induced_error(void)
{
	add_to_page_retire_list = 1;
}
/*
 * Record, under memscrub_lock, that the span starting at `pa' contains
 * a retired page — unless that span is already being tracked.
 */
void
memscrub_notify(ms_paddr_t pa)
{
	mutex_enter(&memscrub_lock);
	if (memscrub_page_retire_span_search(pa) == 0)
		memscrub_page_retire_span_add(pa);
	mutex_exit(&memscrub_lock);
}
/*
 * Push a new tracking node for the span starting at `pa' onto the head
 * of the page-retire span list.  Allocation failure is tolerated: the
 * span simply goes untracked.
 */
static void
memscrub_page_retire_span_add(ms_paddr_t pa)
{
	memscrub_page_retire_span_t *span;

	span = (memscrub_page_retire_span_t *)kmem_zalloc(
	    sizeof (memscrub_page_retire_span_t), KM_NOSLEEP);
	if (span == NULL) {
#ifdef MEMSCRUB_DEBUG
		cmn_err(CE_NOTE, "failed to allocate new span - span with"
		    " retired page/s not tracked.\n");
#endif
		return;
	}

	span->address = pa;
	span->next = memscrub_page_retire_span_list;
	memscrub_page_retire_span_list = span;
}
/*
 * Remove the tracking node for the span starting at `pa' from the
 * page-retire span list, if present.  The original unconditionally
 * dereferenced the list head; a NULL guard is added so a call against
 * an empty list is a harmless no-op instead of a NULL dereference.
 */
static void
memscrub_page_retire_span_delete(ms_paddr_t pa)
{
	memscrub_page_retire_span_t *prev_span, *next_span;

	/* Empty list: nothing to delete. */
	if (memscrub_page_retire_span_list == NULL)
		return;

	prev_span = memscrub_page_retire_span_list;
	next_span = memscrub_page_retire_span_list->next;

	/* Head node matches: advance the list head. */
	if (pa == prev_span->address) {
		memscrub_page_retire_span_list = next_span;
		kmem_free(prev_span, sizeof (memscrub_page_retire_span_t));
		return;
	}

	/* Otherwise walk with a trailing pointer and unlink the match. */
	while (next_span) {
		if (pa == next_span->address) {
			prev_span->next = next_span->next;
			kmem_free(next_span,
			    sizeof (memscrub_page_retire_span_t));
			return;
		}
		prev_span = next_span;
		next_span = next_span->next;
	}
}
/*
 * Return 1 if a tracked span starts at `pa', 0 otherwise.
 */
static int
memscrub_page_retire_span_search(ms_paddr_t pa)
{
	memscrub_page_retire_span_t *sp;

	for (sp = memscrub_page_retire_span_list; sp != NULL; sp = sp->next) {
		if (sp->address == pa)
			return (1);
	}
	return (0);
}
/*
 * Prune from the page-retire span list any entry whose page no longer
 * exists (its pfn has no page_t, e.g. after a DR memory delete).
 * Called after the scrub memlist is rebuilt, with memscrub_lock held
 * by the caller (new_memscrub()).
 */
static void
memscrub_page_retire_span_list_update(void)
{
	memscrub_page_retire_span_t *prev, *cur, *next;

	if (memscrub_page_retire_span_list == NULL)
		return;

	prev = cur = memscrub_page_retire_span_list;
	next = cur->next;

	while (cur) {
		/* No page_t for the pfn: the page is gone, drop the entry. */
		if (page_numtopp_nolock(mmu_btop(cur->address)) == NULL) {
			if (cur == memscrub_page_retire_span_list) {
				/* Deleting the head: restart at new head. */
				memscrub_page_retire_span_list = next;
				kmem_free(cur,
				    sizeof (memscrub_page_retire_span_t));
				prev = cur = memscrub_page_retire_span_list;
			} else {
				prev->next = cur->next;
				kmem_free(cur,
				    sizeof (memscrub_page_retire_span_t));
				cur = next;
			}
		} else {
			prev = cur;
			cur = next;
		}
		if (cur != NULL)
			next = cur->next;
	}
}
/*
 * Rebuild the scrub memlist from the current phys_install list (used
 * after DR memory-configuration changes).  The new list is constructed
 * without holding memscrub_lock, swapped in under the lock, and the
 * old list freed afterwards.  When update_page_retire_list is set,
 * stale page-retire span entries are pruned while the lock is held.
 * Returns 0 on success, -1 if the new list could not be built.
 */
static int
new_memscrub(int update_page_retire_list)
{
	struct memlist *src, *list, *old_list;
	uint_t npgs;

	/* Build a fresh list under the memlist read lock. */
	list = NULL;
	npgs = 0;
	memlist_read_lock();
	for (src = phys_install; src; src = src->ml_next) {
		if (memscrub_add_span_gen((pfn_t)(src->ml_address >> PAGESHIFT),
		    (pgcnt_t)(src->ml_size >> PAGESHIFT), &list, &npgs)) {
			memlist_read_unlock();
			/* Failure: free the partially built list. */
			while (list) {
				struct memlist *el;

				el = list;
				list = list->ml_next;
				kmem_free(el, sizeof (struct memlist));
			}
			return (-1);
		}
	}
	memlist_read_unlock();

	/* Swap the new list in under the scrubber lock. */
	mutex_enter(&memscrub_lock);
	memscrub_phys_pages = npgs;
	old_list = memscrub_memlist;
	memscrub_memlist = list;

	if (update_page_retire_list)
		memscrub_page_retire_span_list_update();

	mutex_exit(&memscrub_lock);

	/* Free the replaced list outside the lock. */
	while (old_list) {
		struct memlist *el;

		el = old_list;
		old_list = old_list->ml_next;
		kmem_free(el, sizeof (struct memlist));
	}

	return (0);
}
/*
 * kphysm callback: memory was added.  Pause the scrubber (counted,
 * so concurrent pauses compose) while the scrub list is rebuilt to
 * include the new memory.
 */
static void
memscrub_mem_config_post_add(
	void *arg,
	pgcnt_t delta_pages)
{
	atomic_inc_32(&pause_memscrub);
	ASSERT(pause_memscrub != 0);

	/* Rebuild failure just leaves the new memory unscrubbed. */
	(void) new_memscrub(0);

	atomic_dec_32(&pause_memscrub);
}
/*
 * kphysm callback: a memory delete is about to start.  The scrubber
 * never objects, so always approve (return 0).
 */
static int
memscrub_mem_config_pre_del(
	void *arg,
	pgcnt_t delta_pages)
{
	return (0);
}
/*
 * kphysm callback: a memory delete completed (or was cancelled).
 * Pause the scrubber, rebuild the scrub list (pruning page-retire
 * span entries for vanished pages), and disable scrubbing entirely
 * if the rebuild fails.
 */
static void
memscrub_mem_config_post_del(
	void *arg,
	pgcnt_t delta_pages,
	int cancelled)
{
	atomic_inc_32(&pause_memscrub);
	ASSERT(pause_memscrub != 0);

	if (new_memscrub(1)) {
		disable_memscrub = 1;
	}

	atomic_dec_32(&pause_memscrub);
}
/* Callback vector registered with the kphysm (DR memory) framework. */
static kphysm_setup_vector_t memscrub_mem_config_vec = {
	KPHYSM_SETUP_VECTOR_VERSION,
	memscrub_mem_config_post_add,
	memscrub_mem_config_pre_del,
	memscrub_mem_config_post_del,
};
/*
 * Register the DR memory-configuration callbacks.  The definition now
 * uses (void) to match the forward declaration, and `ret' is explicitly
 * consumed so non-DEBUG builds (where ASSERT compiles away) do not
 * warn about an unused variable.
 */
static void
memscrub_init_mem_config(void)
{
	int ret;

	ret = kphysm_setup_func_register(&memscrub_mem_config_vec,
	    (void *)NULL);
	ASSERT(ret == 0);
	(void) ret;
}
/*
 * Unregister the DR memory-configuration callbacks.  The definition now
 * uses (void) to match the forward declaration.
 */
static void
memscrub_uninit_mem_config(void)
{
	kphysm_setup_func_unregister(&memscrub_mem_config_vec, (void *)NULL);
}