#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/intrmap.h>
/*
 * A refcounted snapshot of the CPUs that are eligible to take
 * interrupts.  Shared between all intrmaps created while the CPU
 * count is stable; freed when the last reference is released.
 */
struct intrmap_cpus {
struct refcnt ic_refs;		/* references held by intrmaps + the cache */
unsigned int ic_count;		/* number of entries in ic_cpumap */
struct cpu_info **ic_cpumap;	/* array of eligible CPUs, ic_count long */
};
/*
 * Per-device map from interrupt/ring index to a CPU in im_cpus.
 */
struct intrmap {
unsigned int im_count;		/* number of interrupts in this map */
unsigned int im_grid;		/* spacing used to stripe units over CPUs */
struct intrmap_cpus *
im_cpus;			/* referenced CPU snapshot (see above) */
unsigned int *im_cpumap;	/* im_count indexes into im_cpus->ic_cpumap */
};
/* Serializes rebuilds and replacement of the cached CPU snapshot. */
struct rwlock intrmap_lock = RWLOCK_INITIALIZER("intrcpus");
/* Cached snapshot of interrupt-capable CPUs; replaced when ncpus changes. */
struct intrmap_cpus *intrmap_cpus = NULL;
/* ncpus value the cached snapshot corresponds to (0 = no snapshot yet). */
int intrmap_ncpu = 0;
/*
 * Release a reference on a CPU snapshot.  NULL is accepted as a
 * convenience no-op.  The snapshot and its CPU array are freed when
 * the final reference goes away.
 */
static void
intrmap_cpus_put(struct intrmap_cpus *ic)
{
	if (ic == NULL)
		return;

	/* refcnt_rele() returns nonzero when this was the last reference */
	if (!refcnt_rele(&ic->ic_refs))
		return;

	free(ic->ic_cpumap, M_DEVBUF,
	    ic->ic_count * sizeof(*ic->ic_cpumap));
	free(ic, M_DEVBUF, sizeof(*ic));
}
static struct intrmap_cpus *
intrmap_cpus_get(void)
{
struct intrmap_cpus *oic = NULL;
struct intrmap_cpus *ic;
rw_enter_write(&intrmap_lock);
if (intrmap_ncpu != ncpus) {
unsigned int icpus = 0;
struct cpu_info **cpumap;
CPU_INFO_ITERATOR cii;
struct cpu_info *ci;
cpumap = mallocarray(ncpus, sizeof(*cpumap),
M_DEVBUF, M_WAITOK);
CPU_INFO_FOREACH(cii, ci) {
#ifdef __HAVE_CPU_TOPOLOGY
if (ci->ci_smt_id > 0)
continue;
#endif
cpumap[icpus++] = ci;
}
if (icpus < ncpus) {
struct cpu_info **icpumap = mallocarray(icpus,
sizeof(*icpumap), M_DEVBUF, M_WAITOK);
memcpy(icpumap, cpumap, icpus * sizeof(*icpumap));
free(cpumap, M_DEVBUF, ncpus * sizeof(*cpumap));
cpumap = icpumap;
}
ic = malloc(sizeof(*ic), M_DEVBUF, M_WAITOK);
refcnt_init(&ic->ic_refs);
ic->ic_count = icpus;
ic->ic_cpumap = cpumap;
oic = intrmap_cpus;
intrmap_cpus = ic;
} else
ic = intrmap_cpus;
refcnt_take(&ic->ic_refs);
rw_exit_write(&intrmap_lock);
intrmap_cpus_put(oic);
return (ic);
}
/*
 * Work out how many interrupts a device should get: the requested
 * count, bounded by the device maximum and by the number of eligible
 * CPUs.  A request of 0 means "as many as allowed".
 */
static int
intrmap_nintrs(const struct intrmap_cpus *ic, unsigned int nintrs,
unsigned int maxintrs)
{
	unsigned int n = nintrs;

	KASSERTMSG(maxintrs > 0, "invalid maximum interrupt count %u",
	    maxintrs);

	/* unset or over-limit requests fall back to the device maximum */
	if (n == 0 || n > maxintrs)
		n = maxintrs;

	/* never hand out more interrupts than there are CPUs */
	return (n < ic->ic_count ? (int)n : (int)ic->ic_count);
}
/*
 * Fill in im_cpumap so this device's interrupts occupy a contiguous
 * run of CPU indexes.  `grid` is the stripe width: consecutive device
 * units start `grid` CPUs apart (mod the CPU count), so multiple
 * devices spread across different CPUs instead of piling onto CPU 0.
 * Requires grid >= im_count; the assertion below additionally relies
 * on grid dividing ic_count so offset + i stays in range — the caller
 * picks grid from the divisors of ic_count.
 */
static void
intrmap_set_grid(struct intrmap *im, unsigned int unit, unsigned int grid)
{
unsigned int i, offset;
unsigned int *cpumap = im->im_cpumap;
const struct intrmap_cpus *ic = im->im_cpus;
KASSERTMSG(grid > 0, "invalid if_ringmap grid %u", grid);
KASSERTMSG(grid >= im->im_count, "invalid intrmap grid %u, count %u",
grid, im->im_count);
im->im_grid = grid;
/* starting CPU index for this unit's stripe */
offset = (grid * unit) % ic->ic_count;
for (i = 0; i < im->im_count; i++) {
cpumap[i] = offset + i;
KASSERTMSG(cpumap[i] < ic->ic_count,
"invalid cpumap[%u] = %u, offset %u (ncpu %d)", i,
cpumap[i], offset, ic->ic_count);
}
}
/*
 * Create an interrupt-to-CPU map for device dv.
 *
 * nintrs is the requested interrupt count (0 = as many as allowed),
 * maxintrs the device's hard limit; the result is further clamped to
 * the number of eligible CPUs.  INTRMAP_POWEROF2 rounds the count
 * down to a power of two.  The returned map holds a reference to the
 * CPU snapshot and must be released with intrmap_destroy().
 */
struct intrmap *
intrmap_create(const struct device *dv,
unsigned int nintrs, unsigned int maxintrs, unsigned int flags)
{
struct intrmap *im;
unsigned int unit = dv->dv_unit;
unsigned int i, grid = 0, prev_grid;
struct intrmap_cpus *ic;
ic = intrmap_cpus_get();
nintrs = intrmap_nintrs(ic, nintrs, maxintrs);
if (ISSET(flags, INTRMAP_POWEROF2))
/* round down to the highest power of two <= nintrs */
nintrs = 1 << (fls(nintrs) - 1);
im = malloc(sizeof(*im), M_DEVBUF, M_WAITOK | M_ZERO);
im->im_count = nintrs;
im->im_cpus = ic;
im->im_cpumap = mallocarray(nintrs, sizeof(*im->im_cpumap), M_DEVBUF,
M_WAITOK | M_ZERO);
/*
 * Pick the stripe width (grid) for intrmap_set_grid(): walk the
 * divisors of ic_count from largest (ic_count itself) downward and
 * take the smallest one that is still >= nintrs, so the stripes
 * tile the CPUs evenly while leaving room for this map's interrupts.
 */
prev_grid = ic->ic_count;
for (i = 0; i < ic->ic_count; i++) {
/* only consider i+1 values that divide the CPU count */
if (ic->ic_count % (i + 1) != 0)
continue;
grid = ic->ic_count / (i + 1);
if (nintrs > grid) {
/* went one divisor too far; back up to the previous one */
grid = prev_grid;
break;
}
/* stop if the next candidate would be too small to fit nintrs */
if (nintrs > ic->ic_count / (i + 2))
break;
prev_grid = grid;
}
intrmap_set_grid(im, unit, grid);
return (im);
}
/*
 * Tear down a map created by intrmap_create(), dropping its reference
 * to the shared CPU snapshot and freeing its index array.
 */
void
intrmap_destroy(struct intrmap *im)
{
	intrmap_cpus_put(im->im_cpus);
	free(im->im_cpumap, M_DEVBUF,
	    im->im_count * sizeof(*im->im_cpumap));
	free(im, M_DEVBUF, sizeof(*im));
}
/*
 * Return how many interrupts/rings this map covers.
 */
unsigned int
intrmap_count(const struct intrmap *im)
{
	return im->im_count;
}
/*
 * Look up the CPU assigned to a given ring/interrupt index.
 */
struct cpu_info *
intrmap_cpu(const struct intrmap *im, unsigned int ring)
{
	unsigned int cpu;

	KASSERTMSG(ring < im->im_count, "invalid ring %u", ring);

	cpu = im->im_cpumap[ring];
	KASSERTMSG(cpu < im->im_cpus->ic_count,
	    "invalid interrupt cpu %u for ring %u (intrmap %p)",
	    cpu, ring, im);

	return im->im_cpus->ic_cpumap[cpu];
}