#include <sys/param.h>
#include <sys/systm.h>
#include <sh/cache.h>
#include <sh/cache_sh3.h>
/* Round up / truncate down to a 16-byte cache-line boundary. */
#define round_line(x) (((x) + 15) & ~15)
#define trunc_line(x) ((x) & ~15)
void sh3_cache_wbinv_all(void);
void sh3_cache_wbinv_range(vaddr_t, vsize_t);
void sh3_cache_wbinv_range_index(vaddr_t, vsize_t);
void sh3_cache_panic(vaddr_t, vsize_t);
void sh3_cache_nop(vaddr_t, vsize_t);
/* Cache geometry, computed once in sh3_cache_config(). */
int sh_cache_way_size;		/* bytes per way */
int sh_cache_way_shift;		/* shift to select a way in the address array */
int sh_cache_entry_mask;	/* masks a VA down to an entry offset in a way */
/* Low-level helpers: operate on 1 or 8 lines across the first n ways. */
static inline void cache_sh3_op_line_16_nway(int, vaddr_t, uint32_t);
static inline void cache_sh3_op_8lines_16_nway(int, vaddr_t, uint32_t);
/*
 * sh3_cache_config:
 *	Determine the cache geometry from the CPU product and the current
 *	cache control register (CCR) settings, record it in the global
 *	cache description variables, and install the SH3 cache handlers.
 */
void
sh3_cache_config(void)
{
	size_t csize;
	uint32_t ccr;

	/* Total cache size is fixed per product. */
	switch (cpu_product) {
	case CPU_PRODUCT_7709A:
		csize = 16 * 1024;
		break;
	case CPU_PRODUCT_7709:
		csize = 8 * 1024;
		break;
	case CPU_PRODUCT_7708:
	case CPU_PRODUCT_7708S:
	case CPU_PRODUCT_7708R:
	default:
		csize = 8 * 1024;
		break;
	}

	ccr = _reg_read_4(SH3_CCR);

	/* SH3 has a single unified (I+D) cache. */
	sh_cache_unified = 1;
	sh_cache_enable_unified = (ccr & SH3_CCR_CE);
	sh_cache_line_size = 16;
	/* Write-through for P0/U0/P3 when WT is set; P1 unless CB is set. */
	sh_cache_write_through_p0_u0_p3 = ccr & SH3_CCR_WT;
	sh_cache_write_through_p1 = !(ccr & SH3_CCR_CB);
	sh_cache_write_through = sh_cache_write_through_p0_u0_p3 &&
	    sh_cache_write_through_p1;
	sh_cache_ram_mode = ccr & SH3_CCR_RA;

	if (sh_cache_ram_mode) {
		/*
		 * RAM mode (CCR.RA): only half the array caches;
		 * presumably the rest serves as on-chip RAM — see the
		 * SH3 hardware manual.
		 */
		sh_cache_ways = 2;
		sh_cache_size_unified = csize / 2;
	} else {
		sh_cache_ways = 4;
		sh_cache_size_unified = csize;
	}

	/* NB: way-size divisor stays 4 even in the 2-way RAM mode. */
	sh_cache_way_size = sh_cache_size_unified / 4;
	sh_cache_entry_mask = (sh_cache_way_size - 1) & ~15;
	/* log2(way size): ffs(entries-per-way) - 1 + log2(line size). */
	sh_cache_way_shift =
	    ffs(sh_cache_size_unified / (4 * 16)) - 1
	    + 4;

	/* Unified cache: I-cache sync is the same write-back/invalidate. */
	sh_cache_ops._icache_sync_all = sh3_cache_wbinv_all;
	sh_cache_ops._icache_sync_range = sh3_cache_wbinv_range;
	sh_cache_ops._icache_sync_range_index = sh3_cache_wbinv_range_index;
	sh_cache_ops._dcache_wbinv_all = sh3_cache_wbinv_all;
	sh_cache_ops._dcache_wbinv_range = sh3_cache_wbinv_range;
	sh_cache_ops._dcache_wbinv_range_index = sh3_cache_wbinv_range_index;
	/* SH3 cannot invalidate without writing back. */
	sh_cache_ops._dcache_inv_range = sh3_cache_panic;
	/* Write-back is a no-op when the cache is write-through. */
	sh_cache_ops._dcache_wb_range = sh_cache_write_through ?
	    sh3_cache_nop : sh3_cache_wbinv_range;
}
/*
 * Operate on one 16-byte cache line in each of the first `n' ways:
 * clear `bits' (CCA_U and/or CCA_V) in the address-array entry that
 * `va' indexes, forcing write-back and/or invalidation of that line.
 */
static inline void
cache_sh3_op_line_16_nway(int n, vaddr_t va, uint32_t bits)
{
	vaddr_t addr;
	int w;

	/* Reduce va to an entry offset within a way. */
	va &= sh_cache_entry_mask;

	for (w = 0; w < n; w++) {
		addr = (SH3_CCA | w << sh_cache_way_shift | va);
		_reg_bclr_4(addr, bits);
	}
}
/*
 * Operate on 8 consecutive 16-byte cache lines in each of the first
 * `n' ways, clearing `bits' in their address-array entries.
 */
static inline void
cache_sh3_op_8lines_16_nway(int n, vaddr_t va, uint32_t bits)
{
	volatile uint32_t *base;
	int w, l;

	/* Reduce va to an entry offset within a way. */
	va &= sh_cache_entry_mask;

	for (w = 0; w < n; w++) {
		base = (volatile uint32_t *)
		    (SH3_CCA | w << sh_cache_way_shift | va);
		/* Entries are 16 bytes (4 words) apart. */
		for (l = 0; l < 8; l++)
			base[l * 4] &= ~bits;
	}
}
/*
 * Write back and invalidate the entire cache by sweeping every entry
 * of one way; the helper clears U and V across all ways per entry.
 */
void
sh3_cache_wbinv_all(void)
{
	vaddr_t va;

	/* 8 lines of 16 bytes per step. */
	for (va = 0; va < sh_cache_way_size; va += 8 * 16)
		cache_sh3_op_8lines_16_nway(sh_cache_ways, va, CCA_U | CCA_V);
}
/*
 * Write back and invalidate, by cache index, every line that could
 * hold data for [va, va+sz): clear U and V in the matching entries
 * of all ways.
 */
void
sh3_cache_wbinv_range_index(vaddr_t va, vsize_t sz)
{
	vaddr_t eva;

	eva = round_line(va + sz);
	va = trunc_line(va);

	/* Batches of 8 lines first, then the remaining singles. */
	for (; eva - va >= 8 * 16; va += 8 * 16)
		cache_sh3_op_8lines_16_nway(sh_cache_ways, va, CCA_U | CCA_V);
	for (; va < eva; va += 16)
		cache_sh3_op_line_16_nway(sh_cache_ways, va, CCA_U | CCA_V);
}
/*
 * Write back and invalidate the lines caching [va, va+sz) using the
 * associative-purge mode of the address array (CCA_A): writing the
 * tag address purges the line only if it actually matches.
 */
void
sh3_cache_wbinv_range(vaddr_t va, vsize_t sz)
{
	vaddr_t eva;

	eva = round_line(va + sz);
	for (va = trunc_line(va); va < eva; va += 16) {
		_reg_write_4(SH3_CCA | CCA_A | (va & sh_cache_entry_mask),
		    va & CCA_TAGADDR_MASK);
	}
}
/*
 * Handler for the unsupported invalidate-without-write-back operation
 * (_dcache_inv_range): the SH3 address array cannot drop a dirty line
 * without writing it back, so reaching here is a fatal bug.
 */
void
sh3_cache_panic(vaddr_t va, vsize_t size)
{
	panic("SH3 can't invalidate without write-back");
}
/*
 * No-op handler, installed for _dcache_wb_range when the cache is
 * write-through (nothing is ever dirty, so write-back does nothing).
 */
void
sh3_cache_nop(vaddr_t va, vsize_t sz)
{
}