/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/arm925.S: MMU functions for ARM925
 *
 *  Copyright (C) 1999,2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2002 RidgeRun, Inc.
 *  Copyright (C) 2002-2003 MontaVista Software, Inc.
 *
 *  Update for Linux-2.6 and cache flush improvements
 *  Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
 *
 *  hacked for non-paged-MM by Hyok S. Choi, 2004.
 *
 * This is the low-level assembler for the cache and TLB
 * functions on the ARM925.
 *
 *  CONFIG_CPU_ARM925_CPU_IDLE -> nohlt
 *
 * Some additional notes based on deciphering the TI TRM on OMAP-5910:
 *
 * NOTE1: The TI925T Configuration Register bit "D-cache clean and flush
 *        entry mode" must be 0 to flush the entries in both segments
 *        at once. This is the default value. See TRM 2-20 and 2-24 for
 *        more information.
 *
 * NOTE2: Default is the "D-cache clean and flush entry mode". It looks
 *        like the "Transparent mode" must be on for partial cache flushes
 *        to work in this mode. This mode only works with 16-bit external
 *        memory. See TRM 2-24 for more information.
 *
 * NOTE3: Write-back cache flushing seems to be flaky with devices using
 *        direct memory access, such as USB OHCI. The workaround is to use
 *        write-through cache with CONFIG_CPU_DCACHE_WRITETHROUGH (this is
 *        the default for OMAP-1510).
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE 16

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS 2

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES  256

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT    8192
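
/*
 * Note that CACHE_DLIMIT is exactly the D-cache size:
 * CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE = 2 * 256 * 16 = 8192
 * bytes, so any range larger than the whole cache takes the
 * whole-cache path.
 */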

        .text
/*
 * cpu_arm925_proc_init()
 */
SYM_TYPED_FUNC_START(cpu_arm925_proc_init)
        ret     lr
SYM_FUNC_END(cpu_arm925_proc_init)

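/*
 * A note on notation: the single-letter comments beside the control
 * register operations below follow the usual proc-*.S convention.
 * Each letter stands for an ARMv4 control register bit, shown in its
 * bit position: M = MMU, A = alignment fault, C = D-cache, W = write
 * buffer, B = big-endian, S = system protection, R = ROM protection,
 * I = I-cache, V = high vectors, and the leading R (bit 14) selects
 * round-robin cache replacement; the remaining letters are legacy
 * bits. A dot marks a bit the instruction leaves alone.
 */
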
/*
 * cpu_arm925_proc_fin()
 */
SYM_TYPED_FUNC_START(cpu_arm925_proc_fin)
        mrc     p15, 0, r0, c1, c0, 0           @ ctrl register
        bic     r0, r0, #0x1000                 @ ...i............
        bic     r0, r0, #0x000e                 @ ............wca.
        mcr     p15, 0, r0, c1, c0, 0           @ disable caches
        ret     lr
SYM_FUNC_END(cpu_arm925_proc_fin)

/*
 * cpu_arm925_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
        .align  5
        .pushsection    .idmap.text, "ax"
SYM_TYPED_FUNC_START(cpu_arm925_reset)
        /* Send software reset to MPU and DSP */
        mov     ip, #0xff000000
        orr     ip, ip, #0x00fe0000
        orr     ip, ip, #0x0000ce00
        mov     r4, #1
        strh    r4, [ip, #0x10]

        mov     ip, #0
        mcr     p15, 0, ip, c7, c7, 0           @ invalidate I,D caches
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
#ifdef CONFIG_MMU
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D TLBs
#endif
        mrc     p15, 0, ip, c1, c0, 0           @ ctrl register
        bic     ip, ip, #0x000f                 @ ............wcam
        bic     ip, ip, #0x1100                 @ ...i...s........
        mcr     p15, 0, ip, c1, c0, 0           @ ctrl register
        ret     r0
SYM_FUNC_END(cpu_arm925_reset)
        .popsection

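/*
 * On the reset path above, the strh hits (0xff000000 | 0x00fe0000 |
 * 0x0000ce00) + 0x10 = 0xfffece10, which per the comment is the OMAP
 * register that soft-resets the MPU and DSP. If the reset does not
 * take hold immediately, the rest of the function still disables the
 * caches and MMU and branches to the new reset vector in r0.
 */
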
/*
 * cpu_arm925_do_idle()
 *
 * Called with IRQs disabled
 */
        .align  10                              @ 2^10 = 1 KiB alignment
SYM_TYPED_FUNC_START(cpu_arm925_do_idle)
        mov     r0, #0
        mrc     p15, 0, r1, c1, c0, 0           @ Read control register
        mcr     p15, 0, r0, c7, c10, 4          @ Drain write buffer
        bic     r2, r1, #1 << 12
        mcr     p15, 0, r2, c1, c0, 0           @ Disable I cache
        mcr     p15, 0, r0, c7, c0, 4           @ Wait for interrupt
        mcr     p15, 0, r1, c1, c0, 0           @ Restore ICache enable
        ret     lr
SYM_FUNC_END(cpu_arm925_do_idle)

/*
 *      flush_icache_all()
 *
 *      Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(arm925_flush_icache_all)
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
        ret     lr
SYM_FUNC_END(arm925_flush_icache_all)

/*
 *      flush_user_cache_all()
 *
 *      Clean and invalidate all cache entries in a particular
 *      address space.
 */
SYM_FUNC_ALIAS(arm925_flush_user_cache_all, arm925_flush_kern_cache_all)

/*
 *      flush_kern_cache_all()
 *
 *      Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(arm925_flush_kern_cache_all)
        mov     r2, #VM_EXEC
        mov     ip, #0
__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
        mcr     p15, 0, ip, c7, c6, 0           @ invalidate D cache
#else
        /* Flush entries in both segments at once, see NOTE1 above */
        mov     r3, #(CACHE_DENTRIES - 1) << 4  @ 256 entries in segment
2:      mcr     p15, 0, r3, c7, c14, 2          @ clean+invalidate D index
        subs    r3, r3, #1 << 4
        bcs     2b                              @ entries 255 to 0
#endif
        tst     r2, #VM_EXEC
        mcrne   p15, 0, ip, c7, c5, 0           @ invalidate I cache
        mcrne   p15, 0, ip, c7, c10, 4          @ drain WB
        ret     lr
SYM_FUNC_END(arm925_flush_kern_cache_all)
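
/*
 * A sketch of the index-based walk used above, assuming the entry
 * number sits in bits [11:4] of the c7, c14, 2 operand (matching the
 * (CACHE_DENTRIES - 1) << 4 start value):
 *
 *      r3 = entry << 4                 @ entry = 255 down to 0
 *      mcr p15, 0, r3, c7, c14, 2      @ clean+invalidate that index
 *
 * No segment bits are needed: with the TI925T configuration bit at
 * its default (NOTE1), one operation covers the line in both segments.
 */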

/*
 *      flush_user_cache_range(start, end, flags)
 *
 *      Clean and invalidate a range of cache entries in the
 *      specified address range.
 *
 *      - start - start address (inclusive)
 *      - end   - end address (exclusive)
 *      - flags - vm_flags describing address space
 */
SYM_TYPED_FUNC_START(arm925_flush_user_cache_range)
        mov     ip, #0
        sub     r3, r1, r0                      @ calculate total size
        cmp     r3, #CACHE_DLIMIT
        bgt     __flush_whole_cache
1:      tst     r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
        add     r0, r0, #CACHE_DLINESIZE
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
        add     r0, r0, #CACHE_DLINESIZE
#else
        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D entry
        mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
        add     r0, r0, #CACHE_DLINESIZE
        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D entry
        mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
        add     r0, r0, #CACHE_DLINESIZE
#endif
        cmp     r0, r1
        blo     1b
        tst     r2, #VM_EXEC
        mcrne   p15, 0, ip, c7, c10, 4          @ drain WB
        ret     lr
SYM_FUNC_END(arm925_flush_user_cache_range)
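
/*
 * The loop above is unrolled to two cache lines (32 bytes) per
 * iteration, presumably to halve the loop overhead. A range that is
 * an odd number of lines gets one extra line flushed past the end,
 * which is harmless for a clean+invalidate.
 */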

/*
 *      coherent_kern_range(start, end)
 *
 *      Ensure coherency between the Icache and the Dcache in the
 *      region described by start, end.  If you have non-snooping
 *      Harvard caches, you need to implement this function.
 *
 *      - start - virtual start address
 *      - end   - virtual end address
 */
SYM_TYPED_FUNC_START(arm925_coherent_kern_range)
#ifdef CONFIG_CFI /* Fallthrough if !CFI */
        b       arm925_coherent_user_range
#endif
SYM_FUNC_END(arm925_coherent_kern_range)

/*
 *      coherent_user_range(start, end)
 *
 *      Ensure coherency between the Icache and the Dcache in the
 *      region described by start, end.  If you have non-snooping
 *      Harvard caches, you need to implement this function.
 *
 *      - start - virtual start address
 *      - end   - virtual end address
 */
SYM_TYPED_FUNC_START(arm925_coherent_user_range)
        bic     r0, r0, #CACHE_DLINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        mcr     p15, 0, r0, c7, c5, 1           @ invalidate I entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
        mov     r0, #0
        ret     lr
SYM_FUNC_END(arm925_coherent_user_range)
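
/*
 * r0 is cleared because coherent_user_range reports success as an
 * int return value; on cores where cache maintenance can fault this
 * path may fail, but on the ARM925 the walk always succeeds.
 */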

/*
 *      flush_kern_dcache_area(void *addr, size_t size)
 *
 *      Ensure no D cache aliasing occurs, either with itself or
 *      the I cache
 *
 *      - addr  - kernel address
 *      - size  - region size
 */
SYM_TYPED_FUNC_START(arm925_flush_kern_dcache_area)
        add     r1, r0, r1
1:      mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0           @ invalidate I cache
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
        ret     lr
SYM_FUNC_END(arm925_flush_kern_dcache_area)

/*
 *      dma_inv_range(start, end)
 *
 *      Invalidate (discard) the specified virtual address range.
 *      May not write back any entries.  If 'start' or 'end'
 *      are not cache line aligned, those lines must be written
 *      back.
 *
 *      - start - virtual start address
 *      - end   - virtual end address
 *
 * (same as v4wb)
 */
arm925_dma_inv_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        tst     r0, #CACHE_DLINESIZE - 1
        mcrne   p15, 0, r0, c7, c10, 1          @ clean D entry
        tst     r1, #CACHE_DLINESIZE - 1
        mcrne   p15, 0, r1, c7, c10, 1          @ clean D entry
#endif
        bic     r0, r0, #CACHE_DLINESIZE - 1
1:      mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
        ret     lr
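
/*
 * Worked example for the write-back case, assuming start = 0x1008 and
 * end = 0x1038: the partially covered lines at 0x1000 and 0x1030 are
 * cleaned first so bytes outside the range are not lost, then lines
 * 0x1000, 0x1010, 0x1020 and 0x1030 are invalidated.
 */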

/*
 *      dma_clean_range(start, end)
 *
 *      Clean the specified virtual address range.
 *
 *      - start - virtual start address
 *      - end   - virtual end address
 *
 * (same as v4wb)
 */
arm925_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        bic     r0, r0, #CACHE_DLINESIZE - 1
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
#endif
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
        ret     lr
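
/*
 * With CONFIG_CPU_DCACHE_WRITETHROUGH there is never dirty data to
 * write back, so the clean loop is compiled out and only the write
 * buffer drain remains.
 */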

/*
 *      dma_flush_range(start, end)
 *
 *      Clean and invalidate the specified virtual address range.
 *
 *      - start - virtual start address
 *      - end   - virtual end address
 */
SYM_TYPED_FUNC_START(arm925_dma_flush_range)
        bic     r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
#else
        mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
#endif
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
        ret     lr
SYM_FUNC_END(arm925_dma_flush_range)

/*
 *      dma_map_area(start, size, dir)
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(arm925_dma_map_area)
        add     r1, r1, r0                      @ turn size into end address
        cmp     r2, #DMA_TO_DEVICE
        beq     arm925_dma_clean_range          @ to device: clean only
        bcs     arm925_dma_inv_range            @ from device: invalidate
        b       arm925_dma_flush_range          @ bidirectional: clean+inv
SYM_FUNC_END(arm925_dma_map_area)
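
/*
 * The dispatch above relies on the enum dma_data_direction values
 * (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2):
 * eq catches DMA_TO_DEVICE, cs with eq already excluded can only mean
 * DMA_FROM_DEVICE, and the remaining lo case is DMA_BIDIRECTIONAL.
 */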

/*
 *      dma_unmap_area(start, size, dir)
 *      - start - kernel virtual start address
 *      - size  - size of region
 *      - dir   - DMA direction
 */
SYM_TYPED_FUNC_START(arm925_dma_unmap_area)
        ret     lr
SYM_FUNC_END(arm925_dma_unmap_area)

SYM_TYPED_FUNC_START(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:      mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHE_DLINESIZE
        subs    r1, r1, #CACHE_DLINESIZE
        bhi     1b
#endif
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
        ret     lr
SYM_FUNC_END(cpu_arm925_dcache_clean_area)

/* =============================== PageTable ============================== */

/*
 * cpu_arm925_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
        .align  5
SYM_TYPED_FUNC_START(cpu_arm925_switch_mm)
#ifdef CONFIG_MMU
        mov     ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
        mcr     p15, 0, ip, c7, c6, 0           @ invalidate D cache
#else
        /* Flush entries in both segments at once, see NOTE1 above */
        mov     r3, #(CACHE_DENTRIES - 1) << 4  @ 256 entries in segment
2:      mcr     p15, 0, r3, c7, c14, 2          @ clean & invalidate D index
        subs    r3, r3, #1 << 4
        bcs     2b                              @ entries 255 to 0
#endif
        mcr     p15, 0, ip, c7, c5, 0           @ invalidate I cache
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        mcr     p15, 0, r0, c2, c0, 0           @ load page table pointer
        mcr     p15, 0, ip, c8, c7, 0           @ invalidate I & D TLBs
#endif
        ret     lr
SYM_FUNC_END(cpu_arm925_switch_mm)
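
/*
 * The whole-cache clean+invalidate before the c2 write matters
 * because the ARM925 caches are virtually indexed and tagged: no line
 * belonging to the outgoing address space may survive the page table
 * switch, and the TLBs are invalidated afterwards for the same reason.
 */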

/*
 * cpu_arm925_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
        .align  5
SYM_TYPED_FUNC_START(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU
        armv3_set_pte_ext
        mov     r0, r0                          @ nop
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
#endif
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
#endif /* CONFIG_MMU */
        ret     lr
SYM_FUNC_END(cpu_arm925_set_pte_ext)

        .type   __arm925_setup, #function
__arm925_setup:
        mov     r0, #0

        /* Transparent on, D-cache clean & flush mode. See NOTE2 above */
        orr     r0, r0, #1 << 1                 @ transparent mode on
        mcr     p15, 0, r0, c15, c1, 0          @ write TI config register

        mov     r0, #0
        mcr     p15, 0, r0, c7, c7              @ invalidate I,D caches on v4
        mcr     p15, 0, r0, c7, c10, 4          @ drain write buffer on v4
#ifdef CONFIG_MMU
        mcr     p15, 0, r0, c8, c7              @ invalidate I,D TLBs on v4
#endif

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
        mov     r0, #4                          @ disable write-back on caches explicitly
        mcr     p15, 7, r0, c15, c0, 0
#endif

        adr     r5, arm925_crval
        ldmia   r5, {r5, r6}
        mrc     p15, 0, r0, c1, c0              @ get control register v4
        bic     r0, r0, r5
        orr     r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
        orr     r0, r0, #0x4000                 @ .1.. .... .... ....
#endif
        ret     lr
        .size   __arm925_setup, . - __arm925_setup

        /*
         *  R
         * .RVI ZFRS BLDP WCAM
         * .011 0001 ..11 1101
         */
        .type   arm925_crval, #object
arm925_crval:
        crval   clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130
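
/*
 * crval emits the <clear, set> pair consumed by __arm925_setup, which
 * computes ctrl = (ctrl & ~clear) | set; set is mmuset on MMU kernels
 * and ucset on !CONFIG_MMU kernels (see crval in proc-macros.S).
 */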

        __INITDATA
        @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
        define_processor_functions arm925, dabort=v4t_early_abort, pabort=legacy_pabort

        .section ".rodata"

        string  cpu_arch_name, "armv4t"
        string  cpu_elf_name, "v4"
        string  cpu_arm925_name, "ARM925T"

        .align

        .section ".proc.info.init", "a"

.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
        .type   __\name\()_proc_info,#object
__\name\()_proc_info:
        .long   \cpu_val
        .long   \cpu_mask
        .long   PMD_TYPE_SECT | \
                PMD_SECT_CACHEABLE | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
        .long   PMD_TYPE_SECT | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
        initfn  __arm925_setup, __\name\()_proc_info
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
        .long   cpu_arm925_name
        .long   arm925_processor_functions
        .long   v4wbi_tlb_fns
        .long   v4wb_user_fns
        .long   arm925_cache_fns
        .size   __\name\()_proc_info, . - __\name\()_proc_info
.endm

        arm925_proc_info arm925, 0x54029250, 0xfffffff0, cpu_arm925_name
        arm925_proc_info arm915, 0x54029150, 0xfffffff0, cpu_arm925_name
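
/*
 * The boot-time CPU match is (processor ID & cpu_mask) == cpu_val, so
 * the 0xfffffff0 masks accept any revision (low nibble) of the ARM925
 * and ARM915 IDs.
 */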