/*
* cortex arm arch v7 cache flushing and invalidation
* shared by l.s and rebootcode.s
*/
TEXT cacheiinv(SB), $-4 /* I invalidate */
MOVW $0, R0
MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall /* ok on cortex */
ISB
RET
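/*
 * the MCR above is the v7 "invalidate all i-caches to PoU" operation
 * (ICIALLU); the zero in R0 is conventional, the operation ignores it.
 */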
/*
* set/way operators, passed a suitable set/way value in R0.
*/
TEXT cachedwb_sw(SB), $-4
MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEsi
RET
TEXT cachedwbinv_sw(SB), $-4
MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEsi
RET
TEXT cachedinv_sw(SB), $-4
MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEsi
RET
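/*
 * functionally: cachedwb_sw cleans (writes back), cachedwbinv_sw cleans
 * and invalidates, and cachedinv_sw invalidates the single cache line
 * named by the set/way/level value in R0.  wholecache below drives one
 * of these over every set and way of a given cache level.
 */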
/* set cache size select */
TEXT setcachelvl(SB), $-4
MCR CpSC, CpIDcssel, R0, C(CpID), C(CpIDid), 0
ISB
RET
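/*
 * the value written is a CSSELR selector: bit 0 chooses the i-cache (1)
 * or the d/unified cache (0), and bits 3-1 hold the level minus one, so
 * (level-1)<<1 presumably selects the data cache at that level before
 * getwayssets below reads its size.
 */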
/* return cache sizes */
TEXT getwayssets(SB), $-4
MRC CpSC, CpIDcsize, R0, C(CpID), C(CpIDid), 0
RET
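/*
 * the register read is the cache size id register (CCSIDR) of the cache
 * chosen by setcachelvl: bits 2-0 are log2(words per line)-2, bits 12-3
 * are ways-1, and bits 27-13 are sets-1.
 */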
/*
* l1 cache operations.
* l1 and l2 ops are intended to be called from C, so they need not save the
* caller's regs, only those we need to preserve across calls.
*/
/*
* these shift values are for the Cortex-A8 L1 cache (A=2, L=6) and
* the Cortex-A8 L2 cache (A=3, L=6).
* A = log2(# of ways), L = log2(bytes per cache line).
* see armv7 arch ref p. 1403.
*/
#define L1WAYSH 30
#define L1SETSH 6
#define L2WAYSH 29
#define L2SETSH 6
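/*
 * a set/way operand for the _sw ops above is composed as
 *	(way << WAYSH) | (set << SETSH) | ((level-1) << 1)
 * so, e.g., way 3, set 0 of the l1 d-cache is (3<<L1WAYSH) | (0<<L1SETSH),
 * the level field being zero for level 1.
 */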
/*
* callers are assumed to be the above l1 and l2 ops.
* R0 is the function to call in the innermost loop.
* R8 is the cache level (one-origin: 1 or 2).
*
* initial translation by 5c, then massaged by hand.
*/
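/*
 * a caller would look roughly like this (a sketch only; the real l1 and
 * l2 entry points elsewhere in this file may differ in detail):
 *	MOVW	$cachedwbinv_sw(SB), R0		op to apply per set/way
 *	MOVW	$1, R8				cache level, one-origin
 *	BL	wholecache(SB)
 * a real caller with no stack frame must also preserve R14 around the BL.
 */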
TEXT wholecache+0(SB), $-4
MOVW R0, R1 /* save argument for inner loop in R1 */
SUB $1, R8 /* convert cache level to zero origin */
/* we may not have the MMU on yet, so map R1 to PC's space */
BIC $KSEGM, R1 /* strip segment from address */
MOVW PC, R2 /* get PC's segment ... */
AND $KSEGM, R2
CMP $0, R2 /* PC segment should be non-zero on omap */
BEQ buggery
ORR R2, R1 /* combine them */