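/*
 * ARMv7 cache maintenance routines (set/way based). Provides enable/disable
 * of the I- and D-caches via SCTLR, flush/clean/invalidate of the inner
 * D-caches (all levels, L1 only, or L2 only), and combined entry points that
 * clear SCTLR.C and then perform the maintenance in a single call, so no new
 * lines are allocated in between. The level/way/set loops closely follow the
 * Linux v7_flush_dcache_all sequence.
 */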
    .text
    .global __inner_flush_dcache_all
    .global __inner_flush_dcache_L1
    .global __inner_flush_dcache_L2
    .global __inner_clean_dcache_all
    .global __inner_clean_dcache_L1
    .global __inner_clean_dcache_L2
    .global __inner_inv_dcache_all
    .global __inner_inv_dcache_L1
    .global __inner_inv_dcache_L2
    .global __enable_dcache
    .global __enable_icache
    .global __enable_cache
    .global __disable_dcache
    .global __disable_icache
    .global __disable_cache
    .global __disable_dcache__inner_flush_dcache_L1
    .global __disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2
    .global __disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2
    .global d_i_dis_flush_all

    .equ C1_IBIT,    0x00001000
    .equ C1_CBIT,    0x00000004
    .equ PSR_F_BIT,  0x00000040
    .equ PSR_I_BIT,  0x00000080
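    /*
     * C1_IBIT and C1_CBIT are the SCTLR.I (bit 12) and SCTLR.C (bit 2)
     * cache enable bits; PSR_I_BIT and PSR_F_BIT are the CPSR IRQ and FIQ
     * mask bits, used below to hold interrupts off around non-atomic
     * clean+invalidate sequences.
     */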
__enable_icache:
    MRC     p15, 0, r0, c1, c0, 0
    ORR     r0, r0, #C1_IBIT
    MCR     p15, 0, r0, c1, c0, 0
    BX      lr

__disable_icache:
    MRC     p15, 0, r0, c1, c0, 0
    BIC     r0, r0, #C1_IBIT
    MCR     p15, 0, r0, c1, c0, 0
    BX      lr

__enable_dcache:
    MRC     p15, 0, r0, c1, c0, 0
    ORR     r0, r0, #C1_CBIT
    dsb
    MCR     p15, 0, r0, c1, c0, 0
    dsb
    isb
    BX      lr

__disable_dcache:
    MRC     p15, 0, r0, c1, c0, 0
    BIC     r0, r0, #C1_CBIT
    dsb
    MCR     p15, 0, r0, c1, c0, 0
    dsb
    isb
    /*
     * Erratum 794322: an instruction fetch can be allocated into the L2
     * cache after the cache is disabled.
     * The erratum is avoided by inserting both of the following after the
     * SCTLR.C bit is cleared to 0, and before the caches are cleaned or
     * invalidated:
     *   1) a TLBIMVA operation to any address;
     *   2) a DSB instruction.
     */
    MCR     p15, 0, r0, c8, c7, 1       @ TLBIMVA: invalidate unified TLB entry by MVA
    dsb
    isb
    BX      lr
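    /*
     * Note: clearing SCTLR.C leaves the D-cache contents in place, and any
     * dirty lines are no longer visible to the (now non-cacheable) data
     * accesses; callers are expected to clean/invalidate next, as the
     * combined __disable_dcache__* routines below do in a single call.
     */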
__enable_cache:
    MRC     p15, 0, r0, c1, c0, 0
    ORR     r0, r0, #C1_IBIT
    ORR     r0, r0, #C1_CBIT
    MCR     p15, 0, r0, c1, c0, 0
    BX      lr

__disable_cache:
    MRC     p15, 0, r0, c1, c0, 0
    BIC     r0, r0, #C1_IBIT
    BIC     r0, r0, #C1_CBIT
    MCR     p15, 0, r0, c1, c0, 0
    /*
     * Erratum 794322 workaround (see __disable_dcache above): TLBIMVA + DSB
     * after clearing SCTLR.C, before the caches are cleaned or invalidated.
     */
    MCR     p15, 0, r0, c8, c7, 1       @ TLBIMVA: invalidate unified TLB entry by MVA
    dsb
    BX      lr
__inner_flush_dcache_all:
    push    {r0, r1, r2, r3, r4, r5, r7, r8, r9, r10, r11, r14}
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     all_finished                @ if loc is 0, then no need to clean
    mov     r10, #0                     @ start clean at cache level 0
all_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     all_skip                    @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
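    /*
     * The set/way operand built in r11 below follows the ARMv7 DC*SW
     * encoding: way number in the top bits (shifted left by r5, the
     * leading-zero count of the max way number), set index shifted left by
     * the line-size offset (r2), and the cache level in bits [3:1] (r10,
     * which already holds level << 1).
     */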
all_loop2:
    mov     r9, r4                      @ create working copy of max way number
all_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
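    /*
     * With CONFIG_L1C_OPT, DCCISW is replaced for L1 (r10 != 2) by a DCCSW
     * (clean) followed by a DCISW (invalidate), with IRQ/FIQ masked so the
     * line cannot be re-dirtied between the two operations; L2 keeps the
     * single DCCISW. Presumably a workaround for a core-specific issue with
     * clean-and-invalidate by set/way.
     */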
#ifdef CONFIG_L1C_OPT
    @ replace DCCISW with DCCSW + DCISW
    cmp     r10, #2
    mrsne   r1, cpsr                    @ disable IRQs and save flags to make clean and invalidate atomic
    orrne   r8, r1, #PSR_I_BIT | PSR_F_BIT
    msrne   cpsr_c, r8
    mcrne   p15, 0, r11, c7, c10, 2     @ clean by set/way
    mcrne   p15, 0, r11, c7, c6, 2      @ invalidate by set/way
    msrne   cpsr_c, r1
    mcreq   p15, 0, r11, c7, c14, 2     @ clean & invalidate by set/way
#else
    mcr     p15, 0, r11, c7, c14, 2     @ clean & invalidate by set/way
#endif
    subs    r9, r9, #1                  @ decrement the way
    bge     all_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     all_loop2
all_skip:
    add     r10, r10, #2                @ increment cache number
    cmp     r3, r10
    bgt     all_loop1
all_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
    pop     {r0, r1, r2, r3, r4, r5, r7, r8, r9, r10, r11, r14}
    bx      lr
__inner_flush_dcache_L1:
    push    {r0, r1, r2, r3, r4, r5, r7, r8, r9, r10, r11, r14}
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     L1_finished                 @ if loc is 0, then no need to clean
    mov     r10, #0                     @ start clean at cache level 1
L1_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     L1_skip                     @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
L1_loop2:
    mov     r9, r4                      @ create working copy of max way number
L1_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
#ifdef CONFIG_L1C_OPT
    @ replace DCCISW with DCCSW + DCISW
    mrs     r1, cpsr                    @ disable IRQs and save flags to make clean and invalidate atomic
    orr     r8, r1, #PSR_I_BIT | PSR_F_BIT
    msr     cpsr_c, r8
    mcr     p15, 0, r11, c7, c10, 2     @ clean by set/way
    mcr     p15, 0, r11, c7, c6, 2      @ invalidate by set/way
    msr     cpsr_c, r1
#else
    mcr     p15, 0, r11, c7, c14, 2     @ clean & invalidate by set/way
#endif
    subs    r9, r9, #1                  @ decrement the way
    bge     L1_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     L1_loop2
L1_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    L1_loop1
L1_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
    pop     {r0, r1, r2, r3, r4, r5, r7, r8, r9, r10, r11, r14}
    bx      lr
__inner_flush_dcache_L2:
    push    {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    @push   {r4, r5, r7, r9, r10, r11}
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     L2_finished                 @ if loc is 0, then no need to clean
    mov     r10, #2                     @ start clean at cache level 2
L2_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     L2_skip                     @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
L2_loop2:
    mov     r9, r4                      @ create working copy of max way number
L2_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c14, 2     @ clean & invalidate by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     L2_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     L2_loop2
L2_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    L2_loop1
L2_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
    @pop    {r4, r5, r7, r9, r10, r11}
    pop     {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    bx      lr
__inner_clean_dcache_all:
    push    {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    @push   {r4, r5, r7, r9, r10, r11}
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     all_cl_finished             @ if loc is 0, then no need to clean
    mov     r10, #0                     @ start clean at cache level 0
all_cl_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     all_cl_skip                 @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
all_cl_loop2:
    mov     r9, r4                      @ create working copy of max way number
all_cl_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c10, 2     @ clean by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     all_cl_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     all_cl_loop2
all_cl_skip:
    add     r10, r10, #2                @ increment cache number
    cmp     r3, r10
    bgt     all_cl_loop1
all_cl_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
    @pop    {r4, r5, r7, r9, r10, r11}
    pop     {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    bx      lr
__inner_clean_dcache_L1:
    push    {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    @push   {r4, r5, r7, r9, r10, r11}
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     L1_cl_finished              @ if loc is 0, then no need to clean
    mov     r10, #0                     @ start clean at cache level 1
L1_cl_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     L1_cl_skip                  @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
L1_cl_loop2:
    mov     r9, r4                      @ create working copy of max way number
L1_cl_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c10, 2     @ clean by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     L1_cl_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     L1_cl_loop2
L1_cl_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    L1_cl_loop1
L1_cl_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
    @pop    {r4, r5, r7, r9, r10, r11}
    pop     {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    bx      lr
__inner_clean_dcache_L2:
#if 0
    mov     r0, sp
    mcr     p15, 0, r0, c7, c14, 1      @ clean and invalidate D entry
    dsb
    sub     r0, r0, #64
    mcr     p15, 0, r0, c7, c14, 1      @ clean and invalidate D entry
    dsb
#endif
    push    {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    @push   {r4, r5, r7, r9, r10, r11}
#if 0
    mov     r0, sp
    mcr     p15, 0, r0, c7, c14, 1      @ clean and invalidate D entry
    dsb
    sub     r0, r0, #64
    mcr     p15, 0, r0, c7, c14, 1      @ clean and invalidate D entry
    dsb
#endif
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     L2_cl_finished              @ if loc is 0, then no need to clean
    mov     r10, #2                     @ start clean at cache level 2
L2_cl_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     L2_cl_skip                  @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
L2_cl_loop2:
    mov     r9, r4                      @ create working copy of max way number
L2_cl_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c10, 2     @ clean by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     L2_cl_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     L2_cl_loop2
L2_cl_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    L2_cl_loop1
L2_cl_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
    @pop    {r4, r5, r7, r9, r10, r11}
    pop     {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    bx      lr
__inner_inv_dcache_all:
    push    {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    @push   {r4, r5, r7, r9, r10, r11}
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     all_inv_finished            @ if loc is 0, then no need to invalidate
    mov     r10, #0                     @ start invalidate at cache level 0
all_inv_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     all_inv_skip                @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
all_inv_loop2:
    mov     r9, r4                      @ create working copy of max way number
all_inv_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c6, 2      @ invalidate by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     all_inv_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     all_inv_loop2
all_inv_skip:
    add     r10, r10, #2                @ increment cache number
    cmp     r3, r10
    bgt     all_inv_loop1
all_inv_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
    @pop    {r4, r5, r7, r9, r10, r11}
    pop     {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    bx      lr
__inner_inv_dcache_L1:
    push    {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    @push   {r4, r5, r7, r9, r10, r11}
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     L1_inv_finished             @ if loc is 0, then no need to invalidate
    mov     r10, #0                     @ start invalidate at cache level 1
L1_inv_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     L1_inv_skip                 @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
L1_inv_loop2:
    mov     r9, r4                      @ create working copy of max way number
L1_inv_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c6, 2      @ invalidate by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     L1_inv_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     L1_inv_loop2
L1_inv_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    L1_inv_loop1
L1_inv_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
    @pop    {r4, r5, r7, r9, r10, r11}
    pop     {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    bx      lr
__inner_inv_dcache_L2:
    push    {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    @push   {r4, r5, r7, r9, r10, r11}
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     L2_inv_finished             @ if loc is 0, then no need to invalidate
    mov     r10, #2                     @ start invalidate at cache level 2
L2_inv_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     L2_inv_skip                 @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
L2_inv_loop2:
    mov     r9, r4                      @ create working copy of max way number
L2_inv_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c6, 2      @ invalidate by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     L2_inv_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     L2_inv_loop2
L2_inv_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    L2_inv_loop1
L2_inv_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
    @pop    {r4, r5, r7, r9, r10, r11}
    pop     {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    bx      lr
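/*
 * The combined routines below first clear SCTLR.C (with the erratum 794322
 * workaround) and only then flush/clean by set/way, so that no lines can be
 * allocated or re-dirtied between disabling the cache and the maintenance
 * loops.
 */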
__disable_dcache__inner_flush_dcache_L1:
/*******************************************************************************
 * push stack
 ******************************************************************************/
    push    {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
/*******************************************************************************
 * __disable_dcache
 ******************************************************************************/
    MRC     p15, 0, r0, c1, c0, 0
    BIC     r0, r0, #C1_CBIT
    dsb
    MCR     p15, 0, r0, c1, c0, 0
    dsb
    isb
    /*
     * Erratum 794322 workaround (see __disable_dcache above): TLBIMVA + DSB
     * after clearing SCTLR.C, before the caches are cleaned or invalidated.
     */
    MCR     p15, 0, r0, c8, c7, 1       @ TLBIMVA: invalidate unified TLB entry by MVA
    dsb
    isb
/*******************************************************************************
 * __inner_flush_dcache_L1
 ******************************************************************************/
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     DF1_L1_finished             @ if loc is 0, then no need to clean
    mov     r10, #0                     @ start clean at cache level 1
DF1_L1_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     DF1_L1_skip                 @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
DF1_L1_loop2:
    mov     r9, r4                      @ create working copy of max way number
DF1_L1_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c10, 2     @ clean by set/way
    mcr     p15, 0, r11, c7, c6, 2      @ invalidate by set/way
    @mcr    p15, 0, r11, c7, c14, 2     @ (disabled) clean & invalidate by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     DF1_L1_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     DF1_L1_loop2
DF1_L1_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    DF1_L1_loop1
DF1_L1_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
/*******************************************************************************
 * pop stack
 ******************************************************************************/
    pop     {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    bx      lr
__disable_dcache__inner_flush_dcache_L1__inner_flush_dcache_L2:
/*******************************************************************************
 * push stack
 ******************************************************************************/
    push    {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
/*******************************************************************************
 * __disable_dcache
 ******************************************************************************/
    MRC     p15, 0, r0, c1, c0, 0
    BIC     r0, r0, #C1_CBIT
    dsb
    MCR     p15, 0, r0, c1, c0, 0
    dsb
    isb
    /*
     * Erratum 794322 workaround (see __disable_dcache above): TLBIMVA + DSB
     * after clearing SCTLR.C, before the caches are cleaned or invalidated.
     */
    MCR     p15, 0, r0, c8, c7, 1       @ TLBIMVA: invalidate unified TLB entry by MVA
    dsb
    isb
/*******************************************************************************
 * __inner_flush_dcache_L1
 ******************************************************************************/
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     DF1F2_L1_finished           @ if loc is 0, then no need to clean
    mov     r10, #0                     @ start clean at cache level 1
DF1F2_L1_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     DF1F2_L1_skip               @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
DF1F2_L1_loop2:
    mov     r9, r4                      @ create working copy of max way number
DF1F2_L1_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c10, 2     @ clean by set/way
    mcr     p15, 0, r11, c7, c6, 2      @ invalidate by set/way
    @mcr    p15, 0, r11, c7, c14, 2     @ (disabled) clean & invalidate by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     DF1F2_L1_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     DF1F2_L1_loop2
DF1F2_L1_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    DF1F2_L1_loop1
DF1F2_L1_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
/*******************************************************************************
 * clrex
 ******************************************************************************/
    clrex                               @ clear the local exclusive monitor
/*******************************************************************************
 * __inner_flush_dcache_L2
 ******************************************************************************/
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     DF1F2_L2_finished           @ if loc is 0, then no need to clean
    mov     r10, #2                     @ start clean at cache level 2
DF1F2_L2_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     DF1F2_L2_skip               @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
DF1F2_L2_loop2:
    mov     r9, r4                      @ create working copy of max way number
DF1F2_L2_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c14, 2     @ clean & invalidate by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     DF1F2_L2_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     DF1F2_L2_loop2
DF1F2_L2_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    DF1F2_L2_loop1
DF1F2_L2_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
/*******************************************************************************
 * pop stack
 ******************************************************************************/
    pop     {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    bx      lr
__disable_dcache__inner_flush_dcache_L1__inner_clean_dcache_L2:
/*******************************************************************************
 * push stack
 ******************************************************************************/
    push    {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
/*******************************************************************************
 * __disable_dcache
 ******************************************************************************/
    MRC     p15, 0, r0, c1, c0, 0
    BIC     r0, r0, #C1_CBIT
    dsb
    MCR     p15, 0, r0, c1, c0, 0
    dsb
    isb
    /*
     * Erratum 794322 workaround (see __disable_dcache above): TLBIMVA + DSB
     * after clearing SCTLR.C, before the caches are cleaned or invalidated.
     */
    MCR     p15, 0, r0, c8, c7, 1       @ TLBIMVA: invalidate unified TLB entry by MVA
    dsb
    isb
/*******************************************************************************
 * __inner_flush_dcache_L1
 ******************************************************************************/
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     DF1C2_L1_finished           @ if loc is 0, then no need to clean
    mov     r10, #0                     @ start clean at cache level 1
DF1C2_L1_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     DF1C2_L1_skip               @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
DF1C2_L1_loop2:
    mov     r9, r4                      @ create working copy of max way number
DF1C2_L1_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c10, 2     @ clean by set/way
    mcr     p15, 0, r11, c7, c6, 2      @ invalidate by set/way
    @mcr    p15, 0, r11, c7, c14, 2     @ (disabled) clean & invalidate by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     DF1C2_L1_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     DF1C2_L1_loop2
DF1C2_L1_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    DF1C2_L1_loop1
DF1C2_L1_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
/*******************************************************************************
 * clrex
 ******************************************************************************/
    clrex                               @ clear the local exclusive monitor
/*******************************************************************************
 * __inner_clean_dcache_L2
 ******************************************************************************/
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     DF1C2_L2_cl_finished        @ if loc is 0, then no need to clean
    mov     r10, #2                     @ start clean at cache level 2
DF1C2_L2_cl_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     DF1C2_L2_cl_skip            @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
DF1C2_L2_cl_loop2:
    mov     r9, r4                      @ create working copy of max way number
DF1C2_L2_cl_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c10, 2     @ clean by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     DF1C2_L2_cl_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     DF1C2_L2_cl_loop2
DF1C2_L2_cl_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    DF1C2_L2_cl_loop1
DF1C2_L2_cl_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
/*******************************************************************************
 * pop stack
 ******************************************************************************/
    pop     {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    bx      lr
d_i_dis_flush_all:
/*******************************************************************************
 * push stack
 ******************************************************************************/
    push    {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
/*******************************************************************************
 * __disable_cache (d-cache + i-cache)
 ******************************************************************************/
    MRC     p15, 0, r0, c1, c0, 0
    BIC     r0, r0, #C1_CBIT
    BIC     r0, r0, #C1_IBIT
    dsb
    MCR     p15, 0, r0, c1, c0, 0
    dsb
    isb
    /*
     * Erratum 794322 workaround (see __disable_dcache above): TLBIMVA + DSB
     * after clearing SCTLR.C, before the caches are cleaned or invalidated.
     */
    MCR     p15, 0, r0, c8, c7, 1       @ TLBIMVA: invalidate unified TLB entry by MVA
    dsb
    isb
/*******************************************************************************
 * __inner_flush_dcache_L1
 ******************************************************************************/
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     DIF1F2_L1_finished          @ if loc is 0, then no need to clean
    mov     r10, #0                     @ start clean at cache level 1
DIF1F2_L1_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     DIF1F2_L1_skip              @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
DIF1F2_L1_loop2:
    mov     r9, r4                      @ create working copy of max way number
DIF1F2_L1_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c10, 2     @ clean by set/way
    mcr     p15, 0, r11, c7, c6, 2      @ invalidate by set/way
    @mcr    p15, 0, r11, c7, c14, 2     @ (disabled) clean & invalidate by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     DIF1F2_L1_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     DIF1F2_L1_loop2
DIF1F2_L1_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    DIF1F2_L1_loop1
DIF1F2_L1_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
/*******************************************************************************
 * clrex
 ******************************************************************************/
    clrex                               @ clear the local exclusive monitor
/*******************************************************************************
 * __inner_flush_dcache_L2
 ******************************************************************************/
    dmb                                 @ ensure ordering with previous memory accesses
    mrc     p15, 1, r0, c0, c0, 1       @ read clidr
    ands    r3, r0, #0x7000000          @ extract loc from clidr
    mov     r3, r3, lsr #23             @ left align loc bit field
    beq     DIF1F2_L2_finished          @ if loc is 0, then no need to clean
    mov     r10, #2                     @ start clean at cache level 2
DIF1F2_L2_loop1:
    add     r2, r10, r10, lsr #1        @ work out 3x current cache level
    mov     r1, r0, lsr r2              @ extract cache type bits from clidr
    and     r1, r1, #7                  @ mask off the bits for current cache only
    cmp     r1, #2                      @ see what cache we have at this level
    blt     DIF1F2_L2_skip              @ skip if no cache, or just i-cache
#ifdef CONFIG_ARM_ERRATA_814220
    dsb
#endif
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    isb                                 @ isb to sync the new cssr & csidr
    mrc     p15, 1, r1, c0, c0, 0       @ read the new csidr
    and     r2, r1, #7                  @ extract the length of the cache lines
    add     r2, r2, #4                  @ add 4 (line length offset)
    ldr     r4, =0x3ff
    ands    r4, r4, r1, lsr #3          @ find maximum way number (number of ways - 1)
    clz     r5, r4                      @ find bit position of way size increment
    ldr     r7, =0x7fff
    ands    r7, r7, r1, lsr #13         @ extract maximum set index (number of sets - 1)
DIF1F2_L2_loop2:
    mov     r9, r4                      @ create working copy of max way number
DIF1F2_L2_loop3:
    orr     r11, r10, r9, lsl r5        @ factor way and cache number into r11
    orr     r11, r11, r7, lsl r2        @ factor index number into r11
    mcr     p15, 0, r11, c7, c14, 2     @ clean & invalidate by set/way
    subs    r9, r9, #1                  @ decrement the way
    bge     DIF1F2_L2_loop3
    subs    r7, r7, #1                  @ decrement the index
    bge     DIF1F2_L2_loop2
DIF1F2_L2_skip:
    @add    r10, r10, #2                @ increment cache number (single level only)
    @cmp    r3, r10
    @bgt    DIF1F2_L2_loop1
DIF1F2_L2_finished:
    mov     r10, #0                     @ switch back to cache level 0
    mcr     p15, 2, r10, c0, c0, 0      @ select current cache level in cssr
    dsb
    isb
/*******************************************************************************
 * pop stack
 ******************************************************************************/
    pop     {r0, r1, r2, r3, r4, r5, r7, r9, r10, r11, r14}
    bx      lr
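/*
 * All entry points take no arguments and return with BX lr; the maintenance
 * routines save and restore every register they use, and the enable/disable
 * helpers clobber only r0. From C they can therefore be declared as plain
 * void functions (illustrative sketch, assuming AAPCS callers):
 *
 *   extern void __disable_dcache__inner_flush_dcache_L1(void);
 *   extern void d_i_dis_flush_all(void);
 */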
    .end