/*
 * linux/arch/arm/boot/compressed/head.S
 *
 * Copyright (C) 1996-2002 Russell King
 * Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

	.arch	armv7-a

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#elif defined(CONFIG_CPU_XSCALE)
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c8, c0, 0
		.endm
#else
		.macro	loadsp, rb, tmp
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c1, c0, 0
		.endm
#endif

#else

#include CONFIG_DEBUG_LL_INCLUDE

		.macro	writeb, ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb, tmp
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#else
		.macro	loadsp, rb, tmp
		addruart \rb, \tmp
		.endm
#endif
#endif
#endif
		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end  */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm
		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
		.arm				@ Always enter in ARM state
start:
		.type	start,#function
		.rept	7
		mov	r0, r0
		.endr
 ARM(		mov	r0, r0		)
 ARM(		b	1f		)
 THUMB(		adr	r12, BSYM(1f)	)
 THUMB(		bx	r12		)

		.word	_magic_sig	@ Magic numbers to help the loader
		.word	_magic_start	@ absolute load/run zImage address
		.word	_magic_end	@ zImage end address
		.word	0x04030201	@ endianness flag

 THUMB(		.thumb			)
1:
 ARM_BE8(	setend	be )			@ go BE8 if compiled for BE8
		mrs	r9, cpsr
#ifdef CONFIG_ARM_VIRT_EXT
		bl	__hyp_stub_install	@ get into SVC mode, reversibly
#endif
		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
not_angel:
		safe_svcmode_maskall r0

		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
						@ SPSR
		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */
		.text

#ifdef CONFIG_AUTO_ZRELADDR
		@ determine final kernel image address
		mov	r4, pc
		and	r4, r4, #0xf8000000
		add	r4, r4, #TEXT_OFFSET
#else
		ldr	r4, =zreladdr
#endif

		/*
		 * Set up a page table only if it won't overwrite us.
		 * That means r4 < pc && r4 - 16k page directory > &_end.
		 * Given that r4 > &_end is most unlikely, we add a rough
		 * additional 1MB of room for a possible appended DTB.
		 */
		mov	r0, pc
		cmp	r0, r4
		ldrcc	r0, LC0+32
		addcc	r0, r0, pc
		cmpcc	r4, r0
		orrcc	r4, r4, #1		@ remember we skipped cache_on
		blcs	cache_on
restart:	adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
		ldr	sp, [r0, #28]

		/*
		 * We might be running at a different address.  We need
		 * to fix up various pointers.
		 */
		sub	r0, r0, r1		@ calculate the delta offset
		add	r6, r6, r0		@ _edata
		add	r10, r10, r0		@ inflated kernel size location

		/*
		 * The kernel build system appends the size of the
		 * decompressed kernel at the end of the compressed data
		 * in little-endian form.
		 */
		ldrb	r9, [r10, #0]
		ldrb	lr, [r10, #1]
		orr	r9, r9, lr, lsl #8
		ldrb	lr, [r10, #2]
		ldrb	r10, [r10, #3]
		orr	r9, r9, lr, lsl #16
		orr	r9, r9, r10, lsl #24
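		/*
		 * Illustrative C equivalent of the byte-wise load above
		 * (a sketch only; 'p' names the four trailing bytes):
		 *
		 *	u32 size = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
		 *
		 * Loading a byte at a time keeps this endian- and
		 * alignment-safe regardless of how the zImage was built.
		 */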
#ifndef CONFIG_ZBOOT_ROM
		/* malloc space is above the relocated stack (64k max) */
		add	sp, sp, r0
		add	r10, sp, #0x10000
#else
		/*
		 * With ZBOOT_ROM the bss/stack is non-relocatable,
		 * but someone could still run this code from RAM,
		 * in which case our reference is _edata.
		 */
		mov	r10, r6
#endif

		mov	r5, #0			@ init dtb size to 0
#ifdef CONFIG_ARM_APPENDED_DTB
/*
 * r0  = delta
 * r2  = BSS start
 * r3  = BSS end
 * r4  = final kernel address (possibly with LSB set)
 * r5  = appended dtb size (still unknown)
 * r6  = _edata
 * r7  = architecture ID
 * r8  = atags/device tree pointer
 * r9  = size of decompressed image
 * r10 = end of this image, including bss/stack/malloc space if non XIP
 * r11 = GOT start
 * r12 = GOT end
 * sp  = stack pointer
 *
 * if there are device trees (dtb) appended to zImage, advance r10 so that the
 * dtb data will get relocated along with the kernel if necessary.
 */

		ldr	lr, [r6, #0]
#ifndef __ARMEB__
		ldr	r1, =0xedfe0dd0		@ sig is 0xd00dfeed big endian
#else
		ldr	r1, =0xd00dfeed
#endif
		cmp	lr, r1
		bne	dtb_check_done		@ not found
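		/*
		 * In C, the check above is roughly (a sketch; the constant
		 * is the FDT magic 0xd00dfeed, stored big endian, hence the
		 * pre-swapped literal on little-endian builds):
		 *
		 *	if (*(const u32 *)_edata != cpu_to_be32(0xd00dfeed))
		 *		goto dtb_check_done;
		 */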
#ifdef CONFIG_ARM_ATAG_DTB_COMPAT
		/*
		 * OK... Let's do some funky business here.
		 * If we do have a DTB appended to zImage, and we do have
		 * an ATAG list around, we want the latter to be translated
		 * and folded into the former here.  To be on the safe side,
		 * let's temporarily move the stack away into the malloc
		 * area.  No GOT fixup has occurred yet, but none of the
		 * code we're about to call uses any global variable.
		 */
		add	sp, sp, #0x10000
		stmfd	sp!, {r0-r3, ip, lr}
		mov	r0, r8
		mov	r1, r6
		sub	r2, sp, r6
		bl	atags_to_fdt

		/*
		 * If returned value is 1, there is no ATAG at the location
		 * pointed by r8.  Try the typical 0x100 offset from start
		 * of RAM and hope for the best.
		 */
		cmp	r0, #1
		sub	r0, r4, #TEXT_OFFSET
		bic	r0, r0, #1
		add	r0, r0, #0x100
		mov	r1, r6
		sub	r2, sp, r6
		bleq	atags_to_fdt

		ldmfd	sp!, {r0-r3, ip, lr}
		sub	sp, sp, #0x10000
#endif

		mov	r8, r6			@ use the appended device tree

		/*
		 * Make sure that the DTB doesn't end up in the final
		 * kernel's .bss area.  To do so, we adjust the decompressed
		 * kernel size to compensate if that .bss size is larger
		 * than the relocated code.
		 */
		ldr	r5, =_kernel_bss_size
		adr	r1, wont_overwrite
		sub	r1, r6, r1
		subs	r1, r5, r1
		addhi	r9, r9, r1

		/* Get the dtb's size */
		ldr	r5, [r6, #4]
#ifndef __ARMEB__
		/* convert r5 (dtb size) to little endian */
		eor	r1, r5, r5, ror #16
		bic	r1, r1, #0x00ff0000
		mov	r5, r5, ror #8
		eor	r5, r5, r1, lsr #8
#endif

		/* preserve 64-bit alignment */
		add	r5, r5, #7
		bic	r5, r5, #7
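		/*
		 * The eor/ror sequence above is the classic 32-bit byte
		 * swap; with the rounding it amounts to this C sketch
		 * (kernel-style helpers, shown for illustration only):
		 *
		 *	dtb_size = swab32(dtb_size);	// totalsize is big endian
		 *	dtb_size = ALIGN(dtb_size, 8);	// keep 64-bit alignment
		 */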
		/* relocate some pointers past the appended dtb */
		add	r6, r6, r5
		add	r10, r10, r5
		add	sp, sp, r5
dtb_check_done:
#endif

/*
 * Check to see if we will overwrite ourselves.
 *   r4  = final kernel address (possibly with LSB set)
 *   r9  = size of decompressed image
 *   r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
 * Note: the possible LSB in r4 is harmless here.
 */
		add	r10, r10, #16384
		cmp	r4, r10
		bhs	wont_overwrite
		add	r10, r4, r9
		adr	r9, wont_overwrite
		cmp	r10, r9
		bls	wont_overwrite
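		/*
		 * The two tests above are roughly this C (a sketch; the
		 * names are mine, not symbols in this file):
		 *
		 *	if (kernel_addr - 16384 >= image_end ||	  // page dir clears us
		 *	    kernel_addr + kernel_size <= (u32)&wont_overwrite)
		 *		goto wont_overwrite;		  // no overlap either way
		 */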
/*
 * Relocate ourselves past the end of the decompressed kernel.
 *   r6  = _edata
 *   r10 = end of the decompressed kernel
 * Because we always copy ahead, we need to do it from the end and go
 * backward in case the source and destination overlap.
 */
		/*
		 * Bump to the next 256-byte boundary with the size of
		 * the relocation code added.  This avoids overwriting
		 * ourselves when the offset is small.
		 */
		add	r10, r10, #((reloc_code_end - restart + 256) & ~255)
		bic	r10, r10, #255

		/* Get start of code we want to copy and align it down. */
		adr	r5, restart
		bic	r5, r5, #31

/* Relocate the hyp vector base if necessary */
#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE
		bne	1f

		bl	__hyp_get_vectors
		sub	r0, r0, r5
		add	r0, r0, r10
		bl	__hyp_set_vectors
1:
#endif

		sub	r9, r6, r5		@ size to copy
		add	r9, r9, #31		@ rounded up to a multiple
		bic	r9, r9, #31		@ ... of 32 bytes
		add	r6, r9, r5
		add	r9, r9, r10

1:		ldmdb	r6!, {r0 - r3, r10 - r12, lr}
		cmp	r6, r5
		stmdb	r9!, {r0 - r3, r10 - r12, lr}
		bhi	1b
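		/*
		 * The ldmdb/stmdb pair above is a backward memmove in
		 * 32-byte chunks, roughly this C (a sketch; src and dst
		 * are u32 pointers to the block ends):
		 *
		 *	do {
		 *		src -= 8;		// eight words...
		 *		dst -= 8;		// ...= 32 bytes per step
		 *		memcpy(dst, src, 32);
		 *	} while (src > start);
		 *
		 * Copying from the end is what makes the overlapping
		 * move safe.
		 */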
		/* Preserve offset to relocated code. */
		sub	r6, r9, r6

#ifndef CONFIG_ZBOOT_ROM
		/* cache_clean_flush may use the stack, so relocate it */
		add	sp, sp, r6
#endif

		bl	cache_clean_flush

		adr	r0, BSYM(restart)
		add	r0, r0, r6
		mov	pc, r0

wont_overwrite:
/*
 * If delta is zero, we are running at the address we were linked at.
 *   r0  = delta
 *   r2  = BSS start
 *   r3  = BSS end
 *   r4  = kernel execution address (possibly with LSB set)
 *   r5  = appended dtb size (0 if not present)
 *   r7  = architecture ID
 *   r8  = atags pointer
 *   r11 = GOT start
 *   r12 = GOT end
 *   sp  = stack pointer
 */
		orrs	r1, r0, r5
		beq	not_relocated

		add	r11, r11, r0
		add	r12, r12, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC (i.e. CONFIG_ZBOOT_ROM=n),
		 * we need to fix up pointers into the BSS region.
		 * Note that the stack pointer has already been fixed up.
		 */
		add	r2, r2, r0
		add	r3, r3, r0

		/*
		 * Relocate all entries in the GOT table.
		 * Bump bss entries to _edata + dtb size
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ This fixes up C references
		cmp	r1, r2			@ if entry >= bss_start &&
		cmphs	r3, r1			@       bss_end > entry
		addhi	r1, r1, r5		@    entry += dtb size
		str	r1, [r11], #4		@ next entry
		cmp	r11, r12
		blo	1b

		/* bump our bss pointers too */
		add	r2, r2, r5
		add	r3, r3, r5

#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r11, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r11], #4		@ C references.
		cmp	r11, r12
		blo	1b
#endif
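		/*
		 * Both GOT loops reduce to this C sketch (delta = r0,
		 * dtb_size = r5; names are mine):
		 *
		 *	for (u32 *got = got_start; got < got_end; got++) {
		 *		*got += delta;		// fix up C references
		 *		if (*got >= bss_start && *got < bss_end)
		 *			*got += dtb_size; // BSS moved past the dtb
		 *	}
		 *
		 * (The ZBOOT_ROM variant instead leaves entries inside the
		 * BSS untouched, since that region does not move.)
		 */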
not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b
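		/*
		 * This is memset(bss_start, 0, bss_end - bss_start),
		 * hand-unrolled to four word stores per iteration (a
		 * sketch; note the unrolling rounds the clear up to the
		 * next 16-byte granule).
		 */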
		/*
		 * Did we skip the cache setup earlier?
		 * That is indicated by the LSB in r4.
		 * Do it now if so.
		 */
		tst	r4, #1
		bic	r4, r4, #1
		blne	cache_on

		/*
		 * The C runtime environment should now be set up sufficiently.
		 * Set up some pointers, and start decompressing.
		 *   r4 = kernel execution address
		 *   r7 = architecture ID
		 *   r8 = atags pointer
		 */
		mov	r0, r4
		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max
		mov	r3, r7
		bl	decompress_kernel
		bl	cache_clean_flush
		bl	cache_off
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer

#ifdef CONFIG_ARM_VIRT_EXT
		mrs	r0, spsr		@ Get saved CPU boot mode
		and	r0, r0, #MODE_MASK
		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
		bne	__enter_kernel		@ boot kernel directly

		adr	r12, .L__hyp_reentry_vectors_offset
		ldr	r0, [r12]
		add	r0, r0, r12

		bl	__hyp_set_vectors
		__HVC(0)			@ otherwise bounce to hyp mode

		b	.			@ should never be reached

		.align	2
.L__hyp_reentry_vectors_offset:	.long	__hyp_reentry_vectors - .
#else
		b	__enter_kernel
#endif

		.align	2
		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	_edata			@ r6
		.word	input_data_end - 4	@ r10 (inflated size location)
		.word	_got_start		@ r11
		.word	_got_end		@ ip
		.word	.L_user_stack_end	@ sp
		.word	_end - restart + 16384 + 1024*1024
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =0x10000100		@ params_phys for RPC
		mov	pc, lr
		.ltorg
		.align
#endif
/*
 * Turn on the cache.  We need to setup some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r7 = architecture number
 *  r8 = atags pointer
 * On exit,
 *  r0, r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

/*
 * Initialize the highest priority protection region, PR7
 * to cover the whole 32-bit address space, cacheable and bufferable.
 */
__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		/*
		 * ?? ARMv3 MMU does not allow reading the control register,
		 * does this really work on ARMv3 MPU?
		 */
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		/* ?? this overwrites the value constructed above? */
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		/* ?? invalidate for the second time? */
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define CB_BITS 0x08
#else
#define CB_BITS 0x0c
#endif
__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12		@ XN|U + section mapping
		orr	r1, r1, #3 << 10	@ AP=11
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		cmphs	r10, r1			@   && end of RAM > virt
		bic	r1, r1, #0x1c		@ clear XN|U + C + B
		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
		orrhs	r1, r1, r6		@ set RAM section settings
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		orr	r1, r6, #0x04		@ ensure B is set for this
		orr	r1, r1, #3 << 10
		mov	r2, pc
		mov	r2, r2, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr
ENDPROC(__setup_mmu)
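/*
 * A C sketch of the flat map __setup_mmu builds (names are mine; pgd
 * is the 16 KiB table below the kernel, and r6 carries CB_BITS as set
 * up by the caller):
 *
 *	for (u32 i = 0; i < 4096; i++) {	  // one section per MiB
 *		u32 entry = (i << 20) | (3 << 10) | 0x02; // AP=11, section
 *		if (in_ram_window(i << 20))
 *			entry |= r6;		  // C+B (and XN policy)
 *		else
 *			entry |= 0x10;		  // XN|U, uncached
 *		pgd[i] = entry;			  // flat 1:1 map
 *	}
 *
 * where in_ram_window() stands for the 256 MiB region probed above.
 */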
@ Enable unaligned access on v6, to allow better code generation
@ for the decompressor C code:
__armv6_mmu_cache_on:
		mrc	p15, 0, r0, c1, c0, 0	@ read SCTLR
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
		mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
		b	__armv4_mmu_cache_on

__arm926ejs_mmu_cache_on:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
		mov	r0, #4			@ put dcache in WT mode
		mcr	p15, 7, r0, c15, c0, 0
#endif

__armv4_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mov	pc, r12

__armv7_mmu_cache_on:
		mov	r12, lr
#ifdef CONFIG_MMU
		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
		tst	r11, #0xf		@ VMSA
		movne	r6, #CB_BITS | 0x02	@ !XN
		blne	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		tst	r11, #0xf		@ VMSA
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
#endif
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x003c		@ write buffer
		bic	r0, r0, #2		@ A (no unaligned access fault)
		orr	r0, r0, #1 << 22	@ U (v6 unaligned access model)
						@ (needed for ARM1176)
#ifdef CONFIG_MMU
 ARM_BE8(	orr	r0, r0, #1 << 25 )	@ big-endian page tables
		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
		orrne	r0, r0, #1		@ MMU enabled
		movne	r1, #0xfffffffd		@ domain 0 = client
		bic	r6, r6, #1 << 31	@ 32-bit translation system
		bic	r6, r6, #3 << 0		@ use only ttbr0
		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
#endif
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back
		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

__fa526_cache_on:
		mov	r12, lr
		mov	r6, #CB_BITS | 0x12	@ U
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x1000		@ I-cache enable
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush UTLB
		mov	pc, r12

__common_mmu_cache_on:
#ifndef CONFIG_THUMB2_KERNEL
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline
#endif
#define PROC_ENTRY_SIZE (4*5)

/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r9  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r9, c0, c0	@ get processor ID
#else
		ldr	r9, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r9		@ (real ^ match)
		tst	r1, r2			@       & mask
 ARM(		addeq	pc, r12, r3		) @ call cache function
 THUMB(		addeq	r12, r3			)
 THUMB(		moveq	pc, r12			) @ call cache function
		add	r12, r12, #PROC_ENTRY_SIZE
		b	1b

/*
 * Table for cache operations.  This is basically:
 *  - CPU ID match
 *  - CPU ID mask
 *  - 'cache on' method instruction
 *  - 'cache off' method instruction
 *  - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
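
/*
 * A C sketch of the lookup in call_cache_fn (names are mine):
 *
 *	struct proc_type { u32 match, mask; u32 on, off, flush; };
 *
 *	for (p = proc_types; (processor_id ^ p->match) & p->mask; p++)
 *		;
 *	branch_to((char *)&p->on + offset - 8);	// offset r3 = 8/12/16
 *
 * The final entry has an all-zero mask, which matches any ID, so the
 * scan always terminates at the "unrecognised type" fallback.
 */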
		.align	2
		.type	proc_types,#object
proc_types:
		.word	0x41000000		@ old ARM ID
		.word	0xff00f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		mov	pc, lr
 THUMB(		nop				)

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		W(b)	__armv3_mpu_cache_on
		W(b)	__armv3_mpu_cache_off
		W(b)	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		W(b)	__armv4_mpu_cache_on
		W(b)	__armv4_mpu_cache_off
		W(b)	__armv4_mpu_cache_flush

		.word	0x41069260		@ ARM926EJ-S (v5TEJ)
		.word	0xff0ffff0
		W(b)	__arm926ejs_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56056900
		.word	0xffffff00		@ PXA9xx
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x56158000		@ PXA168
		.word	0xfffff000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x56050000		@ Feroceon
		.word	0xff0f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
		/* this conflicts with the standard ARMv5TE entry */
		.long	0x41009260		@ Old Feroceon
		.long	0xff00fff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv5tej_mmu_cache_flush
#endif

		.word	0x66015261		@ FA526
		.word	0xff01fff1
		W(b)	__fa526_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__fa526_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		W(b)	__armv4_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv5tej_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x000ff000
		W(b)	__armv6_mmu_cache_on
		W(b)	__armv4_mmu_cache_off
		W(b)	__armv6_mmu_cache_flush

		.word	0x000f0000		@ new CPU Id
		.word	0x000f0000
		W(b)	__armv7_mmu_cache_on
		W(b)	__armv7_mmu_cache_off
		W(b)	__armv7_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)
		mov	pc, lr
 THUMB(		nop				)

		.size	proc_types, . - proc_types

		/*
		 * If you get a "non-constant expression in ".if" statement"
		 * error from the assembler on this line, check that you have
		 * not accidentally written a "b" instruction where you should
		 * have written W(b).
		 */
		.if (. - proc_types) % PROC_ENTRY_SIZE != 0
		.error "The size of one or more proc_types entries is wrong."
		.endif

/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On exit,
 *  r0, r1, r2, r3, r9, r12 corrupted
 * This routine must preserve:
 *  r4, r7, r8
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
#ifdef CONFIG_MMU
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
#endif
		mov	pc, lr

__armv7_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
#ifdef CONFIG_MMU
		bic	r0, r0, #0x000d
#else
		bic	r0, r0, #0x000c
#endif
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r12, lr
		bl	__armv7_mmu_cache_flush
		mov	r0, #0
#ifdef CONFIG_MMU
		mcr	p15, 0, r0, c8, c7, 0	@ invalidate whole TLB
#endif
		mcr	p15, 0, r0, c7, c5, 6	@ invalidate BTC
		mcr	p15, 0, r0, c7, c10, 4	@ DSB
		mcr	p15, 0, r0, c7, c5, 4	@ ISB
		mov	pc, r12

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On exit,
 *  r1, r2, r3, r9, r10, r11, r12 corrupted
 * This routine must preserve:
 *  r4, r6, r7, r8
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn
__armv4_mpu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0
		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr

__fa526_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean and invalidate D cache
		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv6_mmu_cache_flush:
		mov	r1, #0
		tst	r4, #1
		mcreq	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcreq	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv7_mmu_cache_flush:
		tst	r4, #1
		bne	iflush
		mrc	p15, 0, r10, c0, c1, 5	@ read ID_MMFR1
		tst	r10, #0xf << 16		@ hierarchical cache (ARMv7)
		mov	r10, #0
		beq	hierarchical
		mcr	p15, 0, r10, c7, c14, 0	@ clean+invalidate D
		b	iflush
hierarchical:
		mcr	p15, 0, r10, c7, c10, 5	@ DMB
		stmfd	sp!, {r0-r7, r9-r11}
		mrc	p15, 1, r0, c0, c0, 1	@ read clidr
		ands	r3, r0, #0x7000000	@ extract loc from clidr
		mov	r3, r3, lsr #23		@ left align loc bit field
		beq	finished		@ if loc is 0, then no need to clean
		mov	r10, #0			@ start clean at cache level 0
loop1:
		add	r2, r10, r10, lsr #1	@ work out 3x current cache level
		mov	r1, r0, lsr r2		@ extract cache type bits from clidr
		and	r1, r1, #7		@ mask off the bits for current cache only
		cmp	r1, #2			@ see what cache we have at this level
		blt	skip			@ skip if no cache, or just i-cache
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
		mcr	p15, 0, r10, c7, c5, 4	@ isb to sync the new cssr&csidr
		mrc	p15, 1, r1, c0, c0, 0	@ read the new csidr
		and	r2, r1, #7		@ extract the length of the cache lines
		add	r2, r2, #4		@ add 4 (line length offset)
		ldr	r4, =0x3ff
		ands	r4, r4, r1, lsr #3	@ find maximum way number
		clz	r5, r4			@ find bit position of way size increment
		ldr	r7, =0x7fff
		ands	r7, r7, r1, lsr #13	@ extract max number of the index size
loop2:
		mov	r9, r4			@ create working copy of max way size
loop3:
 ARM(		orr	r11, r10, r9, lsl r5	) @ factor way and cache number into r11
 ARM(		orr	r11, r11, r7, lsl r2	) @ factor index number into r11
 THUMB(		lsl	r6, r9, r5		)
 THUMB(		orr	r11, r10, r6		) @ factor way and cache number into r11
 THUMB(		lsl	r6, r7, r2		)
 THUMB(		orr	r11, r11, r6		) @ factor index number into r11
		mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
		subs	r9, r9, #1		@ decrement the way
		bge	loop3
		subs	r7, r7, #1		@ decrement the index
		bge	loop2
skip:
		add	r10, r10, #2		@ increment cache number
		cmp	r3, r10
		bgt	loop1
finished:
		ldmfd	sp!, {r0-r7, r9-r11}
		mov	r10, #0			@ switch back to cache level 0
		mcr	p15, 2, r10, c0, c0, 0	@ select current cache level in cssr
iflush:
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r10, c7, c10, 4	@ DSB
		mcr	p15, 0, r10, c7, c5, 4	@ ISB
		mov	pc, lr
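/*
 * The hierarchical path above is the generic ARMv7 set/way clean and
 * invalidate from the ARM ARM, roughly (a sketch; helper names are
 * mine, fields come from CLIDR/CCSIDR):
 *
 *	for (level = 0; level < loc; level++) {
 *		if (clidr_ctype(level) < 2)	// no cache, or I-only
 *			continue;
 *		write_csselr(level << 1);	// select data/unified cache
 *		isb();
 *		ccsidr = read_ccsidr();
 *		for (way = max_way; way >= 0; way--)
 *			for (set = max_set; set >= 0; set--)
 *				dccisw(level, way, set); // c7, c14, 2
 *	}
 */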
__armv5tej_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
1:		mrc	p15, 0, r15, c7, c14, 3	@ test,clean,invalidate D cache
		bne	1b
		mcr	p15, 0, r0, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r0, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv4_mmu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r9			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		mov	r1, pc
		bic	r1, r1, #63		@ align to longest cache line
		add	r2, r1, r2
1:
 ARM(		ldr	r3, [r1], r11		) @ s/w flush D cache
 THUMB(		ldr	r3, [r1]		) @ s/w flush D cache
 THUMB(		add	r1, r1, r11		)
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		tst	r4, #1
		movne	pc, lr
		mov	r1, #0
		mcr	p15, 0, r1, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.align	2
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

@ phex corrupts {r0, r1, r2, r3}
phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b

@ puts corrupts {r0, r1, r2, r3}
puts:		loadsp	r3, r1
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000		@ busy-wait between characters
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr

@ putc corrupts {r0, r1, r2, r3}
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3, r1
		b	2b

@ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr}
memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg

#ifdef CONFIG_ARM_VIRT_EXT
		.align	5
__hyp_reentry_vectors:
		W(b)	.			@ reset
		W(b)	.			@ undef
		W(b)	.			@ svc
		W(b)	.			@ pabort
		W(b)	.			@ dabort
		W(b)	__enter_kernel		@ hyp
		W(b)	.			@ irq
		W(b)	.			@ fiq
#endif /* CONFIG_ARM_VIRT_EXT */

__enter_kernel:
		mov	r0, #0			@ must be 0
 ARM(		mov	pc, r4		)	@ call kernel
 THUMB(		bx	r4		)	@ entry point is always ARM

reloc_code_end:

		.align
		.section ".stack", "aw", %nobits
.L_user_stack:	.space	4096
.L_user_stack_end: