
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h> /* for segment_eq() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);
/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations. Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address. This breaks with the tradition
 *    set by the R4000. To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign-extended address for 64-bit code. To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE CKSEG0
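
/*
 * Illustration (not part of the original header): for an indexed cache op the
 * address encodes a cache index and way rather than a memory location, so a
 * caller forms it relative to INDEX_BASE, roughly
 *
 *	flush_dcache_line_indexed(INDEX_BASE + (index << line_shift) | way_bits);
 *
 * where "index", "line_shift" and "way_bits" are placeholder names. The
 * blast_*cache* helpers further down build their addresses exactly this way,
 * OR-ing the way-select bits into an INDEX_BASE-relative offset.
 */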

#define cache_op(op,addr) \
        __asm__ __volatile__( \
        " .set push \n" \
        " .set noreorder \n" \
        " .set arch=r4000 \n" \
        " cache %0, %1 \n" \
        " .set pop \n" \
        : \
        : "i" (op), "R" (*(unsigned char *)(addr)))

#ifdef CONFIG_MIPS_MT

/*
 * Optionally force single-threaded execution during I-cache flushes.
 */
#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
        unsigned long flags = 0; \
        unsigned long mtflags = 0; \
        if (mt_protiflush) { \
                local_irq_save(flags); \
                ehb(); \
                mtflags = dvpe(); \
                mt_cflush_lockdown(); \
        }

#define END_MT_IPROT \
        if (mt_protiflush) { \
                mt_cflush_release(); \
                evpe(mtflags); \
                local_irq_restore(flags); \
        }

#define BEGIN_MT_DPROT \
        unsigned long flags = 0; \
        unsigned long mtflags = 0; \
        if (mt_protdflush) { \
                local_irq_save(flags); \
                ehb(); \
                mtflags = dvpe(); \
                mt_cflush_lockdown(); \
        }

#define END_MT_DPROT \
        if (mt_protdflush) { \
                mt_cflush_release(); \
                evpe(mtflags); \
                local_irq_restore(flags); \
        }

#else

#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT

#endif /* PROTECT_CACHE_FLUSHES */

#define __iflush_prologue \
        unsigned long redundance; \
        extern int mt_n_iflushes; \
        BEGIN_MT_IPROT \
        for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue \
        END_MT_IPROT \
        }

#define __dflush_prologue \
        unsigned long redundance; \
        extern int mt_n_dflushes; \
        BEGIN_MT_DPROT \
        for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
        END_MT_DPROT \
        }

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

static inline void flush_icache_line_indexed(unsigned long addr)
{
        __iflush_prologue
        cache_op(Index_Invalidate_I, addr);
        __iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
        __dflush_prologue
        cache_op(Index_Writeback_Inv_D, addr);
        __dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
        cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
        __iflush_prologue
        switch (boot_cpu_type()) {
        case CPU_LOONGSON2:
                cache_op(Hit_Invalidate_I_Loongson2, addr);
                break;

        default:
                cache_op(Hit_Invalidate_I, addr);
                break;
        }
        __iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
        __dflush_prologue
        cache_op(Hit_Writeback_Inv_D, addr);
        __dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
        __dflush_prologue
        cache_op(Hit_Invalidate_D, addr);
        __dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
        cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
        cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr) \
        __asm__ __volatile__( \
        " .set push \n" \
        " .set noreorder \n" \
        " .set arch=r4000 \n" \
        "1: cache %0, (%1) \n" \
        "2: .set pop \n" \
        " .section __ex_table,\"a\" \n" \
        " "STR(PTR)" 1b, 2b \n" \
        " .previous" \
        : \
        : "i" (op), "r" (addr))

#define protected_cachee_op(op,addr) \
        __asm__ __volatile__( \
        " .set push \n" \
        " .set noreorder \n" \
        " .set mips0 \n" \
        " .set eva \n" \
        "1: cachee %0, (%1) \n" \
        "2: .set pop \n" \
        " .section __ex_table,\"a\" \n" \
        " "STR(PTR)" 1b, 2b \n" \
        " .previous" \
        : \
        : "i" (op), "r" (addr))

/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
        switch (boot_cpu_type()) {
        case CPU_LOONGSON2:
                protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
                break;

        default:
#ifdef CONFIG_EVA
                protected_cachee_op(Hit_Invalidate_I, addr);
#else
                protected_cache_op(Hit_Invalidate_I, addr);
#endif
                break;
        }
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop, so we use Hit_Writeback_Inv_D, which is supported by all R4000-style
 * caches. At worst one cacheline gets unnecessarily invalidated here, so the
 * penalty isn't overly high.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
        protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
        protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}
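
/*
 * Illustrative only (not from the original header): a typical caller that
 * patches a signal trampoline into user memory first writes the modified
 * D-cache line back and then invalidates the stale I-cache line, e.g.
 *
 *	protected_writeback_dcache_line(addr);
 *	protected_flush_icache_line(addr);
 *
 * The "protected" variants carry an __ex_table fixup entry, so a fault on a
 * bad user address is skipped instead of bringing down the kernel.
 */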

static inline void protected_writeback_scache_line(unsigned long addr)
{
        protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
        cache_op(Page_Invalidate_T, addr);
}

#define cache16_unroll32(base,op) \
        __asm__ __volatile__( \
        " .set push \n" \
        " .set noreorder \n" \
        " .set mips3 \n" \
        " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
        " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
        " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
        " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
        " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
        " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
        " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
        " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
        " cache %1, 0x100(%0); cache %1, 0x110(%0) \n" \
        " cache %1, 0x120(%0); cache %1, 0x130(%0) \n" \
        " cache %1, 0x140(%0); cache %1, 0x150(%0) \n" \
        " cache %1, 0x160(%0); cache %1, 0x170(%0) \n" \
        " cache %1, 0x180(%0); cache %1, 0x190(%0) \n" \
        " cache %1, 0x1a0(%0); cache %1, 0x1b0(%0) \n" \
        " cache %1, 0x1c0(%0); cache %1, 0x1d0(%0) \n" \
        " cache %1, 0x1e0(%0); cache %1, 0x1f0(%0) \n" \
        " .set pop \n" \
        : \
        : "r" (base), \
          "i" (op));

#define cache32_unroll32(base,op) \
        __asm__ __volatile__( \
        " .set push \n" \
        " .set noreorder \n" \
        " .set mips3 \n" \
        " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
        " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
        " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
        " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
        " cache %1, 0x100(%0); cache %1, 0x120(%0) \n" \
        " cache %1, 0x140(%0); cache %1, 0x160(%0) \n" \
        " cache %1, 0x180(%0); cache %1, 0x1a0(%0) \n" \
        " cache %1, 0x1c0(%0); cache %1, 0x1e0(%0) \n" \
        " cache %1, 0x200(%0); cache %1, 0x220(%0) \n" \
        " cache %1, 0x240(%0); cache %1, 0x260(%0) \n" \
        " cache %1, 0x280(%0); cache %1, 0x2a0(%0) \n" \
        " cache %1, 0x2c0(%0); cache %1, 0x2e0(%0) \n" \
        " cache %1, 0x300(%0); cache %1, 0x320(%0) \n" \
        " cache %1, 0x340(%0); cache %1, 0x360(%0) \n" \
        " cache %1, 0x380(%0); cache %1, 0x3a0(%0) \n" \
        " cache %1, 0x3c0(%0); cache %1, 0x3e0(%0) \n" \
        " .set pop \n" \
        : \
        : "r" (base), \
          "i" (op));

#define cache64_unroll32(base,op) \
        __asm__ __volatile__( \
        " .set push \n" \
        " .set noreorder \n" \
        " .set mips3 \n" \
        " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
        " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
        " cache %1, 0x100(%0); cache %1, 0x140(%0) \n" \
        " cache %1, 0x180(%0); cache %1, 0x1c0(%0) \n" \
        " cache %1, 0x200(%0); cache %1, 0x240(%0) \n" \
        " cache %1, 0x280(%0); cache %1, 0x2c0(%0) \n" \
        " cache %1, 0x300(%0); cache %1, 0x340(%0) \n" \
        " cache %1, 0x380(%0); cache %1, 0x3c0(%0) \n" \
        " cache %1, 0x400(%0); cache %1, 0x440(%0) \n" \
        " cache %1, 0x480(%0); cache %1, 0x4c0(%0) \n" \
        " cache %1, 0x500(%0); cache %1, 0x540(%0) \n" \
        " cache %1, 0x580(%0); cache %1, 0x5c0(%0) \n" \
        " cache %1, 0x600(%0); cache %1, 0x640(%0) \n" \
        " cache %1, 0x680(%0); cache %1, 0x6c0(%0) \n" \
        " cache %1, 0x700(%0); cache %1, 0x740(%0) \n" \
        " cache %1, 0x780(%0); cache %1, 0x7c0(%0) \n" \
        " .set pop \n" \
        : \
        : "r" (base), \
          "i" (op));

#define cache128_unroll32(base,op) \
        __asm__ __volatile__( \
        " .set push \n" \
        " .set noreorder \n" \
        " .set mips3 \n" \
        " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
        " cache %1, 0x100(%0); cache %1, 0x180(%0) \n" \
        " cache %1, 0x200(%0); cache %1, 0x280(%0) \n" \
        " cache %1, 0x300(%0); cache %1, 0x380(%0) \n" \
        " cache %1, 0x400(%0); cache %1, 0x480(%0) \n" \
        " cache %1, 0x500(%0); cache %1, 0x580(%0) \n" \
        " cache %1, 0x600(%0); cache %1, 0x680(%0) \n" \
        " cache %1, 0x700(%0); cache %1, 0x780(%0) \n" \
        " cache %1, 0x800(%0); cache %1, 0x880(%0) \n" \
        " cache %1, 0x900(%0); cache %1, 0x980(%0) \n" \
        " cache %1, 0xa00(%0); cache %1, 0xa80(%0) \n" \
        " cache %1, 0xb00(%0); cache %1, 0xb80(%0) \n" \
        " cache %1, 0xc00(%0); cache %1, 0xc80(%0) \n" \
        " cache %1, 0xd00(%0); cache %1, 0xd80(%0) \n" \
        " cache %1, 0xe00(%0); cache %1, 0xe80(%0) \n" \
        " cache %1, 0xf00(%0); cache %1, 0xf80(%0) \n" \
        " .set pop \n" \
        : \
        : "r" (base), \
          "i" (op));

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base,op) \
        __asm__ __volatile__( \
        " .set push \n" \
        " .set noreorder \n" \
        " .set mips0 \n" \
        " .set eva \n" \
        " cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \
        " cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \
        " cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \
        " cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \
        " cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \
        " cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \
        " cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \
        " cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \
        " cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \
        " cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \
        " cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \
        " cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \
        " cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \
        " cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \
        " cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \
        " cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \
        " .set pop \n" \
        : \
        : "r" (base), \
          "i" (op));

#define cache32_unroll32_user(base, op) \
        __asm__ __volatile__( \
        " .set push \n" \
        " .set noreorder \n" \
        " .set mips0 \n" \
        " .set eva \n" \
        " cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \
        " cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \
        " cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \
        " cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \
        " cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \
        " cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \
        " cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \
        " cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \
        " cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \
        " cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \
        " cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \
        " cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \
        " cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \
        " cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \
        " cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \
        " cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \
        " .set pop \n" \
        : \
        : "r" (base), \
          "i" (op));

#define cache64_unroll32_user(base, op) \
        __asm__ __volatile__( \
        " .set push \n" \
        " .set noreorder \n" \
        " .set mips0 \n" \
        " .set eva \n" \
        " cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \
        " cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \
        " cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \
        " cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \
        " cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \
        " cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \
        " cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \
        " cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \
        " cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \
        " cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \
        " cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \
        " cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \
        " cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \
        " cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \
        " cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \
        " cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \
        " .set pop \n" \
        : \
        : "r" (base), \
          "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
static inline void extra##blast_##pfx##cache##lsize(void) \
{ \
        unsigned long start = INDEX_BASE; \
        unsigned long end = start + current_cpu_data.desc.waysize; \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
        unsigned long ws_end = current_cpu_data.desc.ways << \
                               current_cpu_data.desc.waybit; \
        unsigned long ws, addr; \
        \
        __##pfx##flush_prologue \
        \
        for (ws = 0; ws < ws_end; ws += ws_inc) \
                for (addr = start; addr < end; addr += lsize * 32) \
                        cache##lsize##_unroll32(addr|ws, indexop); \
        \
        __##pfx##flush_epilogue \
} \
        \
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{ \
        unsigned long start = page; \
        unsigned long end = page + PAGE_SIZE; \
        \
        __##pfx##flush_prologue \
        \
        do { \
                cache##lsize##_unroll32(start, hitop); \
                start += lsize * 32; \
        } while (start < end); \
        \
        __##pfx##flush_epilogue \
} \
        \
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{ \
        unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
        unsigned long start = INDEX_BASE + (page & indexmask); \
        unsigned long end = start + PAGE_SIZE; \
        unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
        unsigned long ws_end = current_cpu_data.desc.ways << \
                               current_cpu_data.desc.waybit; \
        unsigned long ws, addr; \
        \
        __##pfx##flush_prologue \
        \
        for (ws = 0; ws < ws_end; ws += ws_inc) \
                for (addr = start; addr < end; addr += lsize * 32) \
                        cache##lsize##_unroll32(addr|ws, indexop); \
        \
        __##pfx##flush_epilogue \
}
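
/*
 * Each instantiation below generates three helpers named after its prefix and
 * line size; e.g. __BUILD_BLAST_CACHE(d, dcache, ..., 32, ) provides
 * blast_dcache32(), blast_dcache32_page() and blast_dcache32_page_indexed(),
 * and the "loongson2_" variant prefixes the generated names accordingly.
 */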

__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{ \
        unsigned long start = page; \
        unsigned long end = page + PAGE_SIZE; \
        \
        __##pfx##flush_prologue \
        \
        do { \
                cache##lsize##_unroll32_user(start, hitop); \
                start += lsize * 32; \
        } while (start < end); \
        \
        __##pfx##flush_epilogue \
}

__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
                         64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
                                                            unsigned long end) \
{ \
        unsigned long lsize = cpu_##desc##_line_size(); \
        unsigned long addr = start & ~(lsize - 1); \
        unsigned long aend = (end - 1) & ~(lsize - 1); \
        \
        __##pfx##flush_prologue \
        \
        while (1) { \
                prot##cache_op(hitop, addr); \
                if (addr == aend) \
                        break; \
                addr += lsize; \
        } \
        \
        __##pfx##flush_epilogue \
}

#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
static inline void protected_blast_##pfx##cache##_range(unsigned long start, \
                                                        unsigned long end) \
{ \
        unsigned long lsize = cpu_##desc##_line_size(); \
        unsigned long addr = start & ~(lsize - 1); \
        unsigned long aend = (end - 1) & ~(lsize - 1); \
        \
        __##pfx##flush_prologue \
        \
        if (segment_eq(get_fs(), USER_DS)) { \
                while (1) { \
                        protected_cachee_op(hitop, addr); \
                        if (addr == aend) \
                                break; \
                        addr += lsize; \
                } \
        } else { \
                while (1) { \
                        protected_cache_op(hitop, addr); \
                        if (addr == aend) \
                                break; \
                        addr += lsize; \
                } \
        } \
        \
        __##pfx##flush_epilogue \
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif

__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
        protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
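
/*
 * As above, these instantiations expand to helpers such as
 * protected_blast_dcache_range(start, end), blast_icache_range(start, end)
 * and blast_inv_dcache_range(start, end); the "protected_" variants use the
 * faulting-safe cache ops, the plain variants use the ordinary ones.
 */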

#endif /* _ASM_R4KCACHE_H */