/*
 * User address space access functions.
 * The non-inlined parts of asm-metag/uaccess.h are here.
 *
 * Copyright (C) 2006, Imagination Technologies.
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 * Modified for Meta by Will Newton.
 */

#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/cache.h>		/* def of L1_CACHE_BYTES */

#define USE_RAPF
#define RAPF_MIN_BUF_SIZE	(3*L1_CACHE_BYTES)

/* The "double write" in this code is because the Meta will not fault
 * immediately unless the memory pipe is forced to by e.g. a data stall or
 * another memory op. The second write should be discarded by the write
 * combiner so should have virtually no cost.
 */
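
/* Editor's note (illustrative, not in the original source): __asm_copy_to_user_1
 * below shows the pattern concretely. The unlabelled "SETB [%0],D1Ar1" may not
 * fault immediately; the duplicate "2: SETB [%0++],D1Ar1" forces the memory
 * pipe, so the fault is taken at label 2, which the __ex_table entry maps to
 * the fixup at label 3 ("ADD %2,%2,#1" accounting for the failed byte). The
 * write combiner should discard the redundant store.
 */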
#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	asm volatile ( \
	COPY \
	"1:\n" \
	" .section .fixup,\"ax\"\n" \
	" MOV D1Ar1,#0\n" \
	FIXUP \
	" MOVT D1Ar1,#HI(1b)\n" \
	" JUMP D1Ar1,#LO(1b)\n" \
	" .previous\n" \
	" .section __ex_table,\"a\"\n" \
	TENTRY \
	" .previous\n" \
	: "=r" (to), "=r" (from), "=r" (ret) \
	: "0" (to), "1" (from), "2" (ret) \
	: "D1Ar1", "memory")

#define __asm_copy_to_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	" SETB [%0],D1Ar1\n" \
	"2: SETB [%0++],D1Ar1\n", \
	"3: ADD %2,%2,#1\n", \
	" .long 2b,3b\n")

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
	" GETW D1Ar1,[%1++]\n" \
	" SETW [%0],D1Ar1\n" \
	"2: SETW [%0++],D1Ar1\n" COPY, \
	"3: ADD %2,%2,#2\n" FIXUP, \
	" .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret) \
	__asm_copy_to_user_2x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	" SETB [%0],D1Ar1\n" \
	"4: SETB [%0++],D1Ar1\n", \
	"5: ADD %2,%2,#1\n", \
	" .long 4b,5b\n")

#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
	" GETD D1Ar1,[%1++]\n" \
	" SETD [%0],D1Ar1\n" \
	"2: SETD [%0++],D1Ar1\n" COPY, \
	"3: ADD %2,%2,#4\n" FIXUP, \
	" .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	" SETB [%0],D1Ar1\n" \
	"4: SETB [%0++],D1Ar1\n", \
	"5: ADD %2,%2,#1\n", \
	" .long 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
	" GETW D1Ar1,[%1++]\n" \
	" SETW [%0],D1Ar1\n" \
	"4: SETW [%0++],D1Ar1\n" COPY, \
	"5: ADD %2,%2,#2\n" FIXUP, \
	" .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
	__asm_copy_to_user_6x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	" SETB [%0],D1Ar1\n" \
	"6: SETB [%0++],D1Ar1\n", \
	"7: ADD %2,%2,#1\n", \
	" .long 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_4x_cont(to, from, ret, \
	" GETD D1Ar1,[%1++]\n" \
	" SETD [%0],D1Ar1\n" \
	"4: SETD [%0++],D1Ar1\n" COPY, \
	"5: ADD %2,%2,#4\n" FIXUP, \
	" .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	" SETB [%0],D1Ar1\n" \
	"6: SETB [%0++],D1Ar1\n", \
	"7: ADD %2,%2,#1\n", \
	" .long 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
	" GETW D1Ar1,[%1++]\n" \
	" SETW [%0],D1Ar1\n" \
	"6: SETW [%0++],D1Ar1\n" COPY, \
	"7: ADD %2,%2,#2\n" FIXUP, \
	" .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
	__asm_copy_to_user_10x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	" SETB [%0],D1Ar1\n" \
	"8: SETB [%0++],D1Ar1\n", \
	"9: ADD %2,%2,#1\n", \
	" .long 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_8x_cont(to, from, ret, \
	" GETD D1Ar1,[%1++]\n" \
	" SETD [%0],D1Ar1\n" \
	"6: SETD [%0++],D1Ar1\n" COPY, \
	"7: ADD %2,%2,#4\n" FIXUP, \
	" .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	" SETB [%0],D1Ar1\n" \
	"8: SETB [%0++],D1Ar1\n", \
	"9: ADD %2,%2,#1\n", \
	" .long 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
	" GETW D1Ar1,[%1++]\n" \
	" SETW [%0],D1Ar1\n" \
	"8: SETW [%0++],D1Ar1\n" COPY, \
	"9: ADD %2,%2,#2\n" FIXUP, \
	" .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
	__asm_copy_to_user_14x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	" SETB [%0],D1Ar1\n" \
	"10: SETB [%0++],D1Ar1\n", \
	"11: ADD %2,%2,#1\n", \
	" .long 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_to_user_12x_cont(to, from, ret, \
	" GETD D1Ar1,[%1++]\n" \
	" SETD [%0],D1Ar1\n" \
	"8: SETD [%0++],D1Ar1\n" COPY, \
	"9: ADD %2,%2,#4\n" FIXUP, \
	" .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
	__asm_copy_to_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_8x64(to, from, ret) \
	asm volatile ( \
	" GETL D0Ar2,D1Ar1,[%1++]\n" \
	" SETL [%0],D0Ar2,D1Ar1\n" \
	"2: SETL [%0++],D0Ar2,D1Ar1\n" \
	"1:\n" \
	" .section .fixup,\"ax\"\n" \
	"3: ADD %2,%2,#8\n" \
	" MOVT D0Ar2,#HI(1b)\n" \
	" JUMP D0Ar2,#LO(1b)\n" \
	" .previous\n" \
	" .section __ex_table,\"a\"\n" \
	" .long 2b,3b\n" \
	" .previous\n" \
	: "=r" (to), "=r" (from), "=r" (ret) \
	: "0" (to), "1" (from), "2" (ret) \
	: "D1Ar1", "D0Ar2", "memory")
/*
 * Optimized copying loop using RAPF when 64 bit aligned.
 *
 * n will be automatically decremented inside the loop.
 * ret will be left intact. If an error occurs we will rewind
 * so that the original non-optimized code will fill up
 * this value correctly.
 *
 * on fault:
 * > n will hold the total number of uncopied bytes
 *
 * > {'to','from'} will be rewound so that
 *   the non-optimized code will do the proper fix up
 *
 * DCACHE drops the cacheline, which helps in reducing cache
 * pollution.
 *
 * We introduce an extra SETL at the end of the loop to
 * ensure we don't fall off the loop before we catch all
 * errors.
 *
 * NOTICE:
 * LSM_STEP in TXSTATUS must be cleared in the fix up code.
 * Since we're using M{S,G}ETL, a fault might happen at
 * any address in the middle of M{S,G}ETL, causing
 * the value of LSM_STEP to be incorrect, which can
 * cause subsequent use of M{S,G}ET{L,D} to go wrong,
 * i.e. if LSM_STEP was 1 when a fault occurred, the
 * next call to M{S,G}ET{L,D} will skip the first
 * copy/get as it thinks that the first one has already
 * been done.
 */
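/* Editor's note (illustrative, not in the original source): each pass of the
 * RAPF loop below moves 64 bytes - two MGETL/MSETL pairs of 32 bytes each,
 * with n decremented by 32 after each pair - which is why the repeat count
 * is derived from n with "LSR D1Ar5, %3, #6" (i.e. n / 64) before the loop.
 */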
#define __asm_copy_user_64bit_rapf_loop( \
		to, from, ret, n, id, FIXUP) \
	asm volatile ( \
	".balign 8\n" \
	"MOV RAPF, %1\n" \
	"MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
	"MOV D0Ar6, #0\n" \
	"LSR D1Ar5, %3, #6\n" \
	"SUB TXRPT, D1Ar5, #2\n" \
	"MOV RAPF, %1\n" \
	"$Lloop"id":\n" \
	"ADD RAPF, %1, #64\n" \
	"21:\n" \
	"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
	"22:\n" \
	"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
	"SUB %3, %3, #32\n" \
	"23:\n" \
	"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
	"24:\n" \
	"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
	"SUB %3, %3, #32\n" \
	"DCACHE [%1+#-64], D0Ar6\n" \
	"BR $Lloop"id"\n" \
	\
	"MOV RAPF, %1\n" \
	"25:\n" \
	"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
	"26:\n" \
	"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
	"SUB %3, %3, #32\n" \
	"27:\n" \
	"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
	"28:\n" \
	"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
	"SUB %0, %0, #8\n" \
	"29:\n" \
	"SETL [%0++], D0.7, D1.7\n" \
	"SUB %3, %3, #32\n" \
	"1:" \
	"DCACHE [%1+#-64], D0Ar6\n" \
	"GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
	"GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
	"GETL D0.5, D1.5, [A0StP+#-24]\n" \
	"GETL D0.6, D1.6, [A0StP+#-16]\n" \
	"GETL D0.7, D1.7, [A0StP+#-8]\n" \
	"SUB A0StP, A0StP, #40\n" \
	" .section .fixup,\"ax\"\n" \
	"4:\n" \
	" ADD %0, %0, #8\n" \
	"3:\n" \
	" MOV D0Ar2, TXSTATUS\n" \
	" MOV D1Ar1, TXSTATUS\n" \
	" AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
	" MOV TXSTATUS, D1Ar1\n" \
	FIXUP \
	" MOVT D0Ar2,#HI(1b)\n" \
	" JUMP D0Ar2,#LO(1b)\n" \
	" .previous\n" \
	" .section __ex_table,\"a\"\n" \
	" .long 21b,3b\n" \
	" .long 22b,3b\n" \
	" .long 23b,3b\n" \
	" .long 24b,3b\n" \
	" .long 25b,3b\n" \
	" .long 26b,3b\n" \
	" .long 27b,3b\n" \
	" .long 28b,3b\n" \
	" .long 29b,4b\n" \
	" .previous\n" \
	: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
	: "0" (to), "1" (from), "2" (ret), "3" (n) \
	: "D1Ar1", "D0Ar2", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
 *
 * Rationale:
 * A fault always occurs on writing to the user buffer. A fault
 * is at a single address, so we need to rewind by only one
 * write (8 bytes here).
 * Since we do a complete read from the kernel buffer before
 * writing, we also need to rewind the read. The amount to be
 * rewound equals the number of faulty writes in MSETL,
 * which is: [4 - (LSM_STEP-1)]*8
 * LSM_STEP is bits 10:8 in TXSTATUS, which has already been read
 * and stored in D0Ar2.
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 * LSM_STEP will be 0. I.e. we do 4 writes in our case; if
 * a fault happens at the 4th write, LSM_STEP will be 0
 * instead of 4. The code copes with that.
 *
 * n is updated by the number of successful writes, which is:
 * n = n - (LSM_STEP-1)*8
 */
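/* Worked example (editor's addition, illustrative): if the fault leaves
 * LSM_STEP == 2, then (2-1) = 1 of the four SETL writes in the burst had
 * completed, so 'from' is rewound by [4 - (2-1)]*8 = 24 bytes of unwritten
 * data and n is reduced by (2-1)*8 = 8 bytes that did reach user space.
 */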
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
	"LSR D0Ar2, D0Ar2, #8\n" \
	"AND D0Ar2, D0Ar2, #0x7\n" \
	"ADDZ D0Ar2, D0Ar2, #4\n" \
	"SUB D0Ar2, D0Ar2, #1\n" \
	"MOV D1Ar1, #4\n" \
	"SUB D0Ar2, D1Ar1, D0Ar2\n" \
	"LSL D0Ar2, D0Ar2, #3\n" \
	"LSL D1Ar1, D1Ar1, #3\n" \
	"SUB D1Ar1, D1Ar1, D0Ar2\n" \
	"SUB %0, %0, #8\n" \
	"SUB %1, %1, D0Ar2\n" \
	"SUB %3, %3, D1Ar1\n")
/*
 * Optimized copying loop using RAPF when 32 bit aligned.
 *
 * n will be automatically decremented inside the loop.
 * ret will be left intact. If an error occurs we will rewind
 * so that the original non-optimized code will fill up
 * this value correctly.
 *
 * on fault:
 * > n will hold the total number of uncopied bytes
 *
 * > {'to','from'} will be rewound so that
 *   the non-optimized code will do the proper fix up
 *
 * DCACHE drops the cacheline, which helps in reducing cache
 * pollution.
 *
 * We introduce an extra SETD at the end of the loop to
 * ensure we don't fall off the loop before we catch all
 * errors.
 *
 * NOTICE:
 * LSM_STEP in TXSTATUS must be cleared in the fix up code.
 * Since we're using M{S,G}ETD, a fault might happen at
 * any address in the middle of M{S,G}ETD, causing
 * the value of LSM_STEP to be incorrect, which can
 * cause subsequent use of M{S,G}ET{L,D} to go wrong,
 * i.e. if LSM_STEP was 1 when a fault occurred, the
 * next call to M{S,G}ET{L,D} will skip the first
 * copy/get as it thinks that the first one has already
 * been done.
 */
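/* Editor's note (illustrative, not in the original source): each pass of the
 * 32-bit RAPF loop below also moves 64 bytes, but as four MGETD/MSETD pairs
 * of 16 bytes each (n decremented by 16 after each pair), so the repeat
 * count is again derived as n / 64 via "LSR D1Ar5, %3, #6".
 */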
#define __asm_copy_user_32bit_rapf_loop( \
		to, from, ret, n, id, FIXUP) \
	asm volatile ( \
	".balign 8\n" \
	"MOV RAPF, %1\n" \
	"MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \
	"MOV D0Ar6, #0\n" \
	"LSR D1Ar5, %3, #6\n" \
	"SUB TXRPT, D1Ar5, #2\n" \
	"MOV RAPF, %1\n" \
	"$Lloop"id":\n" \
	"ADD RAPF, %1, #64\n" \
	"21:\n" \
	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
	"22:\n" \
	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
	"SUB %3, %3, #16\n" \
	"23:\n" \
	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
	"24:\n" \
	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
	"SUB %3, %3, #16\n" \
	"25:\n" \
	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
	"26:\n" \
	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
	"SUB %3, %3, #16\n" \
	"27:\n" \
	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
	"28:\n" \
	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
	"SUB %3, %3, #16\n" \
	"DCACHE [%1+#-64], D0Ar6\n" \
	"BR $Lloop"id"\n" \
	\
	"MOV RAPF, %1\n" \
	"29:\n" \
	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
	"30:\n" \
	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
	"SUB %3, %3, #16\n" \
	"31:\n" \
	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
	"32:\n" \
	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
	"SUB %3, %3, #16\n" \
	"33:\n" \
	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
	"34:\n" \
	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
	"SUB %3, %3, #16\n" \
	"35:\n" \
	"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
	"36:\n" \
	"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
	"SUB %0, %0, #4\n" \
	"37:\n" \
	"SETD [%0++], D0.7\n" \
	"SUB %3, %3, #16\n" \
	"1:" \
	"DCACHE [%1+#-64], D0Ar6\n" \
	"GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \
	"GETL D0FrT, D1RtP, [A0StP+#-32]\n" \
	"GETL D0.5, D1.5, [A0StP+#-24]\n" \
	"GETL D0.6, D1.6, [A0StP+#-16]\n" \
	"GETL D0.7, D1.7, [A0StP+#-8]\n" \
	"SUB A0StP, A0StP, #40\n" \
	" .section .fixup,\"ax\"\n" \
	"4:\n" \
	" ADD %0, %0, #4\n" \
	"3:\n" \
	" MOV D0Ar2, TXSTATUS\n" \
	" MOV D1Ar1, TXSTATUS\n" \
	" AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \
	" MOV TXSTATUS, D1Ar1\n" \
	FIXUP \
	" MOVT D0Ar2,#HI(1b)\n" \
	" JUMP D0Ar2,#LO(1b)\n" \
	" .previous\n" \
	" .section __ex_table,\"a\"\n" \
	" .long 21b,3b\n" \
	" .long 22b,3b\n" \
	" .long 23b,3b\n" \
	" .long 24b,3b\n" \
	" .long 25b,3b\n" \
	" .long 26b,3b\n" \
	" .long 27b,3b\n" \
	" .long 28b,3b\n" \
	" .long 29b,3b\n" \
	" .long 30b,3b\n" \
	" .long 31b,3b\n" \
	" .long 32b,3b\n" \
	" .long 33b,3b\n" \
	" .long 34b,3b\n" \
	" .long 35b,3b\n" \
	" .long 36b,3b\n" \
	" .long 37b,4b\n" \
	" .previous\n" \
	: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
	: "0" (to), "1" (from), "2" (ret), "3" (n) \
	: "D1Ar1", "D0Ar2", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
 *
 * Rationale:
 * A fault always occurs on writing to the user buffer. A fault
 * is at a single address, so we need to rewind by only 4
 * bytes.
 * Since we do a complete read from the kernel buffer before
 * writing, we also need to rewind the read. The amount to be
 * rewound equals the number of faulty writes in MSETD,
 * which is: [4 - (LSM_STEP-1)]*4
 * LSM_STEP is bits 10:8 in TXSTATUS, which has already been read
 * and stored in D0Ar2.
 *
 * NOTE: If a fault occurs at the last operation in M{G,S}ETD,
 * LSM_STEP will be 0. I.e. we do 4 writes in our case; if
 * a fault happens at the 4th write, LSM_STEP will be 0
 * instead of 4. The code copes with that.
 *
 * n is updated by the number of successful writes, which is:
 * n = n - (LSM_STEP-1)*4
 */
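/* Worked example (editor's addition, illustrative): if the fault leaves
 * LSM_STEP == 2, then (2-1) = 1 of the four SETD writes in the burst had
 * completed, so 'from' is rewound by [4 - (2-1)]*4 = 12 bytes and n is
 * reduced by (2-1)*4 = 4 bytes that did reach user space.
 */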
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
	"LSR D0Ar2, D0Ar2, #8\n" \
	"AND D0Ar2, D0Ar2, #0x7\n" \
	"ADDZ D0Ar2, D0Ar2, #4\n" \
	"SUB D0Ar2, D0Ar2, #1\n" \
	"MOV D1Ar1, #4\n" \
	"SUB D0Ar2, D1Ar1, D0Ar2\n" \
	"LSL D0Ar2, D0Ar2, #2\n" \
	"LSL D1Ar1, D1Ar1, #2\n" \
	"SUB D1Ar1, D1Ar1, D0Ar2\n" \
	"SUB %0, %0, #4\n" \
	"SUB %1, %1, D0Ar2\n" \
	"SUB %3, %3, D1Ar1\n")
unsigned long __copy_user(void __user *pdst, const void *psrc,
			  unsigned long n)
{
	register char __user *dst asm ("A0.2") = pdst;
	register const char *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_to_user_1(dst, src, retn);
		n--;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_to_user_1(dst, src, retn);
			n--;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_to_user_2(dst, src, retn);
		n -= 2;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_to_user_2(dst, src, retn);
			n -= 2;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* copy user using 64 bit rapf copy */
			__asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
							   n, "64cu");
		}
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
		}
	}
	if (n >= RAPF_MIN_BUF_SIZE) {
		/* copy user using 32 bit rapf copy */
		__asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
		}
	}
#endif

	while (n >= 16) {
		__asm_copy_to_user_16(dst, src, retn);
		n -= 16;
	}

	while (n >= 4) {
		__asm_copy_to_user_4(dst, src, retn);
		n -= 4;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_copy_to_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_to_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_to_user_3(dst, src, retn);
		break;
	}

	return retn;
}
EXPORT_SYMBOL(__copy_user);
#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	"2: SETB [%0++],D1Ar1\n", \
	"3: ADD %2,%2,#1\n" \
	" SETB [%0++],D1Ar1\n", \
	" .long 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
	" GETW D1Ar1,[%1++]\n" \
	"2: SETW [%0++],D1Ar1\n" COPY, \
	"3: ADD %2,%2,#2\n" \
	" SETW [%0++],D1Ar1\n" FIXUP, \
	" .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	"4: SETB [%0++],D1Ar1\n", \
	"5: ADD %2,%2,#1\n" \
	" SETB [%0++],D1Ar1\n", \
	" .long 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret, \
	" GETD D1Ar1,[%1++]\n" \
	"2: SETD [%0++],D1Ar1\n" COPY, \
	"3: ADD %2,%2,#4\n" \
	" SETD [%0++],D1Ar1\n" FIXUP, \
	" .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_5(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	"4: SETB [%0++],D1Ar1\n", \
	"5: ADD %2,%2,#1\n" \
	" SETB [%0++],D1Ar1\n", \
	" .long 4b,5b\n")

#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_4x_cont(to, from, ret, \
	" GETW D1Ar1,[%1++]\n" \
	"4: SETW [%0++],D1Ar1\n" COPY, \
	"5: ADD %2,%2,#2\n" \
	" SETW [%0++],D1Ar1\n" FIXUP, \
	" .long 4b,5b\n" TENTRY)

#define __asm_copy_from_user_6(to, from, ret) \
	__asm_copy_from_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_7(to, from, ret) \
	__asm_copy_from_user_6x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	"6: SETB [%0++],D1Ar1\n", \
	"7: ADD %2,%2,#1\n" \
	" SETB [%0++],D1Ar1\n", \
	" .long 6b,7b\n")

#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_4x_cont(to, from, ret, \
	" GETD D1Ar1,[%1++]\n" \
	"4: SETD [%0++],D1Ar1\n" COPY, \
	"5: ADD %2,%2,#4\n" \
	" SETD [%0++],D1Ar1\n" FIXUP, \
	" .long 4b,5b\n" TENTRY)

#define __asm_copy_from_user_8(to, from, ret) \
	__asm_copy_from_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_9(to, from, ret) \
	__asm_copy_from_user_8x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	"6: SETB [%0++],D1Ar1\n", \
	"7: ADD %2,%2,#1\n" \
	" SETB [%0++],D1Ar1\n", \
	" .long 6b,7b\n")

#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_8x_cont(to, from, ret, \
	" GETW D1Ar1,[%1++]\n" \
	"6: SETW [%0++],D1Ar1\n" COPY, \
	"7: ADD %2,%2,#2\n" \
	" SETW [%0++],D1Ar1\n" FIXUP, \
	" .long 6b,7b\n" TENTRY)

#define __asm_copy_from_user_10(to, from, ret) \
	__asm_copy_from_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_11(to, from, ret) \
	__asm_copy_from_user_10x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	"8: SETB [%0++],D1Ar1\n", \
	"9: ADD %2,%2,#1\n" \
	" SETB [%0++],D1Ar1\n", \
	" .long 8b,9b\n")

#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_8x_cont(to, from, ret, \
	" GETD D1Ar1,[%1++]\n" \
	"6: SETD [%0++],D1Ar1\n" COPY, \
	"7: ADD %2,%2,#4\n" \
	" SETD [%0++],D1Ar1\n" FIXUP, \
	" .long 6b,7b\n" TENTRY)

#define __asm_copy_from_user_12(to, from, ret) \
	__asm_copy_from_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_13(to, from, ret) \
	__asm_copy_from_user_12x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	"8: SETB [%0++],D1Ar1\n", \
	"9: ADD %2,%2,#1\n" \
	" SETB [%0++],D1Ar1\n", \
	" .long 8b,9b\n")

#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_12x_cont(to, from, ret, \
	" GETW D1Ar1,[%1++]\n" \
	"8: SETW [%0++],D1Ar1\n" COPY, \
	"9: ADD %2,%2,#2\n" \
	" SETW [%0++],D1Ar1\n" FIXUP, \
	" .long 8b,9b\n" TENTRY)

#define __asm_copy_from_user_14(to, from, ret) \
	__asm_copy_from_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_15(to, from, ret) \
	__asm_copy_from_user_14x_cont(to, from, ret, \
	" GETB D1Ar1,[%1++]\n" \
	"10: SETB [%0++],D1Ar1\n", \
	"11: ADD %2,%2,#1\n" \
	" SETB [%0++],D1Ar1\n", \
	" .long 10b,11b\n")

#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_from_user_12x_cont(to, from, ret, \
	" GETD D1Ar1,[%1++]\n" \
	"8: SETD [%0++],D1Ar1\n" COPY, \
	"9: ADD %2,%2,#4\n" \
	" SETD [%0++],D1Ar1\n" FIXUP, \
	" .long 8b,9b\n" TENTRY)

#define __asm_copy_from_user_16(to, from, ret) \
	__asm_copy_from_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_8x64(to, from, ret) \
	asm volatile ( \
	" GETL D0Ar2,D1Ar1,[%1++]\n" \
	"2: SETL [%0++],D0Ar2,D1Ar1\n" \
	"1:\n" \
	" .section .fixup,\"ax\"\n" \
	" MOV D1Ar1,#0\n" \
	" MOV D0Ar2,#0\n" \
	"3: ADD %2,%2,#8\n" \
	" SETL [%0++],D0Ar2,D1Ar1\n" \
	" MOVT D0Ar2,#HI(1b)\n" \
	" JUMP D0Ar2,#LO(1b)\n" \
	" .previous\n" \
	" .section __ex_table,\"a\"\n" \
	" .long 2b,3b\n" \
	" .previous\n" \
	: "=a" (to), "=r" (from), "=r" (ret) \
	: "0" (to), "1" (from), "2" (ret) \
	: "D1Ar1", "D0Ar2", "memory")
/* rewind 'from' pointer when a fault occurs
 *
 * Rationale:
 * A fault occurs while reading from the user buffer, which is the
 * source. Since the fault is at a single address, we only
 * need to rewind by 8 bytes.
 * Since we don't write to the kernel buffer until after the read,
 * the kernel buffer is already in the right state and needn't be
 * corrected.
 */
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
	"SUB %1, %1, #8\n")

/* rewind 'from' pointer when a fault occurs
 *
 * Rationale:
 * A fault occurs while reading from the user buffer, which is the
 * source. Since the fault is at a single address, we only
 * need to rewind by 4 bytes.
 * Since we don't write to the kernel buffer until after the read,
 * the kernel buffer is already in the right state and needn't be
 * corrected.
 */
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
	"SUB %1, %1, #4\n")
/* Copy from user to kernel, zeroing the bytes that were inaccessible in
   userland.  The return value is the number of bytes that were
   inaccessible. */
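/*
 * Illustrative caller sketch (editor's addition; kbuf, ubuf and len are
 * hypothetical names, not part of this file). When the return value is
 * non-zero, the trailing 'left' bytes of kbuf have been zeroed:
 *
 *	unsigned long left = __copy_user_zeroing(kbuf, ubuf, len);
 *	if (left)
 *		return -EFAULT;
 */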
unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
				  unsigned long n)
{
	register char *dst asm ("A0.2") = pdst;
	register const char __user *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	if ((unsigned long) src & 1) {
		__asm_copy_from_user_1(dst, src, retn);
		n--;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_from_user_1(dst, src, retn);
			n--;
			if (retn)
				goto copy_exception_bytes;
		}
	}
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_from_user_2(dst, src, retn);
		n -= 2;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_from_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				goto copy_exception_bytes;
		}
	}

	/* We only need one check after the unalignment-adjustments,
	   because if both adjustments were done, either both or
	   neither reference had an exception. */
	if (retn != 0)
		goto copy_exception_bytes;

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* Copy using fast 64bit rapf */
			__asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
							     n, "64cuz");
		}
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				goto copy_exception_bytes;
		}
	}
	if (n >= RAPF_MIN_BUF_SIZE) {
		/* Copy using fast 32bit rapf */
		__asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
						     n, "32cuz");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_from_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				goto copy_exception_bytes;
		}
	}
#endif

	while (n >= 4) {
		__asm_copy_from_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			goto copy_exception_bytes;
	}

	/* If we get here, there were no memory read faults. */
	switch (n) {
		/* These copies are at least "naturally aligned" (so we don't
		   have to check each byte), due to the src alignment code.
		   The *_3 case *will* get the correct count for retn. */
	case 0:
		/* This case deliberately left in (if you have doubts check the
		   generated assembly code). */
		break;
	case 1:
		__asm_copy_from_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_from_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_from_user_3(dst, src, retn);
		break;
	}

	/* If we get here, retn correctly reflects the number of failing
	   bytes. */
	return retn;

copy_exception_bytes:
	/* We already have "retn" bytes cleared, and need to clear the
	   remaining "n" bytes. A non-optimized simple byte-for-byte in-line
	   memset is preferred here, since this isn't speed-critical code and
	   we'd rather have this be a leaf function than call memset. */
	{
		char *endp;
		for (endp = dst + n; dst < endp; dst++)
			*dst = 0;
	}

	return retn + n;
}
EXPORT_SYMBOL(__copy_user_zeroing);
#define __asm_clear_8x64(to, ret) \
	asm volatile ( \
	" MOV D0Ar2,#0\n" \
	" MOV D1Ar1,#0\n" \
	" SETL [%0],D0Ar2,D1Ar1\n" \
	"2: SETL [%0++],D0Ar2,D1Ar1\n" \
	"1:\n" \
	" .section .fixup,\"ax\"\n" \
	"3: ADD %1,%1,#8\n" \
	" MOVT D0Ar2,#HI(1b)\n" \
	" JUMP D0Ar2,#LO(1b)\n" \
	" .previous\n" \
	" .section __ex_table,\"a\"\n" \
	" .long 2b,3b\n" \
	" .previous\n" \
	: "=r" (to), "=r" (ret) \
	: "0" (to), "1" (ret) \
	: "D1Ar1", "D0Ar2", "memory")

/* Zero userspace. */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	asm volatile ( \
	" MOV D1Ar1,#0\n" \
	CLEAR \
	"1:\n" \
	" .section .fixup,\"ax\"\n" \
	FIXUP \
	" MOVT D1Ar1,#HI(1b)\n" \
	" JUMP D1Ar1,#LO(1b)\n" \
	" .previous\n" \
	" .section __ex_table,\"a\"\n" \
	TENTRY \
	" .previous" \
	: "=r" (to), "=r" (ret) \
	: "0" (to), "1" (ret) \
	: "D1Ar1", "memory")

#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret, \
	" SETB [%0],D1Ar1\n" \
	"2: SETB [%0++],D1Ar1\n", \
	"3: ADD %1,%1,#1\n", \
	" .long 2b,3b\n")

#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret, \
	" SETW [%0],D1Ar1\n" \
	"2: SETW [%0++],D1Ar1\n", \
	"3: ADD %1,%1,#2\n", \
	" .long 2b,3b\n")

#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret, \
	"2: SETW [%0++],D1Ar1\n" \
	" SETB [%0],D1Ar1\n" \
	"3: SETB [%0++],D1Ar1\n", \
	"4: ADD %1,%1,#2\n" \
	"5: ADD %1,%1,#1\n", \
	" .long 2b,4b\n" \
	" .long 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret, \
	" SETD [%0],D1Ar1\n" \
	"2: SETD [%0++],D1Ar1\n" CLEAR, \
	"3: ADD %1,%1,#4\n" FIXUP, \
	" .long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret, \
	" SETD [%0],D1Ar1\n" \
	"4: SETD [%0++],D1Ar1\n" CLEAR, \
	"5: ADD %1,%1,#4\n" FIXUP, \
	" .long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret, \
	" SETD [%0],D1Ar1\n" \
	"6: SETD [%0++],D1Ar1\n" CLEAR, \
	"7: ADD %1,%1,#4\n" FIXUP, \
	" .long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret, \
	" SETD [%0],D1Ar1\n" \
	"8: SETD [%0++],D1Ar1\n" CLEAR, \
	"9: ADD %1,%1,#4\n" FIXUP, \
	" .long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")

unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
	register char __user *dst asm ("D0Re0") = pto;
	register unsigned long n asm ("D1Re0") = pn;
	register unsigned long retn asm ("D0Ar6") = 0;

	if ((unsigned long) dst & 1) {
		__asm_clear_1(dst, retn);
		n--;
	}

	if ((unsigned long) dst & 2) {
		__asm_clear_2(dst, retn);
		n -= 2;
	}

	/* 64 bit copy loop */
	if (!((__force unsigned long) dst & 7)) {
		while (n >= 8) {
			__asm_clear_8x64(dst, retn);
			n -= 8;
		}
	}

	while (n >= 16) {
		__asm_clear_16(dst, retn);
		n -= 16;
	}

	while (n >= 4) {
		__asm_clear_4(dst, retn);
		n -= 4;
	}

	switch (n) {
	case 0:
		break;
	case 1:
		__asm_clear_1(dst, retn);
		break;
	case 2:
		__asm_clear_2(dst, retn);
		break;
	case 3:
		__asm_clear_3(dst, retn);
		break;
	}

	return retn;
}
EXPORT_SYMBOL(__do_clear_user);
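
/* Editor's note (illustrative, not in the original source): the accessors
 * below appear to use the same "double access" trick described at the top of
 * the file - each GETx/SETx is issued twice, and the __ex_table entry covers
 * the second, labelled instruction, so the deferred fault is taken at a known
 * point and redirected to the fixup that stores -EFAULT.
 */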
unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
	register unsigned char x asm ("D0Re0") = 0;

	asm volatile (
		" GETB %0,[%2]\n"
		"1:\n"
		" GETB %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_b);

unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
	register unsigned short x asm ("D0Re0") = 0;

	asm volatile (
		" GETW %0,[%2]\n"
		"1:\n"
		" GETW %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_w);

unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
	register unsigned int x asm ("D0Re0") = 0;

	asm volatile (
		" GETD %0,[%2]\n"
		"1:\n"
		" GETD %0,[%2]\n"
		"2:\n"
		" .section .fixup,\"ax\"\n"
		"3: MOV D0FrT,%3\n"
		" SETD [%1],D0FrT\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		" .previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_d);

long __put_user_asm_b(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;

	asm volatile (
		" MOV %0,#0\n"
		" SETB [%2],%1\n"
		"1:\n"
		" SETB [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_b);

long __put_user_asm_w(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;

	asm volatile (
		" MOV %0,#0\n"
		" SETW [%2],%1\n"
		"1:\n"
		" SETW [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_w);

long __put_user_asm_d(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;

	asm volatile (
		" MOV %0,#0\n"
		" SETD [%2],%1\n"
		"1:\n"
		" SETD [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_d);

long __put_user_asm_l(unsigned long long x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;

	asm volatile (
		" MOV %0,#0\n"
		" SETL [%2],%1,%t1\n"
		"1:\n"
		" SETL [%2],%1,%t1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3: MOV %0,%3\n"
		" MOVT D0FrT,#HI(2b)\n"
		" JUMP D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		" .long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_l);

long strnlen_user(const char __user *src, long count)
{
	long res;

	if (!access_ok(VERIFY_READ, src, 0))
		return 0;

	asm volatile (" MOV D0Ar4, %1\n"
		" MOV D0Ar6, %2\n"
		"0:\n"
		" SUBS D0FrT, D0Ar6, #0\n"
		" SUB D0Ar6, D0Ar6, #1\n"
		" BLE 2f\n"
		" GETB D0FrT, [D0Ar4+#1++]\n"
		"1:\n"
		" TST D0FrT, #255\n"
		" BNE 0b\n"
		"2:\n"
		" SUB %0, %2, D0Ar6\n"
		"3:\n"
		" .section .fixup,\"ax\"\n"
		"4:\n"
		" MOV %0, #0\n"
		" MOVT D0FrT,#HI(3b)\n"
		" JUMP D0FrT,#LO(3b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 1b,4b\n"
		" .previous\n"
		: "=r" (res)
		: "r" (src), "r" (count)
		: "D0FrT", "D0Ar4", "D0Ar6", "cc");

	return res;
}
EXPORT_SYMBOL(strnlen_user);

long __strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;

	if (count == 0)
		return 0;

	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 * So do we.
	 *
	 * This code is deduced from:
	 *
	 *	char tmp2;
	 *	long tmp1, tmp3;
	 *	tmp1 = count;
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	       && --tmp1)
	 *		;
	 *
	 *	res = count - tmp1;
	 *
	 * with tweaks.
	 */
	asm volatile (" MOV %0,%3\n"
		"1:\n"
		" GETB D0FrT,[%2++]\n"
		"2:\n"
		" CMP D0FrT,#0\n"
		" SETB [%1++],D0FrT\n"
		" BEQ 3f\n"
		" SUBS %0,%0,#1\n"
		" BNZ 1b\n"
		"3:\n"
		" SUB %0,%3,%0\n"
		"4:\n"
		" .section .fixup,\"ax\"\n"
		"5:\n"
		" MOV %0,%7\n"
		" MOVT D0FrT,#HI(4b)\n"
		" JUMP D0FrT,#LO(4b)\n"
		" .previous\n"
		" .section __ex_table,\"a\"\n"
		" .long 2b,5b\n"
		" .previous"
		: "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		: "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
		: "D0FrT", "memory", "cc");

	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);