aes-ce-ccm-core.S

/*
 * aesce-ccm-core.S - AES-CCM transform for ARMv8 with Crypto Extensions
 *
 * Copyright (C) 2013 - 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>

	.text
	.arch	armv8-a+crypto

	/*
	 * void ce_aes_ccm_auth_data(u8 mac[], u8 const in[], u32 abytes,
	 *			     u32 *macp, u8 const rk[], u32 rounds);
	 */
ENTRY(ce_aes_ccm_auth_data)
	ldr	w8, [x3]			/* leftover from prev round? */
	ld1	{v0.2d}, [x0]			/* load mac */
	cbz	w8, 1f
	sub	w8, w8, #16
	eor	v1.16b, v1.16b, v1.16b
0:	ldrb	w7, [x1], #1			/* get 1 byte of input */
	subs	w2, w2, #1
	add	w8, w8, #1
	ins	v1.b[0], w7
	ext	v1.16b, v1.16b, v1.16b, #1	/* rotate in the input bytes */
	beq	8f				/* out of input? */
	cbnz	w8, 0b
	eor	v0.16b, v0.16b, v1.16b
1:	ld1	{v3.2d}, [x4]			/* load first round key */
	prfm	pldl1strm, [x1]
	cmp	w5, #12				/* which key size? */
	add	x6, x4, #16
	sub	w7, w5, #2			/* modified # of rounds */
	bmi	2f
	bne	5f
	mov	v5.16b, v3.16b
	b	4f
2:	mov	v4.16b, v3.16b
	ld1	{v5.2d}, [x6], #16		/* load 2nd round key */
3:	aese	v0.16b, v4.16b
	aesmc	v0.16b, v0.16b
4:	ld1	{v3.2d}, [x6], #16		/* load next round key */
	aese	v0.16b, v5.16b
	aesmc	v0.16b, v0.16b
5:	ld1	{v4.2d}, [x6], #16		/* load next round key */
	subs	w7, w7, #3
	aese	v0.16b, v3.16b
	aesmc	v0.16b, v0.16b
	ld1	{v5.2d}, [x6], #16		/* load next round key */
	bpl	3b
	aese	v0.16b, v4.16b
	subs	w2, w2, #16			/* last data? */
	eor	v0.16b, v0.16b, v5.16b		/* final round */
	bmi	6f
	ld1	{v1.16b}, [x1], #16		/* load next input block */
	eor	v0.16b, v0.16b, v1.16b		/* xor with mac */
	bne	1b
6:	st1	{v0.2d}, [x0]			/* store mac */
	beq	10f
	adds	w2, w2, #16
	beq	10f
	mov	w8, w2
7:	ldrb	w7, [x1], #1
	umov	w6, v0.b[0]
	eor	w6, w6, w7
	strb	w6, [x0], #1
	subs	w2, w2, #1
	beq	10f
	ext	v0.16b, v0.16b, v0.16b, #1	/* rotate out the mac bytes */
	b	7b
8:	mov	w7, w8
	add	w8, w8, #16
9:	ext	v1.16b, v1.16b, v1.16b, #1
	adds	w7, w7, #1
	bne	9b
	eor	v0.16b, v0.16b, v1.16b
	st1	{v0.2d}, [x0]
10:	str	w8, [x3]
	ret
ENDPROC(ce_aes_ccm_auth_data)
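
	/*
	 * Illustrative only (not part of this file): C glue code might feed
	 * the associated data into the MAC roughly as follows, assuming
	 * kernel_neon_begin()/kernel_neon_end() from <asm/neon.h>. The names
	 * ctx, key_enc, num_rounds, assoc, assoc_len and mac are placeholders
	 * for this sketch, not symbols defined here.
	 *
	 *	u32 macp = 0;
	 *
	 *	kernel_neon_begin();
	 *	ce_aes_ccm_auth_data(mac, assoc, assoc_len, &macp,
	 *			     (u8 *)ctx->key_enc, num_rounds(ctx));
	 *	kernel_neon_end();
	 */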

	/*
	 * void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u8 const rk[],
	 *			 u32 rounds);
	 */
ENTRY(ce_aes_ccm_final)
	ld1	{v3.2d}, [x2], #16		/* load first round key */
	ld1	{v0.2d}, [x0]			/* load mac */
	cmp	w3, #12				/* which key size? */
	sub	w3, w3, #2			/* modified # of rounds */
	ld1	{v1.2d}, [x1]			/* load 1st ctriv */
	bmi	0f
	bne	3f
	mov	v5.16b, v3.16b
	b	2f
0:	mov	v4.16b, v3.16b
1:	ld1	{v5.2d}, [x2], #16		/* load next round key */
	aese	v0.16b, v4.16b
	aese	v1.16b, v4.16b
	aesmc	v0.16b, v0.16b
	aesmc	v1.16b, v1.16b
2:	ld1	{v3.2d}, [x2], #16		/* load next round key */
	aese	v0.16b, v5.16b
	aese	v1.16b, v5.16b
	aesmc	v0.16b, v0.16b
	aesmc	v1.16b, v1.16b
3:	ld1	{v4.2d}, [x2], #16		/* load next round key */
	subs	w3, w3, #3
	aese	v0.16b, v3.16b
	aese	v1.16b, v3.16b
	aesmc	v0.16b, v0.16b
	aesmc	v1.16b, v1.16b
	bpl	1b
	aese	v0.16b, v4.16b
	aese	v1.16b, v4.16b
	/* final round key cancels out */
	eor	v0.16b, v0.16b, v1.16b		/* en-/decrypt the mac */
	st1	{v0.2d}, [x0]			/* store result */
	ret
ENDPROC(ce_aes_ccm_final)
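
	/*
	 * The aes_ccm_do_crypt macro below expands into the bodies of
	 * ce_aes_ccm_encrypt() and ce_aes_ccm_decrypt(). Per the prototypes
	 * at the end of this file and the AAPCS64 calling convention, it
	 * expects its arguments in:
	 *
	 *	x0 - out[]	x1 - in[]	w2 - cbytes
	 *	x3 - rk[]	w4 - rounds	x5 - mac[]
	 *	x6 - ctr[]
	 */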

	.macro	aes_ccm_do_crypt,enc
	ldr	x8, [x6, #8]			/* load lower ctr */
	ld1	{v0.2d}, [x5]			/* load mac */
	rev	x8, x8				/* keep swabbed ctr in reg */
0:	/* outer loop */
	ld1	{v1.1d}, [x6]			/* load upper ctr */
	prfm	pldl1strm, [x1]
	add	x8, x8, #1
	rev	x9, x8
	cmp	w4, #12				/* which key size? */
	sub	w7, w4, #2			/* get modified # of rounds */
	ins	v1.d[1], x9			/* no carry in lower ctr */
	ld1	{v3.2d}, [x3]			/* load first round key */
	add	x10, x3, #16
	bmi	1f
	bne	4f
	mov	v5.16b, v3.16b
	b	3f
1:	mov	v4.16b, v3.16b
	ld1	{v5.2d}, [x10], #16		/* load 2nd round key */
2:	/* inner loop: 3 rounds, 2x interleaved */
	aese	v0.16b, v4.16b
	aese	v1.16b, v4.16b
	aesmc	v0.16b, v0.16b
	aesmc	v1.16b, v1.16b
3:	ld1	{v3.2d}, [x10], #16		/* load next round key */
	aese	v0.16b, v5.16b
	aese	v1.16b, v5.16b
	aesmc	v0.16b, v0.16b
	aesmc	v1.16b, v1.16b
4:	ld1	{v4.2d}, [x10], #16		/* load next round key */
	subs	w7, w7, #3
	aese	v0.16b, v3.16b
	aese	v1.16b, v3.16b
	aesmc	v0.16b, v0.16b
	aesmc	v1.16b, v1.16b
	ld1	{v5.2d}, [x10], #16		/* load next round key */
	bpl	2b
	aese	v0.16b, v4.16b
	aese	v1.16b, v4.16b
	subs	w2, w2, #16
	bmi	6f				/* partial block? */
	ld1	{v2.16b}, [x1], #16		/* load next input block */
	.if	\enc == 1
	eor	v2.16b, v2.16b, v5.16b		/* final round enc+mac */
	eor	v1.16b, v1.16b, v2.16b		/* xor with crypted ctr */
	.else
	eor	v2.16b, v2.16b, v1.16b		/* xor with crypted ctr */
	eor	v1.16b, v2.16b, v5.16b		/* final round enc */
	.endif
	eor	v0.16b, v0.16b, v2.16b		/* xor mac with pt ^ rk[last] */
	st1	{v1.16b}, [x0], #16		/* write output block */
	bne	0b
	rev	x8, x8
	st1	{v0.2d}, [x5]			/* store mac */
	str	x8, [x6, #8]			/* store lsb end of ctr (BE) */
5:	ret
6:	eor	v0.16b, v0.16b, v5.16b		/* final round mac */
	eor	v1.16b, v1.16b, v5.16b		/* final round enc */
	st1	{v0.2d}, [x5]			/* store mac */
	add	w2, w2, #16			/* process partial tail block */
7:	ldrb	w9, [x1], #1			/* get 1 byte of input */
	umov	w6, v1.b[0]			/* get top crypted ctr byte */
	umov	w7, v0.b[0]			/* get top mac byte */
	.if	\enc == 1
	eor	w7, w7, w9
	eor	w9, w9, w6
	.else
	eor	w9, w9, w6
	eor	w7, w7, w9
	.endif
	strb	w9, [x0], #1			/* store out byte */
	strb	w7, [x5], #1			/* store mac byte */
	subs	w2, w2, #1
	beq	5b
	ext	v0.16b, v0.16b, v0.16b, #1	/* shift out mac byte */
	ext	v1.16b, v1.16b, v1.16b, #1	/* shift out ctr byte */
	b	7b
	.endm

	/*
	 * void ce_aes_ccm_encrypt(u8 out[], u8 const in[], u32 cbytes,
	 *			   u8 const rk[], u32 rounds, u8 mac[],
	 *			   u8 ctr[]);
	 * void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
	 *			   u8 const rk[], u32 rounds, u8 mac[],
	 *			   u8 ctr[]);
	 */
ENTRY(ce_aes_ccm_encrypt)
	aes_ccm_do_crypt	1
ENDPROC(ce_aes_ccm_encrypt)

ENTRY(ce_aes_ccm_decrypt)
	aes_ccm_do_crypt	0
ENDPROC(ce_aes_ccm_decrypt)
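
	/*
	 * Illustrative only: a complete CCM encryption pass in the C glue
	 * code could chain the three routines above roughly as follows,
	 * inside a kernel_neon_begin()/kernel_neon_end() section. Every
	 * identifier below is a placeholder for this sketch; ctr0 stands
	 * for the original counter block passed to the final MAC step.
	 *
	 *	kernel_neon_begin();
	 *	ce_aes_ccm_auth_data(mac, assoc, assoc_len, &macp, key, rounds);
	 *	ce_aes_ccm_encrypt(dst, src, len, key, rounds, mac, ctr);
	 *	ce_aes_ccm_final(mac, ctr0, key, rounds);
	 *	kernel_neon_end();
	 */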