sys_regs.c

  1. /*
  2. * Copyright (C) 2012,2013 - ARM Ltd
  3. * Author: Marc Zyngier <marc.zyngier@arm.com>
  4. *
  5. * Derived from arch/arm/kvm/coproc.c:
  6. * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  7. * Authors: Rusty Russell <rusty@rustcorp.com.au>
  8. * Christoffer Dall <c.dall@virtualopensystems.com>
  9. *
  10. * This program is free software; you can redistribute it and/or modify
  11. * it under the terms of the GNU General Public License, version 2, as
  12. * published by the Free Software Foundation.
  13. *
  14. * This program is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  17. * GNU General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  21. */
  22. #include <linux/mm.h>
  23. #include <linux/kvm_host.h>
  24. #include <linux/uaccess.h>
  25. #include <asm/kvm_arm.h>
  26. #include <asm/kvm_host.h>
  27. #include <asm/kvm_emulate.h>
  28. #include <asm/kvm_coproc.h>
  29. #include <asm/kvm_mmu.h>
  30. #include <asm/cacheflush.h>
  31. #include <asm/cputype.h>
  32. #include <asm/debug-monitors.h>
  33. #include <trace/events/kvm.h>
  34. #include "sys_regs.h"
  35. /*
  36. * All of this file is extremely similar to the ARM coproc.c, but the
  37. * types are different. My gut feeling is that it should be pretty
  38. * easy to merge, but that would be an ABI breakage -- again. VFP
  39. * would also need to be abstracted.
  40. *
  41. * For AArch32, we only take care of what is being trapped. Anything
  42. * that has to do with init and userspace access has to go via the
  43. * 64bit interface.
  44. */
  45. /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
  46. static u32 cache_levels;
  47. /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
  48. #define CSSELR_MAX 12
  49. /* Which cache CCSIDR represents depends on CSSELR value. */
  50. static u32 get_ccsidr(u32 csselr)
  51. {
  52. u32 ccsidr;
  53. /* Make sure no one else changes CSSELR during this! */
  54. local_irq_disable();
  55. /* Put value into CSSELR */
  56. asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
  57. isb();
  58. /* Read result out of CCSIDR */
  59. asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
  60. local_irq_enable();
  61. return ccsidr;
  62. }
  63. static void do_dc_cisw(u32 val)
  64. {
  65. asm volatile("dc cisw, %x0" : : "r" (val));
  66. dsb(ish);
  67. }
  68. static void do_dc_csw(u32 val)
  69. {
  70. asm volatile("dc csw, %x0" : : "r" (val));
  71. dsb(ish);
  72. }
  73. /* See note at ARM ARM B1.14.4 */
  74. static bool access_dcsw(struct kvm_vcpu *vcpu,
  75. const struct sys_reg_params *p,
  76. const struct sys_reg_desc *r)
  77. {
  78. unsigned long val;
  79. int cpu;
  80. if (!p->is_write)
  81. return read_from_write_only(vcpu, p);
  82. cpu = get_cpu();
  83. cpumask_setall(&vcpu->arch.require_dcache_flush);
  84. cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
  85. /* If we were already preempted, take the long way around */
  86. if (cpu != vcpu->arch.last_pcpu) {
  87. flush_cache_all();
  88. goto done;
  89. }
  90. val = *vcpu_reg(vcpu, p->Rt);
  91. switch (p->CRm) {
  92. case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
  93. case 14: /* DCCISW */
  94. do_dc_cisw(val);
  95. break;
  96. case 10: /* DCCSW */
  97. do_dc_csw(val);
  98. break;
  99. }
  100. done:
  101. put_cpu();
  102. return true;
  103. }
  104. /*
  105. * Generic accessor for VM registers. Only called as long as HCR_TVM
  106. * is set.
  107. */
  108. static bool access_vm_reg(struct kvm_vcpu *vcpu,
  109. const struct sys_reg_params *p,
  110. const struct sys_reg_desc *r)
  111. {
  112. unsigned long val;
  113. BUG_ON(!p->is_write);
  114. val = *vcpu_reg(vcpu, p->Rt);
  115. if (!p->is_aarch32) {
  116. vcpu_sys_reg(vcpu, r->reg) = val;
  117. } else {
  118. if (!p->is_32bit)
  119. vcpu_cp15_64_high(vcpu, r->reg) = val >> 32;
  120. vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
  121. }
  122. return true;
  123. }
  124. /*
  125. * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
  126. * guest enables the MMU, we stop trapping the VM sys_regs and leave
  127. * it in complete control of the caches.
  128. */
  129. static bool access_sctlr(struct kvm_vcpu *vcpu,
  130. const struct sys_reg_params *p,
  131. const struct sys_reg_desc *r)
  132. {
  133. access_vm_reg(vcpu, p, r);
  134. if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
  135. vcpu->arch.hcr_el2 &= ~HCR_TVM;
  136. stage2_flush_vm(vcpu->kvm);
  137. }
  138. return true;
  139. }
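/*
 * Generic RAZ/WI handler: writes are silently ignored, reads return zero.
 */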
  140. static bool trap_raz_wi(struct kvm_vcpu *vcpu,
  141. const struct sys_reg_params *p,
  142. const struct sys_reg_desc *r)
  143. {
  144. if (p->is_write)
  145. return ignore_write(vcpu, p);
  146. else
  147. return read_zero(vcpu, p);
  148. }
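/*
 * OSLSR_EL1: writes are ignored; reads return a fixed value with only
 * bit 3 set, i.e. the OS Lock is reported as implemented but unlocked.
 */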
  149. static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
  150. const struct sys_reg_params *p,
  151. const struct sys_reg_desc *r)
  152. {
  153. if (p->is_write) {
  154. return ignore_write(vcpu, p);
  155. } else {
  156. *vcpu_reg(vcpu, p->Rt) = (1 << 3);
  157. return true;
  158. }
  159. }
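/*
 * DBGAUTHSTATUS_EL1: writes are ignored; reads are forwarded straight
 * from the host's copy of the register.
 */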
  160. static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
  161. const struct sys_reg_params *p,
  162. const struct sys_reg_desc *r)
  163. {
  164. if (p->is_write) {
  165. return ignore_write(vcpu, p);
  166. } else {
  167. u32 val;
  168. asm volatile("mrs %0, dbgauthstatus_el1" : "=r" (val));
  169. *vcpu_reg(vcpu, p->Rt) = val;
  170. return true;
  171. }
  172. }
  173. /*
  174. * We want to avoid world-switching all the DBG registers all the
  175. * time:
  176. *
  177. * - If we've touched any debug register, it is likely that we're
  178. * going to touch more of them. It then makes sense to disable the
  179. * traps and start doing the save/restore dance
  180. * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
  181. * then mandatory to save/restore the registers, as the guest
  182. * depends on them.
  183. *
  184. * For this, we use a DIRTY bit, indicating the guest has modified the
  185. * debug registers, used as follows:
  186. *
  187. * On guest entry:
  188. * - If the dirty bit is set (because we're coming back from trapping),
  189. * disable the traps, save host registers, restore guest registers.
  190. * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
  191. * set the dirty bit, disable the traps, save host registers,
  192. * restore guest registers.
  193. * - Otherwise, enable the traps
  194. *
  195. * On guest exit:
  196. * - If the dirty bit is set, save guest registers, restore host
  197. * registers and clear the dirty bit. This ensures that the host can
  198. * now use the debug registers.
  199. */
  200. static bool trap_debug_regs(struct kvm_vcpu *vcpu,
  201. const struct sys_reg_params *p,
  202. const struct sys_reg_desc *r)
  203. {
  204. if (p->is_write) {
  205. vcpu_sys_reg(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
  206. vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
  207. } else {
  208. *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
  209. }
  210. return true;
  211. }
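/*
 * AMAIR_EL1 holds IMPLEMENTATION DEFINED memory attributes, so reset the
 * guest's copy to whatever the host is currently using.
 */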
  212. static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
  213. {
  214. u64 amair;
  215. asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
  216. vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
  217. }
  218. static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
  219. {
  220. /*
  221. * Simply map the vcpu_id into the Aff0 field of the MPIDR.
  222. */
  223. vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
  224. }
  225. /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
  226. #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
  227. /* DBGBVRn_EL1 */ \
  228. { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \
  229. trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \
  230. /* DBGBCRn_EL1 */ \
  231. { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \
  232. trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \
  233. /* DBGWVRn_EL1 */ \
  234. { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \
  235. trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \
  236. /* DBGWCRn_EL1 */ \
  237. { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
  238. trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
  239. /*
  240. * Architected system registers.
  241. * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
  242. *
  243. * We could trap ID_DFR0 and tell the guest we don't support performance
  244. * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
  245. * NAKed, so it will read the PMCR anyway.
  246. *
  247. * Therefore we tell the guest we have 0 counters. Unfortunately, we
  248. * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
  249. * all PM registers, which doesn't crash the guest kernel at least.
  250. *
  251. * Debug handling: We do trap most, if not all, debug-related system
  252. * registers. The implementation is good enough to ensure that a guest
  253. * can use these with minimal performance degradation. The drawback is
  254. * that we don't implement any of the external debug architecture, nor
  255. * the OS Lock protocol. This should be revisited if we ever encounter a
  256. * more demanding guest...
  257. */
  258. static const struct sys_reg_desc sys_reg_descs[] = {
  259. /* DC ISW */
  260. { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
  261. access_dcsw },
  262. /* DC CSW */
  263. { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
  264. access_dcsw },
  265. /* DC CISW */
  266. { Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
  267. access_dcsw },
  268. DBG_BCR_BVR_WCR_WVR_EL1(0),
  269. DBG_BCR_BVR_WCR_WVR_EL1(1),
  270. /* MDCCINT_EL1 */
  271. { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
  272. trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
  273. /* MDSCR_EL1 */
  274. { Op0(0b10), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
  275. trap_debug_regs, reset_val, MDSCR_EL1, 0 },
  276. DBG_BCR_BVR_WCR_WVR_EL1(2),
  277. DBG_BCR_BVR_WCR_WVR_EL1(3),
  278. DBG_BCR_BVR_WCR_WVR_EL1(4),
  279. DBG_BCR_BVR_WCR_WVR_EL1(5),
  280. DBG_BCR_BVR_WCR_WVR_EL1(6),
  281. DBG_BCR_BVR_WCR_WVR_EL1(7),
  282. DBG_BCR_BVR_WCR_WVR_EL1(8),
  283. DBG_BCR_BVR_WCR_WVR_EL1(9),
  284. DBG_BCR_BVR_WCR_WVR_EL1(10),
  285. DBG_BCR_BVR_WCR_WVR_EL1(11),
  286. DBG_BCR_BVR_WCR_WVR_EL1(12),
  287. DBG_BCR_BVR_WCR_WVR_EL1(13),
  288. DBG_BCR_BVR_WCR_WVR_EL1(14),
  289. DBG_BCR_BVR_WCR_WVR_EL1(15),
  290. /* MDRAR_EL1 */
  291. { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
  292. trap_raz_wi },
  293. /* OSLAR_EL1 */
  294. { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b100),
  295. trap_raz_wi },
  296. /* OSLSR_EL1 */
  297. { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0001), Op2(0b100),
  298. trap_oslsr_el1 },
  299. /* OSDLR_EL1 */
  300. { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0011), Op2(0b100),
  301. trap_raz_wi },
  302. /* DBGPRCR_EL1 */
  303. { Op0(0b10), Op1(0b000), CRn(0b0001), CRm(0b0100), Op2(0b100),
  304. trap_raz_wi },
  305. /* DBGCLAIMSET_EL1 */
  306. { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1000), Op2(0b110),
  307. trap_raz_wi },
  308. /* DBGCLAIMCLR_EL1 */
  309. { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1001), Op2(0b110),
  310. trap_raz_wi },
  311. /* DBGAUTHSTATUS_EL1 */
  312. { Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
  313. trap_dbgauthstatus_el1 },
  314. /* TEECR32_EL1 */
  315. { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
  316. NULL, reset_val, TEECR32_EL1, 0 },
  317. /* TEEHBR32_EL1 */
  318. { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
  319. NULL, reset_val, TEEHBR32_EL1, 0 },
  320. /* MDCCSR_EL1 */
  321. { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
  322. trap_raz_wi },
  323. /* DBGDTR_EL0 */
  324. { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0100), Op2(0b000),
  325. trap_raz_wi },
  326. /* DBGDTR[TR]X_EL0 */
  327. { Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0101), Op2(0b000),
  328. trap_raz_wi },
  329. /* DBGVCR32_EL2 */
  330. { Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
  331. NULL, reset_val, DBGVCR32_EL2, 0 },
  332. /* MPIDR_EL1 */
  333. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
  334. NULL, reset_mpidr, MPIDR_EL1 },
  335. /* SCTLR_EL1 */
  336. { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
  337. access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
  338. /* CPACR_EL1 */
  339. { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
  340. NULL, reset_val, CPACR_EL1, 0 },
  341. /* TTBR0_EL1 */
  342. { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
  343. access_vm_reg, reset_unknown, TTBR0_EL1 },
  344. /* TTBR1_EL1 */
  345. { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
  346. access_vm_reg, reset_unknown, TTBR1_EL1 },
  347. /* TCR_EL1 */
  348. { Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
  349. access_vm_reg, reset_val, TCR_EL1, 0 },
  350. /* AFSR0_EL1 */
  351. { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
  352. access_vm_reg, reset_unknown, AFSR0_EL1 },
  353. /* AFSR1_EL1 */
  354. { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
  355. access_vm_reg, reset_unknown, AFSR1_EL1 },
  356. /* ESR_EL1 */
  357. { Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
  358. access_vm_reg, reset_unknown, ESR_EL1 },
  359. /* FAR_EL1 */
  360. { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
  361. access_vm_reg, reset_unknown, FAR_EL1 },
  362. /* PAR_EL1 */
  363. { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
  364. NULL, reset_unknown, PAR_EL1 },
  365. /* PMINTENSET_EL1 */
  366. { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
  367. trap_raz_wi },
  368. /* PMINTENCLR_EL1 */
  369. { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
  370. trap_raz_wi },
  371. /* MAIR_EL1 */
  372. { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
  373. access_vm_reg, reset_unknown, MAIR_EL1 },
  374. /* AMAIR_EL1 */
  375. { Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
  376. access_vm_reg, reset_amair_el1, AMAIR_EL1 },
  377. /* VBAR_EL1 */
  378. { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
  379. NULL, reset_val, VBAR_EL1, 0 },
  380. /* ICC_SRE_EL1 */
  381. { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
  382. trap_raz_wi },
  383. /* CONTEXTIDR_EL1 */
  384. { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
  385. access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
  386. /* TPIDR_EL1 */
  387. { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
  388. NULL, reset_unknown, TPIDR_EL1 },
  389. /* CNTKCTL_EL1 */
  390. { Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
  391. NULL, reset_val, CNTKCTL_EL1, 0},
  392. /* CSSELR_EL1 */
  393. { Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
  394. NULL, reset_unknown, CSSELR_EL1 },
  395. /* PMCR_EL0 */
  396. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
  397. trap_raz_wi },
  398. /* PMCNTENSET_EL0 */
  399. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
  400. trap_raz_wi },
  401. /* PMCNTENCLR_EL0 */
  402. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
  403. trap_raz_wi },
  404. /* PMOVSCLR_EL0 */
  405. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
  406. trap_raz_wi },
  407. /* PMSWINC_EL0 */
  408. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
  409. trap_raz_wi },
  410. /* PMSELR_EL0 */
  411. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
  412. trap_raz_wi },
  413. /* PMCEID0_EL0 */
  414. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
  415. trap_raz_wi },
  416. /* PMCEID1_EL0 */
  417. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
  418. trap_raz_wi },
  419. /* PMCCNTR_EL0 */
  420. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
  421. trap_raz_wi },
  422. /* PMXEVTYPER_EL0 */
  423. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
  424. trap_raz_wi },
  425. /* PMXEVCNTR_EL0 */
  426. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
  427. trap_raz_wi },
  428. /* PMUSERENR_EL0 */
  429. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
  430. trap_raz_wi },
  431. /* PMOVSSET_EL0 */
  432. { Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
  433. trap_raz_wi },
  434. /* TPIDR_EL0 */
  435. { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
  436. NULL, reset_unknown, TPIDR_EL0 },
  437. /* TPIDRRO_EL0 */
  438. { Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
  439. NULL, reset_unknown, TPIDRRO_EL0 },
  440. /* DACR32_EL2 */
  441. { Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
  442. NULL, reset_unknown, DACR32_EL2 },
  443. /* IFSR32_EL2 */
  444. { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
  445. NULL, reset_unknown, IFSR32_EL2 },
  446. /* FPEXC32_EL2 */
  447. { Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
  448. NULL, reset_val, FPEXC32_EL2, 0x70 },
  449. };
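/*
 * DBGIDR is the AArch32 view of the debug resources: synthesise it from
 * the AArch64 ID registers, using the breakpoint, watchpoint and context
 * comparator counts from ID_AA64DFR0_EL1, a Version field of 0b0110
 * (ARMv8 debug) and Secure state bits derived from EL3 support in
 * ID_AA64PFR0_EL1.
 */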
  450. static bool trap_dbgidr(struct kvm_vcpu *vcpu,
  451. const struct sys_reg_params *p,
  452. const struct sys_reg_desc *r)
  453. {
  454. if (p->is_write) {
  455. return ignore_write(vcpu, p);
  456. } else {
  457. u64 dfr = read_cpuid(ID_AA64DFR0_EL1);
  458. u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
  459. u32 el3 = !!((pfr >> 12) & 0xf);
  460. *vcpu_reg(vcpu, p->Rt) = ((((dfr >> 20) & 0xf) << 28) |
  461. (((dfr >> 12) & 0xf) << 24) |
  462. (((dfr >> 28) & 0xf) << 20) |
  463. (6 << 16) | (el3 << 14) | (el3 << 12));
  464. return true;
  465. }
  466. }
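/*
 * AArch32 debug register accessor: the value lives in the cp14 shadow
 * array, and writes flag the debug state as dirty (same scheme as
 * trap_debug_regs above, but for the 32bit registers).
 */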
  467. static bool trap_debug32(struct kvm_vcpu *vcpu,
  468. const struct sys_reg_params *p,
  469. const struct sys_reg_desc *r)
  470. {
  471. if (p->is_write) {
  472. vcpu_cp14(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt);
  473. vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
  474. } else {
  475. *vcpu_reg(vcpu, p->Rt) = vcpu_cp14(vcpu, r->reg);
  476. }
  477. return true;
  478. }
  479. #define DBG_BCR_BVR_WCR_WVR(n) \
  480. /* DBGBVRn */ \
  481. { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \
  482. NULL, (cp14_DBGBVR0 + (n) * 2) }, \
  483. /* DBGBCRn */ \
  484. { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \
  485. NULL, (cp14_DBGBCR0 + (n) * 2) }, \
  486. /* DBGWVRn */ \
  487. { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \
  488. NULL, (cp14_DBGWVR0 + (n) * 2) }, \
  489. /* DBGWCRn */ \
  490. { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \
  491. NULL, (cp14_DBGWCR0 + (n) * 2) }
  492. #define DBGBXVR(n) \
  493. { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \
  494. NULL, cp14_DBGBXVR0 + n * 2 }
  495. /*
  496. * Trapped cp14 registers. We generally ignore most of the external
  497. * debug registers, on the principle that they don't really make sense
  498. * to a guest. Revisit this one day, should this principle change.
  499. */
  500. static const struct sys_reg_desc cp14_regs[] = {
  501. /* DBGIDR */
  502. { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
  503. /* DBGDTRRXext */
  504. { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
  505. DBG_BCR_BVR_WCR_WVR(0),
  506. /* DBGDSCRint */
  507. { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
  508. DBG_BCR_BVR_WCR_WVR(1),
  509. /* DBGDCCINT */
  510. { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32 },
  511. /* DBGDSCRext */
  512. { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32 },
  513. DBG_BCR_BVR_WCR_WVR(2),
  514. /* DBGDTR[RT]Xint */
  515. { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
  516. /* DBGDTR[RT]Xext */
  517. { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
  518. DBG_BCR_BVR_WCR_WVR(3),
  519. DBG_BCR_BVR_WCR_WVR(4),
  520. DBG_BCR_BVR_WCR_WVR(5),
  521. /* DBGWFAR */
  522. { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
  523. /* DBGOSECCR */
  524. { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
  525. DBG_BCR_BVR_WCR_WVR(6),
  526. /* DBGVCR */
  527. { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32 },
  528. DBG_BCR_BVR_WCR_WVR(7),
  529. DBG_BCR_BVR_WCR_WVR(8),
  530. DBG_BCR_BVR_WCR_WVR(9),
  531. DBG_BCR_BVR_WCR_WVR(10),
  532. DBG_BCR_BVR_WCR_WVR(11),
  533. DBG_BCR_BVR_WCR_WVR(12),
  534. DBG_BCR_BVR_WCR_WVR(13),
  535. DBG_BCR_BVR_WCR_WVR(14),
  536. DBG_BCR_BVR_WCR_WVR(15),
  537. /* DBGDRAR (32bit) */
  538. { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
  539. DBGBXVR(0),
  540. /* DBGOSLAR */
  541. { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
  542. DBGBXVR(1),
  543. /* DBGOSLSR */
  544. { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
  545. DBGBXVR(2),
  546. DBGBXVR(3),
  547. /* DBGOSDLR */
  548. { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
  549. DBGBXVR(4),
  550. /* DBGPRCR */
  551. { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
  552. DBGBXVR(5),
  553. DBGBXVR(6),
  554. DBGBXVR(7),
  555. DBGBXVR(8),
  556. DBGBXVR(9),
  557. DBGBXVR(10),
  558. DBGBXVR(11),
  559. DBGBXVR(12),
  560. DBGBXVR(13),
  561. DBGBXVR(14),
  562. DBGBXVR(15),
  563. /* DBGDSAR (32bit) */
  564. { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
  565. /* DBGDEVID2 */
  566. { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
  567. /* DBGDEVID1 */
  568. { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
  569. /* DBGDEVID */
  570. { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
  571. /* DBGCLAIMSET */
  572. { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
  573. /* DBGCLAIMCLR */
  574. { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
  575. /* DBGAUTHSTATUS */
  576. { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
  577. };
  578. /* Trapped cp14 64bit registers */
  579. static const struct sys_reg_desc cp14_64_regs[] = {
  580. /* DBGDRAR (64bit) */
  581. { Op1( 0), CRm( 1), .access = trap_raz_wi },
  582. /* DBGDSAR (64bit) */
  583. { Op1( 0), CRm( 2), .access = trap_raz_wi },
  584. };
  585. /*
  586. * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
  587. * depending on the way they are accessed (as a 32bit or a 64bit
  588. * register).
  589. */
  590. static const struct sys_reg_desc cp15_regs[] = {
  591. { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
  592. { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
  593. { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
  594. { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
  595. { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
  596. { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
  597. { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
  598. { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
  599. { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
  600. { Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
  601. { Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },
  602. /*
  603. * DC{C,I,CI}SW operations:
  604. */
  605. { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
  606. { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
  607. { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
  608. /* PMU */
  609. { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
  610. { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
  611. { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
  612. { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
  613. { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
  614. { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
  615. { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
  616. { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
  617. { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
  618. { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
  619. { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
  620. { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
  621. { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
  622. { Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
  623. { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
  624. { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
  625. { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
  626. /* ICC_SRE */
  627. { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
  628. { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
  629. };
  630. static const struct sys_reg_desc cp15_64_regs[] = {
  631. { Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
  632. { Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
  633. };
  634. /* Target specific emulation tables */
  635. static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
  636. void kvm_register_target_sys_reg_table(unsigned int target,
  637. struct kvm_sys_reg_target_table *table)
  638. {
  639. target_tables[target] = table;
  640. }
  641. /* Get specific register table for this target. */
  642. static const struct sys_reg_desc *get_target_table(unsigned target,
  643. bool mode_is_64,
  644. size_t *num)
  645. {
  646. struct kvm_sys_reg_target_table *table;
  647. table = target_tables[target];
  648. if (mode_is_64) {
  649. *num = table->table64.num;
  650. return table->table64.table;
  651. } else {
  652. *num = table->table32.num;
  653. return table->table32.table;
  654. }
  655. }
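/*
 * Linear search of a descriptor table for an exact Op0/Op1/CRn/CRm/Op2
 * match.
 */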
  656. static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
  657. const struct sys_reg_desc table[],
  658. unsigned int num)
  659. {
  660. unsigned int i;
  661. for (i = 0; i < num; i++) {
  662. const struct sys_reg_desc *r = &table[i];
  663. if (params->Op0 != r->Op0)
  664. continue;
  665. if (params->Op1 != r->Op1)
  666. continue;
  667. if (params->CRn != r->CRn)
  668. continue;
  669. if (params->CRm != r->CRm)
  670. continue;
  671. if (params->Op2 != r->Op2)
  672. continue;
  673. return r;
  674. }
  675. return NULL;
  676. }
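/*
 * LDC/STC accesses to cp14 registers are not supported: inject an
 * undefined exception into the guest.
 */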
  677. int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
  678. {
  679. kvm_inject_undefined(vcpu);
  680. return 1;
  681. }
  682. /*
  683. * emulate_cp -- tries to match a sys_reg access in a handling table, and
  684. * call the corresponding trap handler.
  685. *
  686. * @params: pointer to the descriptor of the access
  687. * @table: array of trap descriptors
  688. * @num: size of the trap descriptor array
  689. *
  690. * Return 0 if the access has been handled, and -1 if not.
  691. */
  692. static int emulate_cp(struct kvm_vcpu *vcpu,
  693. const struct sys_reg_params *params,
  694. const struct sys_reg_desc *table,
  695. size_t num)
  696. {
  697. const struct sys_reg_desc *r;
  698. if (!table)
  699. return -1; /* Not handled */
  700. r = find_reg(params, table, num);
  701. if (r) {
  702. /*
  703. * Not having an accessor means that we have
  704. * configured a trap that we don't know how to
  705. * handle. This certainly qualifies as a gross bug
  706. * that should be fixed right away.
  707. */
  708. BUG_ON(!r->access);
  709. if (likely(r->access(vcpu, params, r))) {
  710. /* Skip instruction, since it was emulated */
  711. kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
  712. }
  713. /* Handled */
  714. return 0;
  715. }
  716. /* Not handled */
  717. return -1;
  718. }
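/*
 * No table entry matched this coprocessor access: log the offending
 * instruction and inject an undefined exception into the guest.
 */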
  719. static void unhandled_cp_access(struct kvm_vcpu *vcpu,
  720. struct sys_reg_params *params)
  721. {
  722. u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
  723. int cp;
  724. switch(hsr_ec) {
  725. case ESR_EL2_EC_CP15_32:
  726. case ESR_EL2_EC_CP15_64:
  727. cp = 15;
  728. break;
  729. case ESR_EL2_EC_CP14_MR:
  730. case ESR_EL2_EC_CP14_64:
  731. cp = 14;
  732. break;
  733. default:
  734. WARN_ON((cp = -1));
  735. }
  736. kvm_err("Unsupported guest CP%d access at: %08lx\n",
  737. cp, *vcpu_pc(vcpu));
  738. print_sys_reg_instr(params);
  739. kvm_inject_undefined(vcpu);
  740. }
  741. /**
  742. * kvm_handle_cp_64 -- handles an mrrc/mcrr trap on a guest CP14 or CP15 access
  743. * @vcpu: The VCPU pointer
  744. * @global, @target_specific: descriptor tables (and their sizes) to search
  745. */
  746. static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
  747. const struct sys_reg_desc *global,
  748. size_t nr_global,
  749. const struct sys_reg_desc *target_specific,
  750. size_t nr_specific)
  751. {
  752. struct sys_reg_params params;
  753. u32 hsr = kvm_vcpu_get_hsr(vcpu);
  754. int Rt2 = (hsr >> 10) & 0xf;
  755. params.is_aarch32 = true;
  756. params.is_32bit = false;
  757. params.CRm = (hsr >> 1) & 0xf;
  758. params.Rt = (hsr >> 5) & 0xf;
  759. params.is_write = ((hsr & 1) == 0);
  760. params.Op0 = 0;
  761. params.Op1 = (hsr >> 16) & 0xf;
  762. params.Op2 = 0;
  763. params.CRn = 0;
  764. /*
  765. * Massive hack here. Store Rt2 in the top 32bits so we only
  766. * have one register to deal with. As we use the same trap
  767. * backends between AArch32 and AArch64, we get away with it.
  768. */
  769. if (params.is_write) {
  770. u64 val = *vcpu_reg(vcpu, params.Rt);
  771. val &= 0xffffffff;
  772. val |= *vcpu_reg(vcpu, Rt2) << 32;
  773. *vcpu_reg(vcpu, params.Rt) = val;
  774. }
  775. if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
  776. goto out;
  777. if (!emulate_cp(vcpu, &params, global, nr_global))
  778. goto out;
  779. unhandled_cp_access(vcpu, &params);
  780. out:
  781. /* Do the opposite hack for the read side */
  782. if (!params.is_write) {
  783. u64 val = *vcpu_reg(vcpu, params.Rt);
  784. val >>= 32;
  785. *vcpu_reg(vcpu, Rt2) = val;
  786. }
  787. return 1;
  788. }
  789. /**
  790. * kvm_handle_cp_32 -- handles an mrc/mcr trap on a guest CP14 or CP15 access
  791. * @vcpu: The VCPU pointer
  792. * @global, @target_specific: descriptor tables (and their sizes) to search
  793. */
  794. static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
  795. const struct sys_reg_desc *global,
  796. size_t nr_global,
  797. const struct sys_reg_desc *target_specific,
  798. size_t nr_specific)
  799. {
  800. struct sys_reg_params params;
  801. u32 hsr = kvm_vcpu_get_hsr(vcpu);
  802. params.is_aarch32 = true;
  803. params.is_32bit = true;
  804. params.CRm = (hsr >> 1) & 0xf;
  805. params.Rt = (hsr >> 5) & 0xf;
  806. params.is_write = ((hsr & 1) == 0);
  807. params.CRn = (hsr >> 10) & 0xf;
  808. params.Op0 = 0;
  809. params.Op1 = (hsr >> 14) & 0x7;
  810. params.Op2 = (hsr >> 17) & 0x7;
  811. if (!emulate_cp(vcpu, &params, target_specific, nr_specific))
  812. return 1;
  813. if (!emulate_cp(vcpu, &params, global, nr_global))
  814. return 1;
  815. unhandled_cp_access(vcpu, &params);
  816. return 1;
  817. }
  818. int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
  819. {
  820. const struct sys_reg_desc *target_specific;
  821. size_t num;
  822. target_specific = get_target_table(vcpu->arch.target, false, &num);
  823. return kvm_handle_cp_64(vcpu,
  824. cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
  825. target_specific, num);
  826. }
  827. int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
  828. {
  829. const struct sys_reg_desc *target_specific;
  830. size_t num;
  831. target_specific = get_target_table(vcpu->arch.target, false, &num);
  832. return kvm_handle_cp_32(vcpu,
  833. cp15_regs, ARRAY_SIZE(cp15_regs),
  834. target_specific, num);
  835. }
  836. int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
  837. {
  838. return kvm_handle_cp_64(vcpu,
  839. cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
  840. NULL, 0);
  841. }
  842. int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
  843. {
  844. return kvm_handle_cp_32(vcpu,
  845. cp14_regs, ARRAY_SIZE(cp14_regs),
  846. NULL, 0);
  847. }
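/*
 * Emulate a trapped system register access: look the encoding up in the
 * target-specific table first, then in the generic table, and invoke the
 * matching accessor. An unknown encoding results in an undefined
 * exception being injected into the guest.
 */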
  848. static int emulate_sys_reg(struct kvm_vcpu *vcpu,
  849. const struct sys_reg_params *params)
  850. {
  851. size_t num;
  852. const struct sys_reg_desc *table, *r;
  853. table = get_target_table(vcpu->arch.target, true, &num);
  854. /* Search target-specific then generic table. */
  855. r = find_reg(params, table, num);
  856. if (!r)
  857. r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
  858. if (likely(r)) {
  859. /*
  860. * Not having an accessor means that we have
  861. * configured a trap that we don't know how to
  862. * handle. This certainly qualifies as a gross bug
  863. * that should be fixed right away.
  864. */
  865. BUG_ON(!r->access);
  866. if (likely(r->access(vcpu, params, r))) {
  867. /* Skip instruction, since it was emulated */
  868. kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
  869. return 1;
  870. }
  871. /* If the access function fails, it should complain. */
  872. } else {
  873. kvm_err("Unsupported guest sys_reg access at: %lx\n",
  874. *vcpu_pc(vcpu));
  875. print_sys_reg_instr(params);
  876. }
  877. kvm_inject_undefined(vcpu);
  878. return 1;
  879. }
  880. static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
  881. const struct sys_reg_desc *table, size_t num)
  882. {
  883. unsigned long i;
  884. for (i = 0; i < num; i++)
  885. if (table[i].reset)
  886. table[i].reset(vcpu, &table[i]);
  887. }
  888. /**
  889. * kvm_handle_sys_reg -- handles an mrs/msr trap on a guest sys_reg access
  890. * @vcpu: The VCPU pointer
  891. * @run: The kvm_run struct
  892. */
  893. int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
  894. {
  895. struct sys_reg_params params;
  896. unsigned long esr = kvm_vcpu_get_hsr(vcpu);
  897. params.is_aarch32 = false;
  898. params.is_32bit = false;
  899. params.Op0 = (esr >> 20) & 3;
  900. params.Op1 = (esr >> 14) & 0x7;
  901. params.CRn = (esr >> 10) & 0xf;
  902. params.CRm = (esr >> 1) & 0xf;
  903. params.Op2 = (esr >> 17) & 0x7;
  904. params.Rt = (esr >> 5) & 0x1f;
  905. params.is_write = !(esr & 1);
  906. return emulate_sys_reg(vcpu, &params);
  907. }
  908. /******************************************************************************
  909. * Userspace API
  910. *****************************************************************************/
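/*
 * Decode a KVM_REG_ARM64_SYSREG register index from userspace into the
 * Op0/Op1/CRn/CRm/Op2 tuple used by the descriptor tables. Only 64bit
 * indices without stray bits are accepted.
 */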
  911. static bool index_to_params(u64 id, struct sys_reg_params *params)
  912. {
  913. switch (id & KVM_REG_SIZE_MASK) {
  914. case KVM_REG_SIZE_U64:
  915. /* Any unused index bits mean it's not valid. */
  916. if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
  917. | KVM_REG_ARM_COPROC_MASK
  918. | KVM_REG_ARM64_SYSREG_OP0_MASK
  919. | KVM_REG_ARM64_SYSREG_OP1_MASK
  920. | KVM_REG_ARM64_SYSREG_CRN_MASK
  921. | KVM_REG_ARM64_SYSREG_CRM_MASK
  922. | KVM_REG_ARM64_SYSREG_OP2_MASK))
  923. return false;
  924. params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
  925. >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
  926. params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
  927. >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
  928. params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
  929. >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
  930. params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
  931. >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
  932. params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
  933. >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
  934. return true;
  935. default:
  936. return false;
  937. }
  938. }
  939. /* Decode an index value, and find the sys_reg_desc entry. */
  940. static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
  941. u64 id)
  942. {
  943. size_t num;
  944. const struct sys_reg_desc *table, *r;
  945. struct sys_reg_params params;
  946. /* We only do sys_reg for now. */
  947. if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
  948. return NULL;
  949. if (!index_to_params(id, &params))
  950. return NULL;
  951. table = get_target_table(vcpu->arch.target, true, &num);
  952. r = find_reg(&params, table, num);
  953. if (!r)
  954. r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
  955. /* Not saved in the sys_reg array? */
  956. if (r && !r->reg)
  957. r = NULL;
  958. return r;
  959. }
  960. /*
  961. * These are the invariant sys_reg registers: we let the guest see the
  962. * host versions of these, so they're part of the guest state.
  963. *
  964. * A future CPU may provide a mechanism to present different values to
  965. * the guest, or a future kvm may trap them.
  966. */
  967. #define FUNCTION_INVARIANT(reg) \
  968. static void get_##reg(struct kvm_vcpu *v, \
  969. const struct sys_reg_desc *r) \
  970. { \
  971. u64 val; \
  972. \
  973. asm volatile("mrs %0, " __stringify(reg) "\n" \
  974. : "=r" (val)); \
  975. ((struct sys_reg_desc *)r)->val = val; \
  976. }
  977. FUNCTION_INVARIANT(midr_el1)
  978. FUNCTION_INVARIANT(ctr_el0)
  979. FUNCTION_INVARIANT(revidr_el1)
  980. FUNCTION_INVARIANT(id_pfr0_el1)
  981. FUNCTION_INVARIANT(id_pfr1_el1)
  982. FUNCTION_INVARIANT(id_dfr0_el1)
  983. FUNCTION_INVARIANT(id_afr0_el1)
  984. FUNCTION_INVARIANT(id_mmfr0_el1)
  985. FUNCTION_INVARIANT(id_mmfr1_el1)
  986. FUNCTION_INVARIANT(id_mmfr2_el1)
  987. FUNCTION_INVARIANT(id_mmfr3_el1)
  988. FUNCTION_INVARIANT(id_isar0_el1)
  989. FUNCTION_INVARIANT(id_isar1_el1)
  990. FUNCTION_INVARIANT(id_isar2_el1)
  991. FUNCTION_INVARIANT(id_isar3_el1)
  992. FUNCTION_INVARIANT(id_isar4_el1)
  993. FUNCTION_INVARIANT(id_isar5_el1)
  994. FUNCTION_INVARIANT(clidr_el1)
  995. FUNCTION_INVARIANT(aidr_el1)
  996. /* ->val is filled in by kvm_sys_reg_table_init() */
  997. static struct sys_reg_desc invariant_sys_regs[] = {
  998. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
  999. NULL, get_midr_el1 },
  1000. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
  1001. NULL, get_revidr_el1 },
  1002. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
  1003. NULL, get_id_pfr0_el1 },
  1004. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
  1005. NULL, get_id_pfr1_el1 },
  1006. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
  1007. NULL, get_id_dfr0_el1 },
  1008. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
  1009. NULL, get_id_afr0_el1 },
  1010. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
  1011. NULL, get_id_mmfr0_el1 },
  1012. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
  1013. NULL, get_id_mmfr1_el1 },
  1014. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
  1015. NULL, get_id_mmfr2_el1 },
  1016. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
  1017. NULL, get_id_mmfr3_el1 },
  1018. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
  1019. NULL, get_id_isar0_el1 },
  1020. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
  1021. NULL, get_id_isar1_el1 },
  1022. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
  1023. NULL, get_id_isar2_el1 },
  1024. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
  1025. NULL, get_id_isar3_el1 },
  1026. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
  1027. NULL, get_id_isar4_el1 },
  1028. { Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
  1029. NULL, get_id_isar5_el1 },
  1030. { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
  1031. NULL, get_clidr_el1 },
  1032. { Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
  1033. NULL, get_aidr_el1 },
  1034. { Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
  1035. NULL, get_ctr_el0 },
  1036. };
  1037. static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
  1038. {
  1039. if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
  1040. return -EFAULT;
  1041. return 0;
  1042. }
  1043. static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
  1044. {
  1045. if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
  1046. return -EFAULT;
  1047. return 0;
  1048. }
  1049. static int get_invariant_sys_reg(u64 id, void __user *uaddr)
  1050. {
  1051. struct sys_reg_params params;
  1052. const struct sys_reg_desc *r;
  1053. if (!index_to_params(id, &params))
  1054. return -ENOENT;
  1055. r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
  1056. if (!r)
  1057. return -ENOENT;
  1058. return reg_to_user(uaddr, &r->val, id);
  1059. }
  1060. static int set_invariant_sys_reg(u64 id, void __user *uaddr)
  1061. {
  1062. struct sys_reg_params params;
  1063. const struct sys_reg_desc *r;
  1064. int err;
  1065. u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
  1066. if (!index_to_params(id, &params))
  1067. return -ENOENT;
  1068. r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
  1069. if (!r)
  1070. return -ENOENT;
  1071. err = reg_from_user(&val, uaddr, id);
  1072. if (err)
  1073. return err;
  1074. /* This is what we mean by invariant: you can't change it. */
  1075. if (r->val != val)
  1076. return -EINVAL;
  1077. return 0;
  1078. }
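/*
 * Check that a CSSELR value selects a cache (level and instruction/data
 * side) that actually exists according to the CLIDR-derived cache_levels.
 */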
  1079. static bool is_valid_cache(u32 val)
  1080. {
  1081. u32 level, ctype;
  1082. if (val >= CSSELR_MAX)
  1083. return false;
  1084. /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
  1085. level = (val >> 1);
  1086. ctype = (cache_levels >> (level * 3)) & 7;
  1087. switch (ctype) {
  1088. case 0: /* No cache */
  1089. return false;
  1090. case 1: /* Instruction cache only */
  1091. return (val & 1);
  1092. case 2: /* Data cache only */
  1093. case 4: /* Unified cache */
  1094. return !(val & 1);
  1095. case 3: /* Separate instruction and data caches */
  1096. return true;
  1097. default: /* Reserved: we can't know instruction or data. */
  1098. return false;
  1099. }
  1100. }
  1101. static int demux_c15_get(u64 id, void __user *uaddr)
  1102. {
  1103. u32 val;
  1104. u32 __user *uval = uaddr;
  1105. /* Fail if we have unknown bits set. */
  1106. if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
  1107. | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
  1108. return -ENOENT;
  1109. switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
  1110. case KVM_REG_ARM_DEMUX_ID_CCSIDR:
  1111. if (KVM_REG_SIZE(id) != 4)
  1112. return -ENOENT;
  1113. val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
  1114. >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
  1115. if (!is_valid_cache(val))
  1116. return -ENOENT;
  1117. return put_user(get_ccsidr(val), uval);
  1118. default:
  1119. return -ENOENT;
  1120. }
  1121. }
  1122. static int demux_c15_set(u64 id, void __user *uaddr)
  1123. {
  1124. u32 val, newval;
  1125. u32 __user *uval = uaddr;
  1126. /* Fail if we have unknown bits set. */
  1127. if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
  1128. | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
  1129. return -ENOENT;
  1130. switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
  1131. case KVM_REG_ARM_DEMUX_ID_CCSIDR:
  1132. if (KVM_REG_SIZE(id) != 4)
  1133. return -ENOENT;
  1134. val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
  1135. >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
  1136. if (!is_valid_cache(val))
  1137. return -ENOENT;
  1138. if (get_user(newval, uval))
  1139. return -EFAULT;
  1140. /* This is also invariant: you can't change it. */
  1141. if (newval != get_ccsidr(val))
  1142. return -EINVAL;
  1143. return 0;
  1144. default:
  1145. return -ENOENT;
  1146. }
  1147. }
  1148. int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
  1149. {
  1150. const struct sys_reg_desc *r;
  1151. void __user *uaddr = (void __user *)(unsigned long)reg->addr;
  1152. if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
  1153. return demux_c15_get(reg->id, uaddr);
  1154. if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
  1155. return -ENOENT;
  1156. r = index_to_sys_reg_desc(vcpu, reg->id);
  1157. if (!r)
  1158. return get_invariant_sys_reg(reg->id, uaddr);
  1159. return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
  1160. }
  1161. int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
  1162. {
  1163. const struct sys_reg_desc *r;
  1164. void __user *uaddr = (void __user *)(unsigned long)reg->addr;
  1165. if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
  1166. return demux_c15_set(reg->id, uaddr);
  1167. if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
  1168. return -ENOENT;
  1169. r = index_to_sys_reg_desc(vcpu, reg->id);
  1170. if (!r)
  1171. return set_invariant_sys_reg(reg->id, uaddr);
  1172. return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
  1173. }
  1174. static unsigned int num_demux_regs(void)
  1175. {
  1176. unsigned int i, count = 0;
  1177. for (i = 0; i < CSSELR_MAX; i++)
  1178. if (is_valid_cache(i))
  1179. count++;
  1180. return count;
  1181. }
  1182. static int write_demux_regids(u64 __user *uindices)
  1183. {
  1184. u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
  1185. unsigned int i;
  1186. val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
  1187. for (i = 0; i < CSSELR_MAX; i++) {
  1188. if (!is_valid_cache(i))
  1189. continue;
  1190. if (put_user(val | i, uindices))
  1191. return -EFAULT;
  1192. uindices++;
  1193. }
  1194. return 0;
  1195. }
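/*
 * Build the KVM_{GET,SET}_ONE_REG index corresponding to a sys_reg
 * descriptor.
 */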
  1196. static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
  1197. {
  1198. return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
  1199. KVM_REG_ARM64_SYSREG |
  1200. (reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
  1201. (reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
  1202. (reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
  1203. (reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
  1204. (reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
  1205. }
  1206. static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
  1207. {
  1208. if (!*uind)
  1209. return true;
  1210. if (put_user(sys_reg_to_index(reg), *uind))
  1211. return false;
  1212. (*uind)++;
  1213. return true;
  1214. }
  1215. /* Assumed ordered tables, see kvm_sys_reg_table_init. */
  1216. static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
  1217. {
  1218. const struct sys_reg_desc *i1, *i2, *end1, *end2;
  1219. unsigned int total = 0;
  1220. size_t num;
  1221. /* We check for duplicates here, to allow arch-specific overrides. */
  1222. i1 = get_target_table(vcpu->arch.target, true, &num);
  1223. end1 = i1 + num;
  1224. i2 = sys_reg_descs;
  1225. end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);
  1226. BUG_ON(i1 == end1 || i2 == end2);
  1227. /* Walk carefully, as both tables may refer to the same register. */
  1228. while (i1 || i2) {
  1229. int cmp = cmp_sys_reg(i1, i2);
  1230. /* target-specific overrides generic entry. */
  1231. if (cmp <= 0) {
  1232. /* Ignore registers we trap but don't save. */
  1233. if (i1->reg) {
  1234. if (!copy_reg_to_user(i1, &uind))
  1235. return -EFAULT;
  1236. total++;
  1237. }
  1238. } else {
  1239. /* Ignore registers we trap but don't save. */
  1240. if (i2->reg) {
  1241. if (!copy_reg_to_user(i2, &uind))
  1242. return -EFAULT;
  1243. total++;
  1244. }
  1245. }
  1246. if (cmp <= 0 && ++i1 == end1)
  1247. i1 = NULL;
  1248. if (cmp >= 0 && ++i2 == end2)
  1249. i2 = NULL;
  1250. }
  1251. return total;
  1252. }
  1253. unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
  1254. {
  1255. return ARRAY_SIZE(invariant_sys_regs)
  1256. + num_demux_regs()
  1257. + walk_sys_regs(vcpu, (u64 __user *)NULL);
  1258. }
  1259. int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
  1260. {
  1261. unsigned int i;
  1262. int err;
  1263. /* Then give them all the invariant registers' indices. */
  1264. for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
  1265. if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
  1266. return -EFAULT;
  1267. uindices++;
  1268. }
  1269. err = walk_sys_regs(vcpu, uindices);
  1270. if (err < 0)
  1271. return err;
  1272. uindices += err;
  1273. return write_demux_regids(uindices);
  1274. }
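/*
 * Sanity-check that a descriptor table is strictly sorted by encoding,
 * which also guarantees it contains no duplicates.
 */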
  1275. static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
  1276. {
  1277. unsigned int i;
  1278. for (i = 1; i < n; i++) {
  1279. if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
  1280. kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
  1281. return 1;
  1282. }
  1283. }
  1284. return 0;
  1285. }
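/*
 * One-time initialisation: verify the ordering of all tables, snapshot
 * the host values of the invariant registers, and compute cache_levels
 * from CLIDR_EL1.
 */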
  1286. void kvm_sys_reg_table_init(void)
  1287. {
  1288. unsigned int i;
  1289. struct sys_reg_desc clidr;
  1290. /* Make sure tables are unique and in order. */
  1291. BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
  1292. BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
  1293. BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
  1294. BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
  1295. BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
  1296. BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
  1297. /* We abuse the reset function to overwrite the table itself. */
  1298. for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
  1299. invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);
  1300. /*
  1301. * CLIDR format is awkward, so clean it up. See the ARM ARM, section B4.1.20:
  1302. *
  1303. * If software reads the Cache Type fields from Ctype1
  1304. * upwards, once it has seen a value of 0b000, no caches
  1305. * exist at further-out levels of the hierarchy. So, for
  1306. * example, if Ctype3 is the first Cache Type field with a
  1307. * value of 0b000, the values of Ctype4 to Ctype7 must be
  1308. * ignored.
  1309. */
  1310. get_clidr_el1(NULL, &clidr); /* Ugly... */
  1311. cache_levels = clidr.val;
  1312. for (i = 0; i < 7; i++)
  1313. if (((cache_levels >> (i*3)) & 7) == 0)
  1314. break;
  1315. /* Clear all higher bits. */
  1316. cache_levels &= (1 << (i*3))-1;
  1317. }
  1318. /**
  1319. * kvm_reset_sys_regs - sets system registers to reset value
  1320. * @vcpu: The VCPU pointer
  1321. *
  1322. * This function finds the right table above and sets the registers on the
  1323. * virtual CPU struct to their architecturally defined reset values.
  1324. */
  1325. void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
  1326. {
  1327. size_t num;
  1328. const struct sys_reg_desc *table;
  1329. /* Catch someone adding a register without putting in a reset entry. */
  1330. memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
  1331. /* Generic chip reset first (so target could override). */
  1332. reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
  1333. table = get_target_table(vcpu->arch.target, true, &num);
  1334. reset_sys_reg_descs(vcpu, table, num);
  1335. for (num = 1; num < NR_SYS_REGS; num++)
  1336. if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
  1337. panic("Didn't reset vcpu_sys_reg(%zi)", num);
  1338. }