/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait",         VCPU_STAT(wait_exits),         KVM_STAT_VCPU },
	{ "cache",        VCPU_STAT(cache_exits),        KVM_STAT_VCPU },
	{ "signal",       VCPU_STAT(signal_exits),       KVM_STAT_VCPU },
	{ "interrupt",    VCPU_STAT(int_exits),          KVM_STAT_VCPU },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod",       VCPU_STAT(tlbmod_exits),       KVM_STAT_VCPU },
	{ "tlbmiss_ld",   VCPU_STAT(tlbmiss_ld_exits),   KVM_STAT_VCPU },
	{ "tlbmiss_st",   VCPU_STAT(tlbmiss_st_exits),   KVM_STAT_VCPU },
	{ "addrerr_st",   VCPU_STAT(addrerr_st_exits),   KVM_STAT_VCPU },
	{ "addrerr_ld",   VCPU_STAT(addrerr_ld_exits),   KVM_STAT_VCPU },
	{ "syscall",      VCPU_STAT(syscall_exits),      KVM_STAT_VCPU },
	{ "resvd_inst",   VCPU_STAT(resvd_inst_exits),   KVM_STAT_VCPU },
	{ "break_inst",   VCPU_STAT(break_inst_exits),   KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
	{ "halt_wakeup",  VCPU_STAT(halt_wakeup),        KVM_STAT_VCPU },
	{NULL}
};
static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	for_each_possible_cpu(i) {
		vcpu->arch.guest_kernel_asid[i] = 0;
		vcpu->arch.guest_user_asid[i] = 0;
	}

	return 0;
}
/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}
static void kvm_mips_init_tlbs(struct kvm *kvm)
{
	unsigned long wired;

	/*
	 * Add a wired entry to the TLB, it is used to map the commpage to
	 * the Guest kernel
	 */
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	mtc0_tlbw_hazard();
	kvm->arch.commpage_tlb = wired;

	kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
		  kvm->arch.commpage_tlb);
}

static void kvm_mips_init_vm_percpu(void *arg)
{
	struct kvm *kvm = (struct kvm *)arg;

	kvm_mips_init_tlbs(kvm);
	kvm_mips_callbacks->vm_init(kvm);
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (atomic_inc_return(&kvm_mips_instance) == 1) {
		kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
	}

	return 0;
}
void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}
	kfree(kvm->arch.guest_pmap);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

static void kvm_mips_uninit_tlbs(void *arg)
{
	/* Restore wired count */
	write_c0_wired(0);
	mtc0_tlbw_hazard();
	/* Clear out all the TLBs */
	kvm_local_flush_tlb_all();
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);

	/* If this is the last instance, restore wired count */
	if (atomic_dec_return(&kvm_mips_instance) == 0) {
		kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
			  __func__);
		on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
	}
}
long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	unsigned long npages = 0;
	int i;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/* Setup Guest PMAP table */
	if (!kvm->arch.guest_pmap) {
		if (mem->slot == 0)
			npages = mem->memory_size >> PAGE_SHIFT;

		if (npages) {
			kvm->arch.guest_pmap_npages = npages;
			kvm->arch.guest_pmap =
			    kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);

			if (!kvm->arch.guest_pmap) {
				kvm_err("Failed to allocate guest PMAP");
				return;
			}

			kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
				  npages, kvm->arch.guest_pmap);

			/* Now setup the page table */
			for (i = 0; i < npages; i++)
				kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
		}
	}
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err, size, offset;
	void *gebase;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	/* Save Linux EBASE */
	vcpu->arch.host_ebase = (void *)read_c0_ebase();

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_free_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;
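
	/*
	 * Layout of the handler copies installed below, relative to gebase:
	 *
	 *   0x0000                    TLB refill handler (EXL = 0)
	 *   0x0180                    general exception entry point
	 *   0x0200 + i*VECTORSPACING  vectored interrupt handlers, i = 0..7
	 *   0x2000                    relocated general guest-exit handler
	 */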
	/* Copy L1 Guest Exception handler to correct offset */

	/* TLB refill, EXL = 0 */
	memcpy(gebase, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* General Exception Entry point */
	memcpy(gebase + 0x180, mips32_exception,
	       mips32_exceptionEnd - mips32_exception);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
		       mips32_exceptionEnd - mips32_exception);
	}

	/* General handler, relocate to unmapped space for sanity's sake */
	offset = 0x2000;
	kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
		  gebase + offset,
		  mips32_GuestExceptionEnd - mips32_GuestException);

	memcpy(gebase + offset, mips32_GuestException,
	       mips32_GuestExceptionEnd - mips32_GuestException);

	/* Invalidate the icache for these ranges */
	local_flush_icache_range((unsigned long)gebase,
				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	/* Start off the timer */
	kvm_mips_init_count(vcpu);

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
	kfree(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = 0;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	lose_fpu(1);

	local_irq_disable();
	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_guest_enter();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	r = __kvm_mips_vcpu_run(run, vcpu);

	/* Re-enable HTW before enabling interrupts */
	htw_start();

	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
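
/*
 * Userspace reaches the run loop above through the KVM_RUN ioctl on a vcpu
 * fd. A minimal sketch of the calling side (hypothetical VMM code, assuming
 * a vcpu fd and an mmap'd struct kvm_run obtained the usual way):
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *			break;			// e.g. interrupted by a signal
 *		if (kvm_run->exit_reason == KVM_EXIT_MMIO)
 *			handle_mmio(kvm_run);	// completed on the next KVM_RUN
 *	}
 *
 * The vcpu->mmio_needed check at the top of kvm_arch_vcpu_ioctl_run() is
 * what finishes a pending MMIO load on that next entry.
 */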
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);
	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (waitqueue_active(&dvcpu->wq))
		wake_up_interruptible(&dvcpu->wq);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}
static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
	KVM_REG_MIPS_PC,

	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
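
/*
 * The array above is what KVM_GET_REG_LIST reports to userspace (see
 * kvm_arch_vcpu_ioctl() below); each listed id can then be read or written
 * individually with KVM_GET_ONE_REG / KVM_SET_ONE_REG.
 */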
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret;
	s64 v;

	switch (reg->id) {
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}
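
	/*
	 * The one-reg id also encodes the userspace transfer size
	 * (KVM_REG_SIZE_U32/U64). Reads of a U32 id truncate the value to
	 * 32 bits here; the matching write path in kvm_mips_set_reg()
	 * sign-extends 32-bit values back to 64 bits.
	 */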
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else {
		return -EINVAL;
	}
}
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u64 v;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	/* registers to be handled specially */
	case KVM_REG_MIPS_CP0_COUNT:
	case KVM_REG_MIPS_CP0_COMPARE:
	case KVM_REG_MIPS_CP0_CAUSE:
	case KVM_REG_MIPS_COUNT_CTL:
	case KVM_REG_MIPS_COUNT_RESUME:
	case KVM_REG_MIPS_COUNT_HZ:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	default:
		return -EINVAL;
	}
	return 0;
}
long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		u64 __user *reg_dest;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		reg_dest = user_list->reg;
		if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
				 sizeof(kvm_mips_get_one_regs)))
			return -EFAULT;
		return 0;
	}
	case KVM_NMI:
		/* Treat the NMI as a CPU reset */
		r = kvm_mips_reset_vcpu(vcpu);
		break;
	case KVM_INTERRUPT:
		{
			struct kvm_mips_interrupt irq;

			r = -EFAULT;
			if (copy_from_user(&irq, argp, sizeof(irq)))
				goto out;

			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
				  irq.irq);

			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
			break;
		}
	default:
		r = -ENOIOCTLCMD;
	}

out:
	return r;
}
/* Get (and clear) the dirty memory log for a memory slot. */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	unsigned long ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
			 ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}
int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}
static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (waitqueue_active(&vcpu->wq))
		wake_up_interruptible(&vcpu->wq);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_init(vcpu);
	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static void kvm_mips_set_c0_status(void)
{
	uint32_t status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	uint32_t cause = vcpu->arch.host_cp0_cause;
	uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	/* re-enable HTW before enabling interrupts */
	htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);

	/*
	 * Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case T_INT:
		kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;
		trace_kvm_exit(vcpu, INT_EXITS);

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case T_COP_UNUSABLE:
		kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case T_TLB_MOD:
		++vcpu->stat.tlbmod_exits;
		trace_kvm_exit(vcpu, TLBMOD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case T_TLB_ST_MISS:
		kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case T_TLB_LD_MISS:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case T_ADDR_ERR_ST:
		++vcpu->stat.addrerr_st_exits;
		trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case T_ADDR_ERR_LD:
		++vcpu->stat.addrerr_ld_exits;
		trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case T_SYSCALL:
		++vcpu->stat.syscall_exits;
		trace_kvm_exit(vcpu, SYSCALL_EXITS);
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case T_RES_INST:
		++vcpu->stat.resvd_inst_exits;
		trace_kvm_exit(vcpu, RESVD_INST_EXITS);
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case T_BREAK:
		++vcpu->stat.break_inst_exits;
		trace_kvm_exit(vcpu, BREAK_INST_EXITS);
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case T_MSADIS:
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	default:
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
			exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;
	}

skip_emul:
	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, SIGNAL_EXITS);
		}
	}

	/* Disable HTW before returning to guest or host */
	htw_stop();

	return ret;
}
int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	/*
	 * On MIPS, kernel modules are executed from "mapped space", which
	 * requires TLBs. The TLB handling code is statically linked with
	 * the rest of the kernel (tlb.c) to avoid the possibility of
	 * double faulting. The issue is that the TLB code references
	 * routines that are part of the KVM module, which are only
	 * available once the module is loaded.
	 */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;

	pr_info("KVM/MIPS Initialized\n");
	return 0;
}

void __exit kvm_mips_exit(void)
{
	kvm_exit();

	kvm_mips_gfn_to_pfn = NULL;
	kvm_mips_release_pfn_clean = NULL;
	kvm_mips_is_error_pfn = NULL;

	pr_info("KVM/MIPS unloaded\n");
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);