/* arch/mips/include/asm/kvm_host.h */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
  7. * Authors: Sanjay Lal <sanjayl@kymasys.com>
  8. */
  9. #ifndef __MIPS_KVM_HOST_H__
  10. #define __MIPS_KVM_HOST_H__
  11. #include <linux/mutex.h>
  12. #include <linux/hrtimer.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/types.h>
  15. #include <linux/kvm.h>
  16. #include <linux/kvm_types.h>
  17. #include <linux/threads.h>
  18. #include <linux/spinlock.h>
  19. /* MIPS KVM register ids */
  20. #define MIPS_CP0_32(_R, _S) \
  21. (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S)))
  22. #define MIPS_CP0_64(_R, _S) \
  23. (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S)))
  24. #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
  25. #define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
  26. #define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
  27. #define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
  28. #define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
  29. #define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
  30. #define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
  31. #define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
  32. #define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
  33. #define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
  34. #define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
  35. #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
  36. #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
  37. #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
  38. #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
  39. #define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
  40. #define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
  41. #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
  42. #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
  43. #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
  44. #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
  45. #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
  46. #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
  47. #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
  48. #define KVM_MAX_VCPUS 1
  49. #define KVM_USER_MEM_SLOTS 8
  50. /* memory slots that does not exposed to userspace */
  51. #define KVM_PRIVATE_MEM_SLOTS 0
  52. #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
  53. /* Special address that contains the comm page, used for reducing # of traps */
  54. #define KVM_GUEST_COMMPAGE_ADDR 0x0
  55. #define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
  56. ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
  57. #define KVM_GUEST_KUSEG 0x00000000UL
  58. #define KVM_GUEST_KSEG0 0x40000000UL
  59. #define KVM_GUEST_KSEG23 0x60000000UL
  60. #define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000)
  61. #define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
  62. #define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
  63. #define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
  64. #define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
  65. /*
  66. * Map an address to a certain kernel segment
  67. */
  68. #define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
  69. #define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
  70. #define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
  71. #define KVM_INVALID_PAGE 0xdeadbeef
  72. #define KVM_INVALID_INST 0xdeadbeef
  73. #define KVM_INVALID_ADDR 0xdeadbeef
  74. #define KVM_MALTA_GUEST_RTC_ADDR 0xb8000070UL
  75. #define GUEST_TICKS_PER_JIFFY (40000000/HZ)
  76. #define MS_TO_NS(x) (x * 1E6L)
  77. #define CAUSEB_DC 27
  78. #define CAUSEF_DC (_ULCAST_(1) << 27)
  79. extern atomic_t kvm_mips_instance;
  80. extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
  81. extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
  82. extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
/* Per-VM statistics counters. */
struct kvm_vm_stat {
	u32 remote_tlb_flush;	/* count of remote TLB flush requests */
};
/*
 * Per-VCPU event counters. Each field counts one class of guest exit or
 * wakeup event; the field order matches enum kvm_mips_exit_types below.
 */
struct kvm_vcpu_stat {
	u32 wait_exits;		/* guest executed WAIT */
	u32 cache_exits;	/* CACHE instruction exits */
	u32 signal_exits;	/* exits due to pending host signals */
	u32 int_exits;		/* interrupt exits */
	u32 cop_unusable_exits;
	u32 tlbmod_exits;
	u32 tlbmiss_ld_exits;
	u32 tlbmiss_st_exits;
	u32 addrerr_st_exits;
	u32 addrerr_ld_exits;
	u32 syscall_exits;
	u32 resvd_inst_exits;
	u32 break_inst_exits;
	u32 flush_dcache_exits;
	u32 halt_wakeup;	/* wakeups from halted (WAIT) state */
};
/*
 * Exit type indices, one per counter in struct kvm_vcpu_stat (and in the
 * same order). MAX_KVM_MIPS_EXIT_TYPES is the number of valid entries.
 */
enum kvm_mips_exit_types {
	WAIT_EXITS,
	CACHE_EXITS,
	SIGNAL_EXITS,
	INT_EXITS,
	COP_UNUSABLE_EXITS,
	TLBMOD_EXITS,
	TLBMISS_LD_EXITS,
	TLBMISS_ST_EXITS,
	ADDRERR_ST_EXITS,
	ADDRERR_LD_EXITS,
	SYSCALL_EXITS,
	RESVD_INST_EXITS,
	BREAK_INST_EXITS,
	FLUSH_DCACHE_EXITS,
	MAX_KVM_MIPS_EXIT_TYPES
};
/* MIPS keeps no per-memslot architecture-specific state. */
struct kvm_arch_memory_slot {
};
/* Per-VM architecture state. */
struct kvm_arch {
	/* Guest GVA->HPA page table */
	unsigned long *guest_pmap;
	unsigned long guest_pmap_npages;	/* number of entries in guest_pmap */
	/* Wired host TLB used for the commpage */
	int commpage_tlb;
};
  129. #define N_MIPS_COPROC_REGS 32
  130. #define N_MIPS_COPROC_SEL 8
/*
 * Shadow CP0 register file for the guest: 32 registers x 8 select fields,
 * indexed as reg[MIPS_CP0_*][sel] by the kvm_read/write_c0_guest_* macros
 * below. With CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS, a parallel array counts
 * accesses per register/select for debugging.
 */
struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};
  137. /*
  138. * Coprocessor 0 register names
  139. */
  140. #define MIPS_CP0_TLB_INDEX 0
  141. #define MIPS_CP0_TLB_RANDOM 1
  142. #define MIPS_CP0_TLB_LOW 2
  143. #define MIPS_CP0_TLB_LO0 2
  144. #define MIPS_CP0_TLB_LO1 3
  145. #define MIPS_CP0_TLB_CONTEXT 4
  146. #define MIPS_CP0_TLB_PG_MASK 5
  147. #define MIPS_CP0_TLB_WIRED 6
  148. #define MIPS_CP0_HWRENA 7
  149. #define MIPS_CP0_BAD_VADDR 8
  150. #define MIPS_CP0_COUNT 9
  151. #define MIPS_CP0_TLB_HI 10
  152. #define MIPS_CP0_COMPARE 11
  153. #define MIPS_CP0_STATUS 12
  154. #define MIPS_CP0_CAUSE 13
  155. #define MIPS_CP0_EXC_PC 14
  156. #define MIPS_CP0_PRID 15
  157. #define MIPS_CP0_CONFIG 16
  158. #define MIPS_CP0_LLADDR 17
  159. #define MIPS_CP0_WATCH_LO 18
  160. #define MIPS_CP0_WATCH_HI 19
  161. #define MIPS_CP0_TLB_XCONTEXT 20
  162. #define MIPS_CP0_ECC 26
  163. #define MIPS_CP0_CACHE_ERR 27
  164. #define MIPS_CP0_TAG_LO 28
  165. #define MIPS_CP0_TAG_HI 29
  166. #define MIPS_CP0_ERROR_PC 30
  167. #define MIPS_CP0_DEBUG 23
  168. #define MIPS_CP0_DEPC 24
  169. #define MIPS_CP0_PERFCNT 25
  170. #define MIPS_CP0_ERRCTL 26
  171. #define MIPS_CP0_DATA_LO 28
  172. #define MIPS_CP0_DATA_HI 29
  173. #define MIPS_CP0_DESAVE 31
  174. #define MIPS_CP0_CONFIG_SEL 0
  175. #define MIPS_CP0_CONFIG1_SEL 1
  176. #define MIPS_CP0_CONFIG2_SEL 2
  177. #define MIPS_CP0_CONFIG3_SEL 3
  178. /* Config0 register bits */
  179. #define CP0C0_M 31
  180. #define CP0C0_K23 28
  181. #define CP0C0_KU 25
  182. #define CP0C0_MDU 20
  183. #define CP0C0_MM 17
  184. #define CP0C0_BM 16
  185. #define CP0C0_BE 15
  186. #define CP0C0_AT 13
  187. #define CP0C0_AR 10
  188. #define CP0C0_MT 7
  189. #define CP0C0_VI 3
  190. #define CP0C0_K0 0
  191. /* Config1 register bits */
  192. #define CP0C1_M 31
  193. #define CP0C1_MMU 25
  194. #define CP0C1_IS 22
  195. #define CP0C1_IL 19
  196. #define CP0C1_IA 16
  197. #define CP0C1_DS 13
  198. #define CP0C1_DL 10
  199. #define CP0C1_DA 7
  200. #define CP0C1_C2 6
  201. #define CP0C1_MD 5
  202. #define CP0C1_PC 4
  203. #define CP0C1_WR 3
  204. #define CP0C1_CA 2
  205. #define CP0C1_EP 1
  206. #define CP0C1_FP 0
  207. /* Config2 Register bits */
  208. #define CP0C2_M 31
  209. #define CP0C2_TU 28
  210. #define CP0C2_TS 24
  211. #define CP0C2_TL 20
  212. #define CP0C2_TA 16
  213. #define CP0C2_SU 12
  214. #define CP0C2_SS 8
  215. #define CP0C2_SL 4
  216. #define CP0C2_SA 0
  217. /* Config3 Register bits */
  218. #define CP0C3_M 31
  219. #define CP0C3_ISA_ON_EXC 16
  220. #define CP0C3_ULRI 13
  221. #define CP0C3_DSPP 10
  222. #define CP0C3_LPA 7
  223. #define CP0C3_VEIC 6
  224. #define CP0C3_VInt 5
  225. #define CP0C3_SP 4
  226. #define CP0C3_MT 2
  227. #define CP0C3_SM 1
  228. #define CP0C3_TL 0
  229. /* Have config1, Cacheable, noncoherent, write-back, write allocate*/
  230. #define MIPS_CONFIG0 \
  231. ((1 << CP0C0_M) | (0x3 << CP0C0_K0))
  232. /* Have config2, no coprocessor2 attached, no MDMX support attached,
  233. no performance counters, watch registers present,
  234. no code compression, EJTAG present, no FPU, no watch registers */
  235. #define MIPS_CONFIG1 \
  236. ((1 << CP0C1_M) | \
  237. (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \
  238. (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \
  239. (0 << CP0C1_FP))
  240. /* Have config3, no tertiary/secondary caches implemented */
  241. #define MIPS_CONFIG2 \
  242. ((1 << CP0C2_M))
  243. /* No config4, no DSP ASE, no large physaddr (PABITS),
  244. no external interrupt controller, no vectored interrupts,
  245. no 1kb pages, no SmartMIPS ASE, no trace logic */
  246. #define MIPS_CONFIG3 \
  247. ((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \
  248. (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \
  249. (0 << CP0C3_SM) | (0 << CP0C3_TL))
/*
 * MMU types. The first four entries use the same encoding as the
 * CP0C0_MT (Config0 MMU Type) field.
 */
enum mips_mmu_types {
	MMU_TYPE_NONE,
	MMU_TYPE_R4000,
	MMU_TYPE_RESERVED,
	MMU_TYPE_FMT,
	MMU_TYPE_R3000,
	MMU_TYPE_R6000,
	MMU_TYPE_R8000
};
  261. /*
  262. * Trap codes
  263. */
  264. #define T_INT 0 /* Interrupt pending */
  265. #define T_TLB_MOD 1 /* TLB modified fault */
  266. #define T_TLB_LD_MISS 2 /* TLB miss on load or ifetch */
  267. #define T_TLB_ST_MISS 3 /* TLB miss on a store */
  268. #define T_ADDR_ERR_LD 4 /* Address error on a load or ifetch */
  269. #define T_ADDR_ERR_ST 5 /* Address error on a store */
  270. #define T_BUS_ERR_IFETCH 6 /* Bus error on an ifetch */
  271. #define T_BUS_ERR_LD_ST 7 /* Bus error on a load or store */
  272. #define T_SYSCALL 8 /* System call */
  273. #define T_BREAK 9 /* Breakpoint */
  274. #define T_RES_INST 10 /* Reserved instruction exception */
  275. #define T_COP_UNUSABLE 11 /* Coprocessor unusable */
  276. #define T_OVFLOW 12 /* Arithmetic overflow */
  277. /*
  278. * Trap definitions added for r4000 port.
  279. */
  280. #define T_TRAP 13 /* Trap instruction */
  281. #define T_VCEI 14 /* Virtual coherency exception */
  282. #define T_FPE 15 /* Floating point exception */
  283. #define T_MSADIS 21 /* MSA disabled exception */
  284. #define T_WATCH 23 /* Watch address reference */
  285. #define T_VCED 31 /* Virtual coherency data */
  286. /* Resume Flags */
  287. #define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
  288. #define RESUME_FLAG_HOST (1<<1) /* Resume host? */
  289. #define RESUME_GUEST 0
  290. #define RESUME_GUEST_DR RESUME_FLAG_DR
  291. #define RESUME_HOST RESUME_FLAG_HOST
/* Result of instruction emulation, returned by the kvm_mips_emulate_* helpers. */
enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,	/* privilege check failed */
};
  299. #define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
  300. #define MIPS3_PG_V 0x00000002 /* Valid */
  301. #define MIPS3_PG_NV 0x00000000
  302. #define MIPS3_PG_D 0x00000004 /* Dirty */
  303. #define mips3_paddr_to_tlbpfn(x) \
  304. (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
  305. #define mips3_tlbpfn_to_paddr(x) \
  306. ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)
  307. #define MIPS3_PG_SHIFT 6
  308. #define MIPS3_PG_FRAME 0x3fffffc0
  309. #define VPN2_MASK 0xffffe000
  310. #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \
  311. ((x).tlb_lo1 & MIPS3_PG_G))
  312. #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
  313. #define TLB_ASID(x) ((x).tlb_hi & ASID_MASK)
  314. #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \
  315. ? ((x).tlb_lo1 & MIPS3_PG_V) \
  316. : ((x).tlb_lo0 & MIPS3_PG_V))
  317. #define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \
  318. ((y) & VPN2_MASK & ~(x).tlb_mask))
  319. #define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \
  320. TLB_ASID(x) == ((y) & ASID_MASK))
/*
 * One guest software TLB entry. Fields mirror the hardware TLB registers:
 * tlb_mask is the PageMask, tlb_hi the EntryHi (VPN2 + ASID — see the
 * TLB_VPN2/TLB_ASID macros above), and tlb_lo0/tlb_lo1 the EntryLo pair.
 */
struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo0;
	long tlb_lo1;
};
  327. #define KVM_MIPS_GUEST_TLB_SIZE 64
/* Per-VCPU architecture state: guest register file, timer, and TLB context. */
struct kvm_vcpu_arch {
	void *host_ebase, *guest_ebase;	/* exception base addresses */
	unsigned long host_stack;
	unsigned long host_gp;
	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_cause;
	unsigned long host_cp0_epc;
	unsigned long host_cp0_entryhi;
	uint32_t guest_inst;		/* instruction that caused the exit */
	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;
	/* FPU State */
	struct mips_fpu_struct fpu;
	/* COP0 State */
	struct mips_coproc *cop0;
	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;
	u32 io_gpr;			/* GPR used as IO source/target */
	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	uint32_t count_ctl;
	/* Count bias from the raw time */
	uint32_t count_bias;
	/* Frequency of timer in Hz */
	uint32_t count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;
	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;
	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;
	unsigned long pending_load_cause;
	/* Save/Restore the entryhi register when we are preempted/scheduled back in */
	unsigned long preempt_entryhi;
	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
	/* Cached guest kernel/user ASIDs */
	uint32_t guest_user_asid[NR_CPUS];
	uint32_t guest_kernel_asid[NR_CPUS];
	struct mm_struct guest_kernel_mm, guest_user_mm;
	int last_sched_cpu;	/* CPU this VCPU last ran on */
	/* WAIT executed */
	int wait;
};
  380. #define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0])
  381. #define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val)
  382. #define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0])
  383. #define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0])
  384. #define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0])
  385. #define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val))
  386. #define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2])
  387. #define kvm_write_c0_guest_userlocal(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2] = (val))
  388. #define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0])
  389. #define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val))
  390. #define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0])
  391. #define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val))
  392. #define kvm_read_c0_guest_hwrena(cop0) (cop0->reg[MIPS_CP0_HWRENA][0])
  393. #define kvm_write_c0_guest_hwrena(cop0, val) (cop0->reg[MIPS_CP0_HWRENA][0] = (val))
  394. #define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0])
  395. #define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val))
  396. #define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0])
  397. #define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val))
  398. #define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0])
  399. #define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val))
  400. #define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0])
  401. #define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val))
  402. #define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0])
  403. #define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val))
  404. #define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1])
  405. #define kvm_write_c0_guest_intctl(cop0, val) (cop0->reg[MIPS_CP0_STATUS][1] = (val))
  406. #define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0])
  407. #define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val))
  408. #define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0])
  409. #define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val))
  410. #define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0])
  411. #define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val))
  412. #define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1])
  413. #define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val))
  414. #define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0])
  415. #define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1])
  416. #define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2])
  417. #define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3])
  418. #define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7])
  419. #define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val))
  420. #define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val))
  421. #define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val))
  422. #define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val))
  423. #define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
  424. #define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
  425. #define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
/*
 * Some of the guest registers may be modified asynchronously (e.g. from a
 * hrtimer callback in hard irq context) and therefore need stronger atomicity
 * guarantees than other registers.
 */

/*
 * Atomically OR @val into the guest CP0 register at @reg, using an LL/SC
 * loop that retries until the store-conditional succeeds (temp == 0 means
 * SC failed and another writer intervened).
 */
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		" .set mips3 \n"
		" " __LL "%0, %1 \n"
		" or %0, %2 \n"
		" " __SC "%0, %1 \n"
		" .set mips0 \n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));	/* SC returned 0: lost the race, retry */
}
/*
 * Atomically clear the bits given by @val in the guest CP0 register at
 * @reg (ANDs with ~val), using the same LL/SC retry loop as the set
 * variant above.
 */
static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		" .set mips3 \n"
		" " __LL "%0, %1 \n"
		" and %0, %2 \n"
		" " __SC "%0, %1 \n"
		" .set mips0 \n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));	/* note: complement computed in C, passed as operand */
	} while (unlikely(!temp));	/* retry until SC succeeds */
}
/*
 * Atomically replace the bits selected by @change in the guest CP0
 * register at @reg with the corresponding bits of @val:
 * *reg = (*reg & ~change) | (val & change). LL/SC loop as above.
 */
static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;

	do {
		__asm__ __volatile__(
		" .set mips3 \n"
		" " __LL "%0, %1 \n"
		" and %0, %2 \n"
		" or %0, %3 \n"
		" " __SC "%0, %1 \n"
		" .set mips0 \n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));	/* retry until SC succeeds */
}
  478. #define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
  479. #define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
  480. /* Cause can be modified asynchronously from hardirq hrtimer callback */
  481. #define kvm_set_c0_guest_cause(cop0, val) \
  482. _kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
  483. #define kvm_clear_c0_guest_cause(cop0, val) \
  484. _kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
  485. #define kvm_change_c0_guest_cause(cop0, change, val) \
  486. _kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], \
  487. change, val)
  488. #define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
  489. #define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
  490. #define kvm_change_c0_guest_ebase(cop0, change, val) \
  491. { \
  492. kvm_clear_c0_guest_ebase(cop0, change); \
  493. kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \
  494. }
/*
 * Virtualization backend hooks, installed via kvm_mips_emulation_init()
 * (see below). handle_* hooks service specific guest exit causes; the
 * rest cover VM/VCPU setup, address translation, interrupt queueing and
 * the one-reg get/set interface.
 */
struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*vm_init)(struct kvm *kvm);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	gpa_t (*gva_to_gpa)(gva_t gva);		/* guest virtual -> guest physical */
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   uint32_t cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 uint32_t cause);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
};
  525. extern struct kvm_mips_callbacks *kvm_mips_callbacks;
  526. int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
  527. /* Debug: dump vcpu state */
  528. int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
  529. /* Trampoline ASM routine to start running in "Guest" context */
  530. extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
  531. /* TLB handling */
  532. uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
  533. uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
  534. uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
  535. extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
  536. struct kvm_vcpu *vcpu);
  537. extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
  538. struct kvm_vcpu *vcpu);
  539. extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
  540. struct kvm_mips_tlb *tlb,
  541. unsigned long *hpa0,
  542. unsigned long *hpa1);
  543. extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
  544. uint32_t *opc,
  545. struct kvm_run *run,
  546. struct kvm_vcpu *vcpu);
  547. extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
  548. uint32_t *opc,
  549. struct kvm_run *run,
  550. struct kvm_vcpu *vcpu);
  551. extern void kvm_mips_dump_host_tlbs(void);
  552. extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
  553. extern void kvm_mips_flush_host_tlb(int skip_kseg0);
  554. extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
  555. extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
  556. extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
  557. unsigned long entryhi);
  558. extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr);
  559. extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
  560. unsigned long gva);
  561. extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
  562. struct kvm_vcpu *vcpu);
  563. extern void kvm_local_flush_tlb_all(void);
  564. extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
  565. extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
  566. extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
  567. /* Emulation */
  568. uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
  569. enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
  570. extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
  571. uint32_t *opc,
  572. struct kvm_run *run,
  573. struct kvm_vcpu *vcpu);
  574. extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
  575. uint32_t *opc,
  576. struct kvm_run *run,
  577. struct kvm_vcpu *vcpu);
  578. extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
  579. uint32_t *opc,
  580. struct kvm_run *run,
  581. struct kvm_vcpu *vcpu);
  582. extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
  583. uint32_t *opc,
  584. struct kvm_run *run,
  585. struct kvm_vcpu *vcpu);
  586. extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
  587. uint32_t *opc,
  588. struct kvm_run *run,
  589. struct kvm_vcpu *vcpu);
  590. extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
  591. uint32_t *opc,
  592. struct kvm_run *run,
  593. struct kvm_vcpu *vcpu);
  594. extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
  595. uint32_t *opc,
  596. struct kvm_run *run,
  597. struct kvm_vcpu *vcpu);
  598. extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
  599. uint32_t *opc,
  600. struct kvm_run *run,
  601. struct kvm_vcpu *vcpu);
  602. extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
  603. uint32_t *opc,
  604. struct kvm_run *run,
  605. struct kvm_vcpu *vcpu);
  606. extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
  607. uint32_t *opc,
  608. struct kvm_run *run,
  609. struct kvm_vcpu *vcpu);
  610. extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
  611. uint32_t *opc,
  612. struct kvm_run *run,
  613. struct kvm_vcpu *vcpu);
  614. extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
  615. struct kvm_run *run);
  616. uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
  617. void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
  618. void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
  619. void kvm_mips_init_count(struct kvm_vcpu *vcpu);
  620. int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
  621. int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
  622. int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
  623. void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
  624. void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
  625. enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
  626. enum emulation_result kvm_mips_check_privilege(unsigned long cause,
  627. uint32_t *opc,
  628. struct kvm_run *run,
  629. struct kvm_vcpu *vcpu);
  630. enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
  631. uint32_t *opc,
  632. uint32_t cause,
  633. struct kvm_run *run,
  634. struct kvm_vcpu *vcpu);
  635. enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
  636. uint32_t *opc,
  637. uint32_t cause,
  638. struct kvm_run *run,
  639. struct kvm_vcpu *vcpu);
  640. enum emulation_result kvm_mips_emulate_store(uint32_t inst,
  641. uint32_t cause,
  642. struct kvm_run *run,
  643. struct kvm_vcpu *vcpu);
  644. enum emulation_result kvm_mips_emulate_load(uint32_t inst,
  645. uint32_t cause,
  646. struct kvm_run *run,
  647. struct kvm_vcpu *vcpu);
  648. /* Dynamic binary translation */
  649. extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
  650. struct kvm_vcpu *vcpu);
  651. extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
  652. struct kvm_vcpu *vcpu);
  653. extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
  654. struct kvm_vcpu *vcpu);
  655. extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
  656. struct kvm_vcpu *vcpu);
  657. /* Misc */
  658. extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
  659. extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
/*
 * No-op implementations of generic KVM arch hooks: MIPS needs no work for
 * these events, so they are empty static inlines rather than out-of-line
 * functions.
 */
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
		struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
		struct kvm_memory_slot *slot) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
  671. #endif /* __MIPS_KVM_HOST_H__ */