/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	int i, rc;
	u64 op2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, &val, sizeof(val));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
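	/*
	 * Note: SIE derives the guest TOD clock as host TOD + epoch, so
	 * setting the guest clock to "val" amounts to giving every vcpu
	 * an epoch of (val - host TOD). Clearing the low six bits keeps
	 * the epoch at the granularity that SET CLOCK architecturally
	 * provides.
	 */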
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* store the current prefix value */
	rc = write_guest(vcpu, operand2, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

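/*
 * Storage keys are handled lazily: while the guest has not used a key
 * instruction, ISKE/SSKE/RRBE intercept (the ICTL_* bits are set). On
 * first use we enable key handling in the host and clear the intercept
 * bits, so later key instructions can run in SIE without exiting.
 */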
static void __skey_check_enable(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return;

	s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
	__skey_check_enable(vcpu);

	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;

	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(*psw).p)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	psw->addr = __rewind_psw(*psw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_protection(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = write_guest_lc(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			    vfacilities, 4);
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

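/*
 * The EA/BA pair in the PSW mask selects the addressing mode:
 * 00 -> 24 bit, 01 -> 31 bit, 11 -> 64 bit; EA without BA (10) is
 * an invalid combination, as checked below.
 */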
int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->arch.stidp_data;
	u64 operand2;
	int rc;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x21] = handle_ipte_interlock,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x50] = handle_ipte_interlock,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

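/*
 * PFMF (PERFORM FRAME MANAGEMENT FUNCTION) takes its controls from r1:
 * SK/CF/UI select set-key, clear-frame and usage-indication, FSC is the
 * frame size code (0 = 4K, 1 = 1M, 2 = 2G with EDAT-2) and KEY is the
 * storage key to set.
 */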
#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_protection(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr, abs_addr;

		/* Translate guest address to host address */
		if ((vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) == 0)
			abs_addr = kvm_s390_real_to_abs(vcpu, start);
		else
			abs_addr = start;
		useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(abs_addr));
		if (kvm_is_error_hva(useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			__skey_check_enable(vcpu);
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

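/*
 * With CMMA, SIE logs the frames the guest releases via ESSA into the
 * buffer page designated by cbrlo and intercepts when it fills up. The
 * handler zaps the host backing of each logged frame and rewinds the
 * PSW so the guest re-executes ESSA with an empty buffer.
 */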
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			/* invalid entry */
			break;
		/* try to free backing */
		__gmap_zap(gmap, cbrle);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8a] = handle_ipte_interlock,
	[0x8d] = handle_epsw,
	[0x8e] = handle_ipte_interlock,
	[0x8f] = handle_ipte_interlock,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u32 val = 0;
	int reg, rc;
	u64 ga;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	do {
		rc = read_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		ga += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 ga;
	u32 val;
	int reg, rc;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "stctl r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	do {
		val = vcpu->arch.sie_block->gcr[reg] & 0x00000000fffffffful;
		rc = write_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		ga += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	return 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 ga, val;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	do {
		rc = read_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		vcpu->arch.sie_block->gcr[reg] = val;
		ga += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 ga, val;
	int reg, rc;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "stctg r1:%x, r3:%x, addr:%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	do {
		val = vcpu->arch.sie_block->gcr[reg];
		rc = write_guest(vcpu, ga, &val, sizeof(val));
		if (rc)
			return kvm_s390_inject_prog_cond(vcpu, rc);
		ga += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	return 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
	[0x25] = handle_stctg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

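/*
 * TPROT reports accessibility of the tested address in the condition
 * code: cc0 fetch and store permitted, cc1 fetch only, cc2 neither
 * (key-controlled protection, not handled here), cc3 translation not
 * available.
 */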
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, &gpa, 1);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, &gpa, 0);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}