/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/memory.h>
#include <asm/page.h>

/*
 * We directly use the kernel VA for the HYP, as we can directly share
 * the mapping (HTTBR "covers" TTBR1).
 */
#define HYP_PAGE_OFFSET_MASK    UL(~0)
#define HYP_PAGE_OFFSET         PAGE_OFFSET
#define KERN_TO_HYP(kva)        (kva)

/*
 * Our virtual mapping for the boot-time MMU-enable code. Must be
 * shared across all the page-tables. Conveniently, we use the vectors
 * page, where no kernel data will ever be shared with HYP.
 */
#define TRAMPOLINE_VA           UL(CONFIG_VECTORS_BASE)

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
 */
#define KVM_MMU_CACHE_MIN_PAGES 2

#ifndef __ASSEMBLY__

#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
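
/*
 * HYP and stage-2 page table management, implemented in
 * arch/arm/kvm/mmu.c.
 */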
int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_boot_hyp_pgd(void);
void free_hyp_pgds(void);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
                          phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_mmu_get_boot_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);
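
/*
 * Install a new table entry and clean it out of the D-cache, so that
 * the hardware table walker, which is not guaranteed to snoop the
 * cache, observes the update.
 */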
static inline void kvm_set_pmd(pmd_t *pmd, pmd_t new_pmd)
{
        *pmd = new_pmd;
        flush_pmd_entry(pmd);
}

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
        *pte = new_pte;
        /*
         * flush_pmd_entry just takes a void pointer and cleans the necessary
         * cache entries, so we can reuse the function for ptes.
         */
        flush_pmd_entry(pte);
}
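
/*
 * Clean freshly initialised page table memory out of the D-cache, so
 * that a table walk which bypasses the cache reads fully written
 * entries.
 */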
static inline void kvm_clean_pgd(pgd_t *pgd)
{
        clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
}

static inline void kvm_clean_pmd(pmd_t *pmd)
{
        clean_dcache_area(pmd, PTRS_PER_PMD * sizeof(pmd_t));
}

static inline void kvm_clean_pmd_entry(pmd_t *pmd)
{
        clean_pmd_entry(pmd);
}

static inline void kvm_clean_pte(pte_t *pte)
{
        clean_pte_table(pte);
}
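
/*
 * Set the stage-2 access permission field (HAP[2:1]) to read/write.
 */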
static inline void kvm_set_s2pte_writable(pte_t *pte)
{
        pte_val(*pte) |= L_PTE_S2_RDWR;
}

static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
{
        pmd_val(*pmd) |= L_PMD_S2_RDWR;
}

/*
 * Open coded p*d_addr_end that can deal with 64bit addresses: stage-2
 * input addresses (IPAs) are wider than 32 bits, so the generic
 * helpers, which operate on 32-bit unsigned long, would truncate
 * them. The pud level is folded on our 3-level tables, so
 * kvm_pud_addr_end simply returns the end of the range.
 */
#define kvm_pgd_addr_end(addr, end)                                     \
({      u64 __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;            \
        (__boundary - 1 < (end) - 1)? __boundary: (end);                \
})

#define kvm_pud_addr_end(addr, end)             (end)

#define kvm_pmd_addr_end(addr, end)                                     \
({      u64 __boundary = ((addr) + PMD_SIZE) & PMD_MASK;                \
        (__boundary - 1 < (end) - 1)? __boundary: (end);                \
})

#define kvm_pgd_index(addr)                     pgd_index(addr)
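
/*
 * Stage-2 page table pages take one page reference per live entry, on
 * top of the allocation reference, so a page count of 1 means the
 * table no longer contains any entries.
 */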
static inline bool kvm_page_empty(void *ptr)
{
        struct page *ptr_page = virt_to_page(ptr);
        return page_count(ptr_page) == 1;
}

#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
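/* No pud level exists at stage 2, so a pud "table" is never empty. */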
#define kvm_pud_table_empty(kvm, pudp) (0)
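
/* Unlike arm64, no stage-2 page table levels need to be preallocated. */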
#define KVM_PREALLOC_LEVEL      0
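
/*
 * With no preallocated levels, the pgd handed to the hardware is the
 * very one KVM allocated for the VM.
 */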
static inline void *kvm_get_hwpgd(struct kvm *kvm)
{
        return kvm->arch.pgd;
}

static inline unsigned int kvm_get_hwpgd_size(void)
{
        return PTRS_PER_S2_PGD * sizeof(pgd_t);
}

struct kvm;
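
/* Clean and invalidate the region to the point of coherency. */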
#define kvm_flush_dcache_to_poc(a, l)   __cpuc_flush_dcache_area((a), (l))
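
/*
 * The guest's caches are on only if both SCTLR.M (bit 0, MMU enable)
 * and SCTLR.C (bit 2, D-cache enable) are set, hence the 0b101 mask.
 */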
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
        return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
                                               unsigned long size,
                                               bool ipa_uncached)
{
        /*
         * If we are going to insert an instruction page and the icache is
         * either VIPT or PIPT, there is a potential problem where the host
         * (or another VM) may have used the same page as this guest, and we
         * read incorrect data from the icache.  If we're using a PIPT cache,
         * we can invalidate just that page, but if we are using a VIPT cache
         * we need to invalidate the entire icache - damn shame - as written
         * in the ARM ARM (DDI 0406C.b - Page B3-1393).
         *
         * VIVT caches are tagged using both the ASID and the VMID and don't
         * need any kind of flushing (DDI 0406C.b - Page B3-1392).
         *
         * We need to do this through a kernel mapping (using the
         * user-space mapping has proved to be the wrong
         * solution).  For that, we need to kmap one page at a time,
         * and iterate over the range.
         */
        bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;

        VM_BUG_ON(size & ~PAGE_MASK);

        if (!need_flush && !icache_is_pipt())
                goto vipt_cache;

        while (size) {
                void *va = kmap_atomic_pfn(pfn);

                if (need_flush)
                        kvm_flush_dcache_to_poc(va, PAGE_SIZE);

                if (icache_is_pipt())
                        __cpuc_coherent_user_range((unsigned long)va,
                                                   (unsigned long)va + PAGE_SIZE);

                size -= PAGE_SIZE;
                pfn++;

                kunmap_atomic(va);
        }

vipt_cache:
        if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
                /* any kind of VIPT cache */
                __flush_icache_all();
        }
}
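
/*
 * The following helpers flush, via a kernel mapping, the data cache
 * for the memory backing a stage-2 entry, e.g. so that guest data
 * reaches RAM before a mapping is torn down.
 */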
static inline void __kvm_flush_dcache_pte(pte_t pte)
{
        void *va = kmap_atomic(pte_page(pte));

        kvm_flush_dcache_to_poc(va, PAGE_SIZE);

        kunmap_atomic(va);
}

static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
{
        unsigned long size = PMD_SIZE;
        pfn_t pfn = pmd_pfn(pmd);

        while (size) {
                void *va = kmap_atomic_pfn(pfn);

                kvm_flush_dcache_to_poc(va, PAGE_SIZE);

                pfn++;
                size -= PAGE_SIZE;

                kunmap_atomic(va);
        }
}
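
/*
 * No huge pud mappings exist at stage 2 on 32-bit ARM, so there is
 * nothing to flush.
 */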
static inline void __kvm_flush_dcache_pud(pud_t pud)
{
}
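
/*
 * Translate to the physical address used by the identity mapping,
 * which on some platforms differs from virt_to_phys() by a fixed
 * offset.
 */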
#define kvm_virt_to_phys(x)             virt_to_idmap((unsigned long)(x))

void stage2_flush_vm(struct kvm *kvm);

#endif  /* !__ASSEMBLY__ */

#endif /* __ARM_KVM_MMU_H__ */