#ifndef _ASM_POWERPC_MMU_HASH64_H_
#define _ASM_POWERPC_MMU_HASH64_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slice-related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/pgtable-ppc64.h>
#include <asm/bug.h>
#include <asm/processor.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25
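
/*
 * Illustrative sketch (compiled out): how the ESID and VSID dwords of an
 * SLB entry are typically assembled before being loaded with "slbmte".
 * The helper names are hypothetical; "flags" would be something like
 * SLB_VSID_KERNEL or SLB_VSID_USER or'ed with the sllp bits for the
 * page size.  slb_vsid_shift()/segment_shift() are defined later in
 * this header.
 */
#if 0
static inline unsigned long mk_slb_vsid(unsigned long vsid, int ssize,
					unsigned long flags)
{
	/* VSID in the high bits, B (segment size) on top, prot/LP flags low */
	return (vsid << slb_vsid_shift(ssize)) | flags |
	       ((unsigned long)ssize << SLB_VSID_SSIZE_SHIFT);
}

static inline unsigned long mk_slb_esid(unsigned long ea, int ssize,
					unsigned long entry)
{
	/* ESID of the address, the valid bit, and the SLB slot number */
	return (ea & ~((1UL << segment_shift(ssize)) - 1)) |
	       SLB_ESID_V | entry;
}
#endif /* example */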

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read, User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
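
/*
 * Illustrative sketch (compiled out): walking every POWER7 TLB set with
 * tlbiel, building the RB value from the fields above.  This is a sketch
 * of the architecture 2.06 set-invalidation form, not a drop-in
 * replacement for the kernel's actual flush code.
 */
#if 0
static inline void flush_power7_tlb_sets(void)
{
	unsigned long rb;
	int set;

	for (set = 0; set < POWER7_TLB_SETS; set++) {
		/* select "invalidate a set for all LPIDs", then the set # */
		rb = TLBIEL_INVAL_SET;
		rb |= (unsigned long)set << TLBIEL_INVAL_SET_SHIFT;
		asm volatile("tlbiel %0" : : "r" (rb) : "memory");
	}
	asm volatile("ptesync" : : : "memory");
}
#endif /* example */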

#ifndef __ASSEMBLY__

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
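
/*
 * Illustrative sketch (compiled out): how the tables above are typically
 * consulted, e.g. turning the kernel linear-mapping page size index
 * (mmu_linear_psize, declared later in this header) into its shift and
 * SLB encoding.
 */
#if 0
static inline void psize_lookup_example(void)
{
	unsigned int shift = mmu_psize_to_shift(mmu_linear_psize);
	unsigned long sllp = mmu_psize_defs[mmu_linear_psize].sllp;
	/* round-trips back to mmu_linear_psize */
	int psize = shift_to_mmu_psize(shift);
}
#endif /* example */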

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * encode page number shift.
 * in order to fit the 78 bit va in a 64 bit variable we shift the va by
 * 12 bits. This enables us to address up to a 76 bit va.
 * For hpt hash from a va we can ignore the page size bits of va and for
 * hpte encoding we ignore up to 23 bits of va. So ignoring the lower 12 bits
 * ensures we work in all cases including 4k page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * The current system page and segment sizes
 */
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of an HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
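
/*
 * Illustrative sketch (compiled out): combining the helpers above to
 * build both dwords of an HPTE for insertion.  "rflags" (WIMG + PP
 * protection bits) is assumed to have been derived from the Linux PTE
 * already; the name make_hpte is hypothetical.
 */
#if 0
static inline void make_hpte(struct hash_pte *hptep, unsigned long vpn,
			     unsigned long pa, unsigned long rflags,
			     int psize, int ssize)
{
	unsigned long v = hpte_encode_v(vpn, psize, psize, ssize) | HPTE_V_VALID;
	unsigned long r = hpte_encode_r(pa, psize, psize) | rflags;

	hptep->r = cpu_to_be64(r);
	/* the valid dword must become visible after the RPN dword */
	hptep->v = cpu_to_be64(v);
}
#endif /* example */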

/*
 * Build a VPN_SHIFT-bit shifted va given VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
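
/*
 * Illustrative sketch (compiled out): turning the hash above into
 * primary and secondary PTE-group offsets within the hash table, using
 * htab_hash_mask and HPTES_PER_GROUP from this header.
 */
#if 0
static inline unsigned long hpte_group_example(unsigned long vpn,
					       unsigned int shift, int ssize)
{
	unsigned long hash = hpt_hash(vpn, shift, ssize);
	unsigned long primary = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	unsigned long secondary = (~hash & htab_hash_mask) * HPTES_PER_GROUP;

	return primary;	/* search 'secondary' if the primary group is full */
}
#endif /* example */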

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   int local, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, int local,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);

#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and effective segment id of the address.
 *
 * For user processes the max context id is limited to ((1ul << 19) - 5);
 * for kernel space, we use the top 4 context ids to map addresses as below.
 * NOTE: each context only supports 64TB now.
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
 * a bad address. This enables us to consolidate bad address handling in
 * hash_page.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in the vsid scramble. But the vmemmap
 * (which is what uses region 0xf) will never be close to 64TB in size
 * (it's 56 bytes per page of system memory).
 */
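
/*
 * Worked illustration of the 2^n - 1 modulus trick used by the scramble
 * code below: for x < 2^(2n), x mod (2^n - 1) can be computed by folding
 * the high half into the low half, because 2^n == 1 (mod 2^n - 1):
 *
 *	x = hi * 2^n + lo  ==>  x mod (2^n - 1) == (hi + lo) mod (2^n - 1)
 *
 * One fold can still leave a value of 2^n - 1 or slightly above, which
 * the "+ ((x+1) >> n)" correction in vsid_scramble() (further below)
 * folds one more time.
 */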

#define CONTEXT_BITS		19
#define ESID_BITS		18
#define ESID_BITS_1T		6

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)

#define USER_VSID_RANGE		(1UL << (ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt.  The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* NOTE: explanation based on VSID_BITS_##size = 36		\
	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k).  For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS	(PAGE_SHIFT - 2)
#define SBP_L2_BITS	(PAGE_SHIFT - 3)
#define SBP_L1_COUNT	(1 << SBP_L1_BITS)
#define SBP_L2_COUNT	(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)
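
/*
 * Illustrative sketch (compiled out) of a 3-level lookup in the table
 * above, returning the sub-page protection word for an address.  This
 * is loosely modelled on the hash fault path's lookup; the function
 * name is illustrative.
 */
#if 0
static inline u32 spp_lookup(struct subpage_prot_table *spt, unsigned long ea)
{
	u32 **sbpm, *sbpp;

	if (ea >= spt->maxaddr)
		return 0;
	if (ea < 0x100000000UL)		/* low_prot covers the first 4GB */
		sbpm = spt->low_prot;
	else
		sbpm = spt->protptrs[ea >> SBP_L3_SHIFT];
	if (!sbpm)
		return 0;
	sbpp = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
	if (!sbpp)
		return 0;
	return sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
}
#endif /* example */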

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

typedef unsigned long mm_context_id_t;
struct spinlock;

typedef struct {
	mm_context_id_t id;
	u16 user_psize;		/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* SLB page size encodings */
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
#ifdef CONFIG_PPC_ICSWX
	struct spinlock *cop_lockp;	/* guard acop and cop_pid */
	unsigned long acop;		/* mask of enabled coprocessor types */
	unsigned int cop_pid;		/* pid value used with coprocessors */
#endif /* CONFIG_PPC_ICSWX */
#ifdef CONFIG_PPC_64K_PAGES
	/* for 4K PTE fragment support */
	void *pte_frag;
#endif
} mm_context_t;

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address. We return VSID 0 for that.
	 */
	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map addresses as below:
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the top 4 contexts from the available range.
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}
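
/*
 * Worked example: for ea = 0xc000000000000000, (ea >> 60) - 0xc = 0, so
 * context = MAX_USER_CONTEXT + 1 = 0x7fffc, matching the table above;
 * region 0xd gets 0x7fffd, and so on up to 0x7ffff for region 0xf.
 */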

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_MMU_HASH64_H_ */