/* mtkpasr_hw.c */

#define pr_fmt(fmt) "["KBUILD_MODNAME"]" fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <mach/emi_mpu.h>
#ifdef CONFIG_ARM_LPAE
#include <mach/mt_lpae.h>
#endif

#include "mtkpasr_drv.h"

/* Struct for parsing rank information (SW view) */
struct view_rank {
	unsigned long start_pfn;	/* The 1st pfn (kernel pfn) */
	unsigned long end_pfn;		/* The pfn after the last valid one (kernel pfn) */
	unsigned long bank_pfn_size;	/* Bank size in PFN */
	unsigned long valid_channel;	/* Channels: 0x00000101 means there are
					 * 2 valid channels - 1st & 2nd
					 * (MAX: 4 channels) */
};

static struct view_rank rank_info[MAX_RANKS];

/* Basic DRAM configuration */
static struct basic_dram_setting pasrdpd;

/* MTKPASR control variables */
static unsigned int channel_count;
static unsigned int rank_count;
static unsigned int banks_per_rank;
static unsigned long mtkpasr_start_pfn;
static unsigned long mtkpasr_end_pfn;
static unsigned long mtkpasr_segment_bits;

#ifdef CONFIG_ARM_LPAE
#define MAX_KERNEL_PFN	(0x13FFFF)
#define MAX_KPFN_MASK	(0x0FFFFF)
#define KPFN_TO_VIRT	(0x100000)

static unsigned long __init virt_to_kernel_pfn(unsigned long virt)
{
	unsigned long ret = virt;

	if (enable_4G())
		if (virt > MAX_KERNEL_PFN)
			ret = virt & MAX_KPFN_MASK;
	return ret;
}

static unsigned long __init kernel_pfn_to_virt(unsigned long kpfn)
{
	unsigned long ret = kpfn;

	if (enable_4G())
		ret = kpfn | KPFN_TO_VIRT;
	return ret;
}

static unsigned long __init rank_pfn_offset(void)
{
	unsigned long ret = ARCH_PFN_OFFSET;

	if (enable_4G())
		ret = KPFN_TO_VIRT;
	return ret;
}
#else
#define virt_to_kernel_pfn(x)	(x)
#define kernel_pfn_to_virt(x)	(x)
#define rank_pfn_offset()	((unsigned long)ARCH_PFN_OFFSET)
#endif

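/*
 * Note on the 4GB-mode mapping above: virt_to_kernel_pfn() masks a pfn above
 * MAX_KERNEL_PFN down with MAX_KPFN_MASK (e.g. 0x140000 becomes 0x40000),
 * and kernel_pfn_to_virt() restores it by OR-ing KPFN_TO_VIRT back in
 * (0x40000 | 0x100000 == 0x140000). Without CONFIG_ARM_LPAE both helpers
 * are identity macros.
 */
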
/* Round up by "base" from "offset" */
static unsigned long __init round_up_base_offset(unsigned long input, unsigned long base, unsigned long offset)
{
	return ((input - offset + base - 1) / base) * base + offset;
}

/* Round down by "base" from "offset" */
static unsigned long __init round_down_base_offset(unsigned long input, unsigned long base, unsigned long offset)
{
	return ((input - offset) / base) * base + offset;
}

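/*
 * Worked example of the helpers above: with base == 0x10000 and
 * offset == 0x4000, round_up_base_offset(0x87654, 0x10000, 0x4000) yields
 * 0x94000 and round_down_base_offset(0x87654, 0x10000, 0x4000) yields
 * 0x84000, i.e. both results are aligned to "base" measured from "offset"
 * rather than from zero.
 */
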
/*
 * Check DRAM configuration - transform DRAM setting to temporary bank structure.
 * Return 0 on success, -1 on error.
 */
extern void acquire_dram_setting(struct basic_dram_setting *pasrdpd) __attribute__((weak));

static int __init check_dram_configuration(void)
{
	int chan, rank, check_segment_num;
	unsigned long valid_channel;
	unsigned long check_rank_size, rank_pfn, start_pfn = rank_pfn_offset();

	/* Acquire basic DRAM setting */
	acquire_dram_setting(&pasrdpd);

	/* Parse DRAM setting */
	channel_count = pasrdpd.channel_nr;
	for (rank = 0; rank < MAX_RANKS; ++rank) {
		rank_pfn = 0;
		rank_info[rank].valid_channel = 0x0;
		valid_channel = 0x1;
		check_rank_size = 0x0;
		check_segment_num = 0x0;
		for (chan = 0; chan < channel_count; ++chan) {
			if (pasrdpd.channel[chan].rank[rank].valid_rank) {
				/* # Gb -> # pages */
				rank_pfn += (pasrdpd.channel[chan].rank[rank].rank_size << (27 - PAGE_SHIFT));
				rank_info[rank].valid_channel |= valid_channel;
				/* Sanity check for rank size */
				if (!check_rank_size) {
					check_rank_size = pasrdpd.channel[chan].rank[rank].rank_size;
				} else {
					/* We only support ranks with equal size */
					if (check_rank_size != pasrdpd.channel[chan].rank[rank].rank_size)
						return -1;
				}
				/* Sanity check for segment number */
				if (!check_segment_num) {
					check_segment_num = pasrdpd.channel[chan].rank[rank].segment_nr;
				} else {
					/* We only support ranks with equal segment number */
					if (check_segment_num != pasrdpd.channel[chan].rank[rank].segment_nr)
						return -1;
				}
			}
			valid_channel <<= 8;
		}
		/* Have we found a valid rank */
		if (check_rank_size != 0 && check_segment_num != 0) {
			rank_info[rank].start_pfn = virt_to_kernel_pfn(start_pfn);
			rank_info[rank].end_pfn = virt_to_kernel_pfn(start_pfn + rank_pfn);
			rank_info[rank].bank_pfn_size = rank_pfn / check_segment_num;
			start_pfn = kernel_pfn_to_virt(rank_info[rank].end_pfn);
			pr_debug("Rank[%d] start_pfn[%8lu] end_pfn[%8lu] bank_pfn_size[%8lu] valid_channel[0x%-8lx]\n",
				 rank, rank_info[rank].start_pfn, rank_info[rank].end_pfn,
				 rank_info[rank].bank_pfn_size, rank_info[rank].valid_channel);
			rank_count++;
			banks_per_rank = check_segment_num;
		} else {
			rank_info[rank].start_pfn = virt_to_kernel_pfn(rank_pfn_offset());
			rank_info[rank].end_pfn = virt_to_kernel_pfn(rank_pfn_offset());
			rank_info[rank].bank_pfn_size = 0;
			rank_info[rank].valid_channel = 0x0;
		}
	}

	return 0;
}

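/*
 * Sizing example for the Gb-to-pages conversion above (assuming 4 KB pages,
 * i.e. PAGE_SHIFT == 12): a 1 Gb rank contributes 1 << (27 - 12) == 32768
 * pages (128 MB) per valid channel, so two channels of 1 Gb each give
 * rank_pfn == 65536 pages (256 MB), split into segment_nr equally sized banks.
 */
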
/*
 * Check whether it is a valid rank
 */
static bool __init is_valid_rank(int rank)
{
	/* Check start/end pfn */
	if (rank_info[rank].start_pfn == rank_info[rank].end_pfn)
		return false;

	/* Check valid_channel */
	if (rank_info[rank].valid_channel == 0x0)
		return false;

	return true;
}

/*
 * Fill mtkpasr_segment_bits
 */
static void __init find_mtkpasr_valid_segment(unsigned long start, unsigned long end)
{
	int num_segment, rank;
	unsigned long spfn, epfn;
	unsigned long rspfn, repfn;
	unsigned long bank_pfn_size;

	num_segment = 0;
	for (rank = 0; rank < MAX_RANKS; ++rank) {
		spfn = kernel_pfn_to_virt(start);
		epfn = kernel_pfn_to_virt(end);
		rspfn = kernel_pfn_to_virt(rank_info[rank].start_pfn);
		repfn = kernel_pfn_to_virt(rank_info[rank].end_pfn);
		if (!is_valid_rank(rank))
			continue;
		bank_pfn_size = rank_info[rank].bank_pfn_size;
		if (epfn >= spfn) {
			/* Intersect */
			if (spfn < repfn && rspfn < epfn) {
				spfn = max(spfn, rspfn);
				epfn = min(epfn, repfn);
				spfn = round_up_base_offset(spfn, bank_pfn_size, rank_pfn_offset());
				epfn = round_down_base_offset(epfn, bank_pfn_size, rank_pfn_offset());
				while (epfn >= (spfn + bank_pfn_size)) {
					mtkpasr_segment_bits |= (1 << ((spfn - rspfn) / bank_pfn_size + num_segment));
					spfn += bank_pfn_size;
				}
			}
		} else {
			/* spfn ~ repfn */
			spfn = max(spfn, rspfn);
			if (spfn < repfn)
				spfn = round_up_base_offset(spfn, bank_pfn_size, rank_pfn_offset());
			while (repfn >= (spfn + bank_pfn_size)) {
				mtkpasr_segment_bits |= (1 << ((spfn - rspfn) / bank_pfn_size + num_segment));
				spfn += bank_pfn_size;
			}
			/* rspfn ~ epfn */
			epfn = min(epfn, repfn);
			if (rspfn < epfn)
				epfn = round_down_base_offset(epfn, bank_pfn_size, rank_pfn_offset());
			while ((epfn - bank_pfn_size) >= rspfn) {
				epfn -= bank_pfn_size;
				mtkpasr_segment_bits |= (1 << ((epfn - rspfn) / bank_pfn_size + num_segment));
			}
		}
		num_segment += 8;	/* HW constraint - 8 segment-bits per rank */
	}
}

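/*
 * Layout of mtkpasr_segment_bits as filled above: each rank owns 8
 * consecutive bits (the HW constraint noted in the loop), so rank 0 uses
 * bits 0-7, rank 1 uses bits 8-15, and so on. A set bit marks a bank that
 * lies entirely inside the requested [start, end) pfn range.
 */
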
/*
 * We will set an offset on which active PASR will be imposed.
 * This is done by acquiring CMA's base and size.
 * Return <0 means "fail to init pasr range"
 *        >0 means "the number of valid banks"
 */
int __init mtkpasr_init_range(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret = 0;
	int rank;
	unsigned long pfn_bank_alignment = 0;
	unsigned long vseg, seg_num = 0;

	/* Check DRAM configuration */
	ret = check_dram_configuration();
	if (ret < 0)
		goto out;

	/* Find out which rank "start_pfn" belongs to */
	for (rank = 0; rank < MAX_RANKS; ++rank) {
		if (kernel_pfn_to_virt(start_pfn) < kernel_pfn_to_virt(rank_info[rank].end_pfn) &&
		    kernel_pfn_to_virt(start_pfn) >= kernel_pfn_to_virt(rank_info[rank].start_pfn)) {
			pfn_bank_alignment = rank_info[rank].bank_pfn_size;
			break;
		}
	}

	/* Sanity check */
	if (!pfn_bank_alignment) {
		ret = -1;
		goto out;
	}

	/* 1st attempted bank size */
	bank_pfns = pfn_bank_alignment;

	/* Find out which rank "end_pfn" belongs to */
	for (rank = 0; rank < MAX_RANKS; ++rank) {
		if (kernel_pfn_to_virt(end_pfn) <= kernel_pfn_to_virt(rank_info[rank].end_pfn) &&
		    kernel_pfn_to_virt(end_pfn) > kernel_pfn_to_virt(rank_info[rank].start_pfn)) {
			pfn_bank_alignment = rank_info[rank].bank_pfn_size;
			break;
		}
	}

	/* Sanity check: only allow equal bank size */
	if (bank_pfns != pfn_bank_alignment) {
		ret = -2;
		goto out;
	}

	/* Determine mtkpasr_start_pfn/end */
	mtkpasr_start_pfn = round_up_base_offset(start_pfn, pfn_bank_alignment, ARCH_PFN_OFFSET);
	mtkpasr_end_pfn = round_down_base_offset(end_pfn, pfn_bank_alignment, ARCH_PFN_OFFSET);

	/* Map PASR start/end kernel pfn to DRAM segments */
	find_mtkpasr_valid_segment(mtkpasr_start_pfn, mtkpasr_end_pfn);

	/* How many segments */
	vseg = mtkpasr_segment_bits;
	ret = 0;
	do {
		if (vseg & 0x1)
			ret++;
		vseg >>= 1;
	} while (++seg_num < BITS_PER_LONG);

	pr_debug("Start_pfn[%8lu] End_pfn[%8lu] Valid_segment[0x%8lx] Segments[%u]\n",
		 mtkpasr_start_pfn, mtkpasr_end_pfn, mtkpasr_segment_bits, ret);

out:
	return ret;
}

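/*
 * Usage sketch (names are illustrative): a caller that has reserved a CMA
 * region as a pfn range might do
 *
 *	int banks = mtkpasr_init_range(cma_start_pfn, cma_end_pfn);
 *	if (banks <= 0)
 *		pr_err("no usable PASR range\n");
 *
 * where cma_start_pfn/cma_end_pfn stand for the CMA base and end page frame
 * numbers; the positive return value is the number of PASR-capable banks.
 */
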
/*
 * Given a bank, this function returns its (start_pfn, end_pfn) and the corresponding rank.
 * Return -1 means no valid banks/ranks
 *         0 means no corresponding rank
 *        >0 means there is a corresponding bank and rank (caller should subtract 1 to get the correct rank number)
 */
int __init query_bank_rank_information(int bank, unsigned long *spfn, unsigned long *epfn, int *segn)
{
	int seg_num = 0, rank, num_segment = 0;
	unsigned long vseg = mtkpasr_segment_bits, vmask;

	/* Reset */
	*spfn = 0;
	*epfn = 0;

	/* Which segment */
	do {
		if (vseg & 0x1) {
			/* Found! */
			if (!bank)
				break;
			bank--;
		}
		vseg >>= 1;
		seg_num++;
	} while (seg_num < BITS_PER_LONG);

	/* Sanity check */
	if (seg_num == BITS_PER_LONG)
		return -1;

	/* Corresponding segment */
	*segn = seg_num;

	/* Which rank */
	vseg = mtkpasr_segment_bits;
	for (rank = 0; rank < MAX_RANKS; ++rank) {
		if (is_valid_rank(rank)) {
			num_segment = (kernel_pfn_to_virt(rank_info[rank].end_pfn) -
				       kernel_pfn_to_virt(rank_info[rank].start_pfn)) /
				      rank_info[rank].bank_pfn_size;
			if (seg_num < num_segment) {
				*spfn = virt_to_kernel_pfn(kernel_pfn_to_virt(rank_info[rank].start_pfn) +
							   seg_num * rank_info[rank].bank_pfn_size);
				*epfn = virt_to_kernel_pfn(kernel_pfn_to_virt(*spfn) +
							   rank_info[rank].bank_pfn_size);
				/* Fixup to meet bank range definition ??? */
				if (*epfn <= *spfn)
					*epfn = kernel_pfn_to_virt(*epfn);
				break;
			}
			seg_num -= num_segment;
			vseg >>= num_segment;
		}
	}

	/* Sanity check */
	if (rank == MAX_RANKS)
		return -1;

	/* Query rank information */
	vmask = (1 << num_segment) - 1;
	if ((vseg & vmask) == vmask)
		return (rank + 1);

	return 0;
}
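
/*
 * Example of the return value above: if every segment bit belonging to the
 * rank that contains the requested bank is set in mtkpasr_segment_bits, the
 * whole rank is covered and the function returns (rank + 1); if only some of
 * that rank's bits are set, it returns 0 and the caller only gets the bank's
 * pfn range through *spfn/*epfn.
 */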