mtk_memcfg.c

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/seq_file.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/setup.h>
#include <mt-plat/mtk_memcfg.h>
#include <mt-plat/aee.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/mod_devicetable.h>
#include <linux/io.h>
#include <linux/memblock.h>

#define MTK_MEMCFG_SIMPLE_BUFFER_LEN 16
#define MTK_MEMCFG_LARGE_BUFFER_LEN (2048)
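
/*
 * Boot-time memory layout log. mtk_memcfg_write_memory_layout_buf()
 * appends formatted text here, and the result is exported through
 * /proc/mtk_memcfg/memory_layout (see mtk_memcfg_late_init() below).
 */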
struct mtk_memcfg_info_buf {
        unsigned long max_len;
        unsigned long curr_pos;
        char buf[MTK_MEMCFG_LARGE_BUFFER_LEN];
};

static struct mtk_memcfg_info_buf mtk_memcfg_layout_buf = {
        .buf = {[0 ... (MTK_MEMCFG_LARGE_BUFFER_LEN - 1)] = 0,},
        .max_len = MTK_MEMCFG_LARGE_BUFFER_LEN,
        .curr_pos = 0,
};

static unsigned long mtk_memcfg_late_warning_flag;

/* Append formatted text to the layout buffer; output is truncated once full */
void mtk_memcfg_write_memory_layout_buf(char *fmt, ...)
{
        va_list ap;
        struct mtk_memcfg_info_buf *layout_buf = &mtk_memcfg_layout_buf;

        if (layout_buf->curr_pos <= layout_buf->max_len) {
                va_start(ap, fmt);
                layout_buf->curr_pos +=
                        vsnprintf((layout_buf->buf + layout_buf->curr_pos),
                                  (layout_buf->max_len - layout_buf->curr_pos),
                                  fmt, ap);
                va_end(ap);
        }
}
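
/*
 * Record a warning condition for mtk_memcfg_late_sanity_test() to act on
 * at late_initcall time; the AEE reporting there is currently compiled
 * out under #if 0.
 */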
void mtk_memcfg_late_warning(unsigned long flag)
{
        mtk_memcfg_late_warning_flag |= flag;
}

/* kernel memory information */
static int mtk_memcfg_memory_layout_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%s", mtk_memcfg_layout_buf.buf);
        seq_printf(m, "buffer usage: %lu/%lu\n",
                   (mtk_memcfg_layout_buf.curr_pos <=
                    mtk_memcfg_layout_buf.max_len ?
                    mtk_memcfg_layout_buf.curr_pos :
                    mtk_memcfg_layout_buf.max_len),
                   mtk_memcfg_layout_buf.max_len);
        return 0;
}

static int mtk_memcfg_memory_layout_open(struct inode *inode, struct file *file)
{
        return single_open(file, mtk_memcfg_memory_layout_show, NULL);
}
/* end of kernel memory information */

/* kernel memory fragmentation trigger */
static LIST_HEAD(frag_page_list);
static DEFINE_SPINLOCK(frag_page_list_lock);
static unsigned long mtk_memcfg_frag_round;
static struct kmem_cache *frag_page_cache;

struct frag_page {
        struct list_head list;
        struct page *page;
};
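
/*
 * Each frag_page entry pins the head order-0 page kept from one order-2
 * allocation (its three tail pages are freed back), so the pages held on
 * frag_page_list are what keep the buddy allocator fragmented.
 */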
static int mtk_memcfg_frag_show(struct seq_file *m, void *v)
{
        int cnt = 0;
        struct frag_page *frag_page, *n_frag_page;

        spin_lock(&frag_page_list_lock);
        list_for_each_entry_safe(frag_page, n_frag_page,
                                 &frag_page_list, list) {
                cnt++;
        }
        spin_unlock(&frag_page_list_lock);
        seq_printf(m, "round: %lu, fragmentation-trigger held %d pages, %d MB\n",
                   mtk_memcfg_frag_round,
                   cnt, (cnt << PAGE_SHIFT) >> 20);
        return 0;
}

static int mtk_memcfg_frag_open(struct inode *inode, struct file *file)
{
        return single_open(file, mtk_memcfg_frag_show, NULL);
}

static int do_fragmentation(void *n)
{
        struct frag_page *frag_page, *n_frag_page;
        struct page *page;
        gfp_t gfp_mask = GFP_ATOMIC;
        unsigned int max_order = 2;
        int cnt = 0, i;

        /* trigger fragmentation */
        /*
         * Allocate an order-2 page, split it into 4 order-0 pages,
         * and free 3 of them, repeatedly.
         * In this way, we split all high-order pages into
         * order-0 and order-1 pages to create a
         * fragmentation scenario.
         *
         * For now, we only trigger fragmentation in the
         * normal zone.
         */
        while (1) {
#if 1
                if (cnt >= 10000) {
                        /*
                         * Release all memory and restart the fragmentation.
                         * Holding too many frag_page structures consumes
                         * too many order-0 pages.
                         */
                        spin_lock(&frag_page_list_lock);
                        list_for_each_entry_safe(frag_page, n_frag_page,
                                                 &frag_page_list, list) {
                                list_del(&frag_page->list);
                                __free_page(frag_page->page);
                                kmem_cache_free(frag_page_cache, frag_page);
                                cnt--;
                        }
                        spin_unlock(&frag_page_list_lock);
                        pr_alert("round: %lu, fragmentation-trigger free pages %d left\n",
                                 mtk_memcfg_frag_round, cnt);
                }
#endif
                while (1) {
                        frag_page = kmem_cache_alloc(frag_page_cache, gfp_mask);
                        if (!frag_page)
                                break;
                        page = alloc_pages(gfp_mask, max_order);
                        if (!page) {
                                /* return the tracking object to its cache */
                                kmem_cache_free(frag_page_cache, frag_page);
                                break;
                        }
                        /* split the order-2 page into 4 order-0 pages */
                        split_page(page, max_order);
                        INIT_LIST_HEAD(&frag_page->list);
                        frag_page->page = page;
                        spin_lock(&frag_page_list_lock);
                        list_add(&frag_page->list, &frag_page_list);
                        spin_unlock(&frag_page_list_lock);
                        /* keep the head page, free the three tail pages */
                        for (i = 1; i < (1 << max_order); i++)
                                __free_page(page + i);
                        cnt++;
                }
                mtk_memcfg_frag_round++;
                pr_alert("round: %lu, fragmentation-trigger allocate %d pages %d MB\n",
                         mtk_memcfg_frag_round, cnt, (cnt << PAGE_SHIFT) >> 20);
                msleep(500);
        }
        return 0;
}

static ssize_t
mtk_memcfg_frag_write(struct file *file, const char __user *buffer,
                      size_t count, loff_t *pos)
{
        static char state;
        static struct task_struct *p;

        if (count > 0) {
                if (get_user(state, buffer))
                        return -EFAULT;
                state -= '0';
                pr_alert("%s state = %d\n", __func__, state);
                if (state) {
                        pr_alert("activate do_fragmentation kthread\n");
                        p = kthread_create(do_fragmentation, NULL,
                                           "fragmentationd");
                        if (!IS_ERR(p))
                                wake_up_process(p);
                }
        }
        return count;
}
/* end of kernel memory fragmentation trigger */
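
/*
 * Example usage, via the proc entries created in mtk_memcfg_late_init():
 *   echo 1 > /proc/mtk_memcfg/frag-trigger   (start the fragmentationd kthread)
 *   cat /proc/mtk_memcfg/frag-trigger        (report pages held per round)
 */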

static int mtk_memcfg_oom_show(struct seq_file *m, void *v)
{
        seq_puts(m, "oom-trigger\n");
        return 0;
}

static int mtk_memcfg_oom_open(struct inode *inode, struct file *file)
{
        return single_open(file, mtk_memcfg_oom_show, NULL);
}

static ssize_t
mtk_memcfg_oom_write(struct file *file, const char __user *buffer,
                     size_t count, loff_t *pos)
{
        static char state;

        if (count > 0) {
                if (get_user(state, buffer))
                        return -EFAULT;
                state -= '0';
                pr_alert("%s state = %d\n", __func__, state);
                if (state) {
                        pr_alert("oom test, trying to kill system under oom scenario\n");
                        /* exhaust all memory */
                        for (;;)
                                alloc_pages(GFP_KERNEL, 0);
                }
        }
        return count;
}
/* end of kernel out-of-memory (oom) trigger */
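
/*
 * Example usage: echo 1 > /proc/mtk_memcfg/oom-trigger allocates order-0
 * pages in an endless loop, deliberately driving the system into OOM.
 * Use only on a disposable test device.
 */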

static int __init mtk_memcfg_init(void)
{
        return 0;
}

static void __exit mtk_memcfg_exit(void)
{
}

static const struct file_operations mtk_memcfg_memory_layout_operations = {
        .open = mtk_memcfg_memory_layout_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static const struct file_operations mtk_memcfg_frag_operations = {
        .open = mtk_memcfg_frag_open,
        .write = mtk_memcfg_frag_write,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static const struct file_operations mtk_memcfg_oom_operations = {
        .open = mtk_memcfg_oom_open,
        .write = mtk_memcfg_oom_write,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

#ifdef CONFIG_SLUB_DEBUG
static const struct file_operations proc_slabtrace_operations = {
        .open = slabtrace_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
#endif
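
/*
 * Create /proc/mtk_memcfg/ and its debug entries: memory_layout (boot-time
 * layout log), frag-trigger (fragmentation test), oom-trigger (OOM test)
 * and, with CONFIG_SLUB_DEBUG, slabtrace.
 */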
static int __init mtk_memcfg_late_init(void)
{
        struct proc_dir_entry *entry = NULL;
        struct proc_dir_entry *mtk_memcfg_dir = NULL;

        mtk_memcfg_dir = proc_mkdir("mtk_memcfg", NULL);
        if (!mtk_memcfg_dir) {
                pr_err("[%s]: mkdir /proc/mtk_memcfg failed\n", __func__);
        } else {
                /* display kernel memory layout */
                entry = proc_create("memory_layout",
                                    S_IRUGO | S_IWUSR, mtk_memcfg_dir,
                                    &mtk_memcfg_memory_layout_operations);
                if (!entry)
                        pr_err("create memory_layout proc entry failed\n");

                /* fragmentation test */
                entry = proc_create("frag-trigger",
                                    S_IRUGO | S_IWUSR, mtk_memcfg_dir,
                                    &mtk_memcfg_frag_operations);
                if (!entry)
                        pr_err("create frag-trigger proc entry failed\n");
                frag_page_cache = kmem_cache_create("frag_page_cache",
                                                    sizeof(struct frag_page),
                                                    0, SLAB_PANIC, NULL);
                if (!frag_page_cache)
                        pr_err("create frag_page_cache failed\n");

                /* oom test */
                entry = proc_create("oom-trigger",
                                    S_IRUGO | S_IWUSR, mtk_memcfg_dir,
                                    &mtk_memcfg_oom_operations);
                if (!entry)
                        pr_err("create oom entry failed\n");

#ifdef CONFIG_SLUB_DEBUG
                /* slabtrace - full slub object backtrace */
                entry = proc_create("slabtrace",
                                    S_IRUSR, mtk_memcfg_dir,
                                    &proc_slabtrace_operations);
                if (!entry)
                        pr_err("create slabtrace proc entry failed\n");
#endif
        }
        return 0;
}

module_init(mtk_memcfg_init);
module_exit(mtk_memcfg_exit);

static int __init mtk_memcfg_late_sanity_test(void)
{
#if 0
        /* trigger kernel warning if warning flag is set */
        if (mtk_memcfg_late_warning_flag & WARN_MEMBLOCK_CONFLICT) {
                aee_kernel_warning("[memory layout conflict]",
                                   mtk_memcfg_layout_buf.buf);
        }
        if (mtk_memcfg_late_warning_flag & WARN_MEMSIZE_CONFLICT) {
                aee_kernel_warning("[memory size conflict]",
                                   mtk_memcfg_layout_buf.buf);
        }
        if (mtk_memcfg_late_warning_flag & WARN_API_NOT_INIT) {
                aee_kernel_warning("[API is not initialized]",
                                   mtk_memcfg_layout_buf.buf);
        }
#ifdef CONFIG_HIGHMEM
        /* check highmem zone size */
        if (unlikely
            (totalhigh_pages && (totalhigh_pages << PAGE_SHIFT) < SZ_8M)) {
                aee_kernel_warning("[high zone lt 8MB]", __func__);
        }
#endif /* end of CONFIG_HIGHMEM */
#endif
        return 0;
}

/* scan memory layout */
#ifdef CONFIG_OF
static int dt_scan_memory(unsigned long node, const char *uname,
                          int depth, void *data)
{
        const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
        int i;
        int l;
        u64 kernel_mem_sz = 0;
        u64 phone_dram_sz = 0x0;        /* original phone DRAM size */
        u64 dram_sz = 0;                /* total DRAM size of all modules */
        struct dram_info *dram_info;
        struct mem_desc *mem_desc;
        struct mblock_info *mblock_info;
        const __be32 *reg, *endp;
        u64 fb_base = 0x12345678, fb_size = 0;

        /* We are scanning "memory" nodes only */
        if (type == NULL) {
                /*
                 * The longtrail doesn't have a device_type on the
                 * /memory node, so look for the node called /memory@0.
                 */
                if (depth != 1 || strcmp(uname, "memory@0") != 0)
                        return 0;
        } else if (strcmp(type, "memory") != 0) {
                return 0;
        }

        reg = of_get_flat_dt_prop(node, "reg", &l);
        if (reg == NULL)
                return 0;
        endp = reg + (l / sizeof(__be32));

        while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
                u64 base, size;

                base = dt_mem_next_cell(dt_root_addr_cells, &reg);
                size = dt_mem_next_cell(dt_root_size_cells, &reg);
                if (size == 0)
                        continue;
                MTK_MEMCFG_LOG_AND_PRINTK(
                        "[debug]DRAM size (dt) : 0x%llx - 0x%llx (0x%llx)\n",
                        (unsigned long long)base,
                        (unsigned long long)base +
                        (unsigned long long)size - 1,
                        (unsigned long long)size);
                kernel_mem_sz += size;
        }

        /* orig_dram_info */
        dram_info = (struct dram_info *)of_get_flat_dt_prop(node,
                        "orig_dram_info", NULL);
        if (dram_info) {
                for (i = 0; i < dram_info->rank_num; i++) {
                        MTK_MEMCFG_LOG_AND_PRINTK(
                                "[debug]orig_dram rank[%d] : 0x%08llx - 0x%08llx (0x%llx)\n",
                                i,
                                dram_info->rank_info[i].start,
                                dram_info->rank_info[i].start +
                                dram_info->rank_info[i].size - 1,
                                dram_info->rank_info[i].size);
                        phone_dram_sz += dram_info->rank_info[i].size;
                }
        }

        /* mblock_info */
        mblock_info = (struct mblock_info *)of_get_flat_dt_prop(node,
                        "mblock_info", NULL);
        if (mblock_info) {
                for (i = 0; i < mblock_info->mblock_num; i++) {
                        MTK_MEMCFG_LOG_AND_PRINTK(
                                "[debug]mblock[%d][r%d] : 0x%08llx - 0x%08llx (0x%llx)\n",
                                i,
                                mblock_info->mblock[i].rank,
                                mblock_info->mblock[i].start,
                                mblock_info->mblock[i].start +
                                mblock_info->mblock[i].size - 1,
                                mblock_info->mblock[i].size);
                        dram_sz += mblock_info->mblock[i].size;
                }
        }

        /* lca reserved memory */
        mem_desc = (struct mem_desc *)of_get_flat_dt_prop(node,
                        "lca_reserved_mem", NULL);
        if (mem_desc && mem_desc->size) {
                MTK_MEMCFG_LOG_AND_PRINTK(
                        "[PHY layout]lca_reserved_mem : 0x%08llx - 0x%08llx (0x%llx)\n",
                        mem_desc->start,
                        mem_desc->start + mem_desc->size - 1,
                        mem_desc->size);
                dram_sz += mem_desc->size;
        }

        /* tee reserved memory */
        mem_desc = (struct mem_desc *)of_get_flat_dt_prop(node,
                        "tee_reserved_mem", NULL);
        if (mem_desc && mem_desc->size) {
                MTK_MEMCFG_LOG_AND_PRINTK(
                        "[PHY layout]tee_reserved_mem : 0x%08llx - 0x%08llx (0x%llx)\n",
                        mem_desc->start,
                        mem_desc->start + mem_desc->size - 1,
                        mem_desc->size);
                dram_sz += mem_desc->size;
        }

        /* frame buffer */
        fb_size = (u64)mtkfb_get_fb_size();
        fb_base = (u64)mtkfb_get_fb_base();
        dram_sz += fb_size;

        /* print memory information */
        MTK_MEMCFG_LOG_AND_PRINTK(
                "[debug]available DRAM size = 0x%llx\n[PHY layout]FB (dt) : 0x%llx - 0x%llx (0x%llx)\n",
                (unsigned long long)kernel_mem_sz,
                (unsigned long long)fb_base,
                (unsigned long long)fb_base + fb_size - 1,
                (unsigned long long)fb_size);

        return node;
}
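
/*
 * Walk the flattened device tree once at early boot and log the DRAM,
 * reserved-memory and framebuffer ranges via MTK_MEMCFG_LOG_AND_PRINTK().
 */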
static int __init display_early_memory_info(void)
{
        int node;

        /* system memory */
        node = of_scan_flat_dt(dt_scan_memory, NULL);

        return 0;
}
#endif /* end of CONFIG_OF */

late_initcall(mtk_memcfg_late_init);
late_initcall(mtk_memcfg_late_sanity_test);
#ifdef CONFIG_OF
pure_initcall(display_early_memory_info);
#endif /* end of CONFIG_OF */

#if 0 /* test code of of_reserve */
/* test memory-reserved code */
phys_addr_t test_base = 0;
phys_addr_t test_size = 0;

reservedmem_of_init_fn reserve_memory_test_fn(struct reserved_mem *rmem,
                unsigned long node, const char *uname)
{
        pr_alert("%s, name: %s, uname: %s, base: 0x%llx, size: 0x%llx\n",
                 __func__, rmem->name, uname,
                 (unsigned long long)rmem->base,
                 (unsigned long long)rmem->size);
        /* memblock_free(rmem->base, rmem->size); */
        test_base = rmem->base;
        test_size = rmem->size;
        return 0;
}

static int __init init_test_reserve_memory(void)
{
        void *p = 0;

        p = ioremap(test_base, (size_t)test_size);
        if (p) {
                pr_alert("%s:%d ioremap ok: %p\n", __func__, __LINE__,
                         p);
        } else {
                pr_alert("%s:%d ioremap failed\n", __func__, __LINE__);
        }
        return 0;
}
late_initcall(init_test_reserve_memory);

reservedmem_of_init_fn mrdump_reserve_initfn(struct reserved_mem *rmem,
                unsigned long node, const char *uname)
{
        pr_alert("%s, name: %s, uname: %s, base: 0x%llx, size: 0x%llx\n",
                 __func__, rmem->name, uname,
                 (unsigned long long)rmem->base,
                 (unsigned long long)rmem->size);
        return 0;
}

RESERVEDMEM_OF_DECLARE(reserve_memory_test1, "reserve-memory-test",
                       reserve_memory_test_fn);
RESERVEDMEM_OF_DECLARE(mrdump_reserved_memory, "mrdump-reserved-memory",
                       mrdump_reserve_initfn);
#endif /* end of test code of of_reserve */