/* mrdump_mini.c */
  1. #include <linux/init.h>
  2. #include <linux/mm.h>
  3. #include <linux/slab.h>
  4. #include <linux/memblock.h>
  5. #include <linux/elf.h>
  6. #include <linux/kdebug.h>
  7. #include <linux/module.h>
  8. #include <linux/vmalloc.h>
  9. #include <linux/spinlock.h>
  10. #include <linux/stacktrace.h>
  11. #include <asm/pgtable.h>
  12. #include <asm-generic/percpu.h>
  13. #include <asm-generic/sections.h>
  14. #include <asm/page.h>
  15. #include <smp.h>
  16. #include <mrdump.h>
  17. #include <mt-plat/aee.h>
  18. #include <linux/of.h>
  19. #include <linux/of_fdt.h>
  20. #include <linux/of_reserved_mem.h>
  21. #include "../../../../kernel/sched/sched.h"
  22. #include "mrdump_mini.h"
/*
 * Logging helpers: while a nested panic is in progress the normal printk
 * path may deadlock, so messages are routed through aee_nested_printf()
 * instead.  do { } while (0) makes the macros safe as single statements.
 */
#define LOG_DEBUG(fmt, ...) \
	do { \
		if (aee_in_nested_panic()) \
			aee_nested_printf(fmt, ##__VA_ARGS__); \
		else \
			pr_debug(fmt, ##__VA_ARGS__); \
	} while (0)

#define LOG_ERROR(fmt, ...) \
	do { \
		if (aee_in_nested_panic()) \
			aee_nested_printf(fmt, ##__VA_ARGS__); \
		else \
			pr_err(fmt, ##__VA_ARGS__); \
	} while (0)

/* Verbose logging is compiled out entirely; the rest alias the two above. */
#define LOGV(fmt, msg...)
#define LOGD LOG_DEBUG
#define LOGI LOG_DEBUG
#define LOGW LOG_ERROR
#define LOGE LOG_ERROR

/* In-memory ELF header of the mini core dump; NULL until mrdump_mini_init(). */
static struct mrdump_mini_elf_header *mrdump_mini_ehdr;
/* Module parameter: also dump every CPU's current task/stack (see bottom). */
static bool dump_all_cpus;
/*
 * Weak no-op fallbacks: platforms that provide these log/display buffers
 * override them; otherwise the corresponding misc dump entries stay empty
 * (all outputs left untouched, i.e. zero after the caller's memset).
 */
__weak void get_android_log_buffer(unsigned long *addr, unsigned long *size, unsigned long *start,
				   int type)
{
}
__weak void get_disp_err_buffer(unsigned long *addr, unsigned long *size, unsigned long *start)
{
}
__weak void get_disp_fence_buffer(unsigned long *addr, unsigned long *size, unsigned long *start)
{
}
__weak void get_disp_dbg_buffer(unsigned long *addr, unsigned long *size, unsigned long *start)
{
}
__weak void get_disp_dump_buffer(unsigned long *addr, unsigned long *size, unsigned long *start)
{
}
/* Fallback when vmalloc-area lookup is unavailable: treat address as unmapped. */
__weak struct vm_struct *find_vm_area(const void *addr)
{
	return NULL;
}
#undef virt_addr_valid
/*
 * Local, stricter virt_addr_valid(): the address must lie inside the linear
 * map [PAGE_OFFSET, high_memory) AND its backing page frame must exist.
 */
#define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && \
		(void *)(kaddr) < (void *)high_memory && \
		pfn_valid(__pa(kaddr) >> PAGE_SHIFT))
/* copy from fs/binfmt_elf.c */
/*
 * Fill in the fixed part of the ELF core-file header.
 * @segs: number of program headers that will follow (e_phnum).
 * Class, endianness and machine come from the asm/elf.h ELF_* constants.
 */
static void fill_elf_header(struct elfhdr *elf, int segs)
{
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;
	elf->e_type = ET_CORE;
	elf->e_machine = ELF_ARCH;
	elf->e_version = EV_CURRENT;
	/* program headers directly follow the ELF header */
	elf->e_phoff = sizeof(struct elfhdr);
#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS 0
#endif
	elf->e_flags = ELF_CORE_EFLAGS;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
}
  88. static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
  89. {
  90. phdr->p_type = PT_NOTE;
  91. phdr->p_offset = offset;
  92. phdr->p_vaddr = 0;
  93. phdr->p_paddr = 0;
  94. phdr->p_filesz = sz;
  95. phdr->p_memsz = 0;
  96. phdr->p_flags = 0;
  97. phdr->p_align = 0;
  98. }
  99. static void fill_elf_load_phdr(struct elf_phdr *phdr, int sz,
  100. unsigned long vaddr, unsigned long paddr)
  101. {
  102. phdr->p_type = PT_LOAD;
  103. phdr->p_vaddr = vaddr;
  104. phdr->p_paddr = paddr;
  105. phdr->p_filesz = sz;
  106. phdr->p_memsz = 0;
  107. phdr->p_flags = 0;
  108. phdr->p_align = 0;
  109. }
/*
 * Write an ELF note header and its name.  The name is copied with strncpy
 * on purpose: when @name is shorter than @namesz the remainder is
 * zero-filled, which is exactly the NUL padding the ELF note format wants.
 * NOTE(review): a @name longer than @namesz would be truncated without a
 * terminating NUL -- all callers pass fixed NOTE_NAME_SHORT/LONG sizes.
 */
static noinline void fill_note(struct elf_note *note, const char *name, int type,
			       unsigned int sz, unsigned int namesz)
{
	/* the name bytes live immediately after the fixed-size header */
	char *n_name = (char *)note + sizeof(struct elf_note);
	/* char *n_name = container_of(note, struct mrdump_mini_elf_psinfo, note)->name; */
	note->n_namesz = namesz;
	note->n_type = type;
	note->n_descsz = sz;
	strncpy(n_name, name, note->n_namesz);
}
/* Note with a long (NOTE_NAME_LONG) name field, used for misc buffer entries. */
static void fill_note_L(struct elf_note *note, const char *name, int type, unsigned int sz)
{
	fill_note(note, name, type, sz, NOTE_NAME_LONG);
}
/* Note with a short (NOTE_NAME_SHORT) name field, used for prstatus/psinfo. */
static void fill_note_S(struct elf_note *note, const char *name, int type, unsigned int sz)
{
	fill_note(note, name, type, sz, NOTE_NAME_SHORT);
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
/*
 * Only the register set and the pid tag are filled in; @p is currently
 * unused.  @pid doubles as a slot tag: 100 + cpu marks the panicking
 * ("ke") core, 1..NR_CPUS the other cores -- mrdump_mini_add_loads()
 * decodes it the same way.  pr_ppid is (ab)used to record NR_CPUS.
 */
static void fill_prstatus(struct elf_prstatus *prstatus, struct pt_regs *regs,
			  struct task_struct *p, unsigned long pid)
{
	elf_core_copy_regs(&prstatus->pr_reg, regs);
	prstatus->pr_pid = pid;
	prstatus->pr_ppid = NR_CPUS;
}
  139. static int fill_psinfo(struct elf_prpsinfo *psinfo)
  140. {
  141. unsigned int i;
  142. strncpy(psinfo->pr_psargs, saved_command_line, ELF_PRARGSZ - 1);
  143. for (i = 0; i < ELF_PRARGSZ - 1; i++)
  144. if (psinfo->pr_psargs[i] == 0)
  145. psinfo->pr_psargs[i] = ' ';
  146. psinfo->pr_psargs[ELF_PRARGSZ - 1] = 0;
  147. strncpy(psinfo->pr_fname, "vmlinux", sizeof(psinfo->pr_fname));
  148. return 0;
  149. }
/*
 * Record a misc buffer (va/pa/size) in the first free misc note slot, or
 * refresh the existing slot carrying the same @name.  A slot is "free"
 * while its note type is not yet NT_IPANIC_MISC.
 * @start: optional pointer to the buffer's read cursor; stored as a
 *         physical address when it is a valid lowmem pointer, else 0.
 * NOTE(review): assumes mrdump_mini_ehdr is non-NULL -- callers run after
 * init; confirm no early-boot caller exists.
 */
void mrdump_mini_add_misc_pa(unsigned long va, unsigned long pa, unsigned long size,
			     unsigned long start, char *name)
{
	int i;
	struct elf_note *note;

	for (i = 0; i < MRDUMP_MINI_NR_MISC; i++) {
		note = &mrdump_mini_ehdr->misc[i].note;
		if (note->n_type == NT_IPANIC_MISC) {
			/* occupied slot: only reuse when the name matches */
			if (strncmp(mrdump_mini_ehdr->misc[i].name, name, 16) != 0)
				continue;
		}
		mrdump_mini_ehdr->misc[i].data.vaddr = va;
		mrdump_mini_ehdr->misc[i].data.paddr = pa;
		mrdump_mini_ehdr->misc[i].data.size = size;
		mrdump_mini_ehdr->misc[i].data.start =
		    virt_addr_valid((void *)start) ? __pa(start) : 0;
		fill_note_L(note, name, NT_IPANIC_MISC, sizeof(struct mrdump_mini_elf_misc));
		break;
	}
}
  170. void mrdump_mini_add_misc(unsigned long addr, unsigned long size, unsigned long start, char *name)
  171. {
  172. if (!virt_addr_valid((void *)addr))
  173. return;
  174. mrdump_mini_add_misc_pa(addr, __pa(addr), size, start, name);
  175. }
/*
 * Walk the kernel page tables for @addr and report whether it is backed by
 * a present, valid page frame.  Each level's raw descriptor is pr_err()'d
 * as a debugging aid.  Uses the pgd/pud/pmd/pte API of pre-p4d kernels.
 * Returns non-zero iff the final pte maps a valid pfn.
 */
int kernel_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (addr < PAGE_OFFSET)
		return 0;
	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;
	pr_err("[%08lx] *pgd=%08llx", addr, (long long)pgd_val(*pgd));
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;
	pr_err("*pud=%08llx", (long long)pud_val(*pud));
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	pr_err("*pmd=%08llx", (long long)pmd_val(*pmd));
	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;
	pr_err("*pte=%08llx", (long long)pte_val(*pte));
	return pfn_valid(pte_pfn(*pte));
}
/*
 * Ensure a page-aligned window of roughly @size bytes centred on @addr is
 * covered by some PT_LOAD program header: a window fully inside an existing
 * segment is dropped, a partially overlapping one is merged, otherwise the
 * first free (PT_NULL) slot is claimed.  vmalloc addresses are reduced to
 * the single page containing @addr because their physical backing is not
 * contiguous.
 */
void mrdump_mini_add_entry(unsigned long addr, unsigned long size)
{
	struct elf_phdr *phdr;
	/* struct vm_area_struct *vma; */
	struct vm_struct *vm;
	unsigned long laddr, haddr, lnew, hnew;
	unsigned long paddr;
	int i;

	if (addr < PAGE_OFFSET)
		return;
	/* candidate window [lnew, hnew): ALIGN(size) bytes around addr */
	hnew = ALIGN(addr + size / 2, PAGE_SIZE);
	lnew = hnew - ALIGN(size, PAGE_SIZE);
	if (!virt_addr_valid(addr)) {
		/* vma = find_vma(&init_mm, addr); */
		/* pr_err("mirdump: add: %p, vma: %x", addr, vma); */
		/* if (!vma) */
		/* return; */
		/* pr_err("mirdump: (%p, %p), (%p, %p)", vma->vm_start, vma->vm_end, lnew, hnew); */
		/* hnew = min(vma->vm_end, hnew); */
		/* lnew = max(vma->vm_start, lnew); */
		vm = find_vm_area((void *)addr);
		if (!vm)
			return;
		/* lnew = max((unsigned long)vm->addr, lnew); */
		/* hnew = min((unsigned long)vm->addr + vm->size - PAGE_SIZE, hnew); */
		/* only dump 1 page */
		lnew = max((unsigned long)vm->addr, PAGE_ALIGN(addr) - PAGE_SIZE);
		hnew = lnew + PAGE_SIZE;
		paddr = __pfn_to_phys(vmalloc_to_pfn((void *)lnew));
	} else {
		/* clamp the window to the linear map */
		lnew = max(lnew, PAGE_OFFSET);
		hnew = min_t(unsigned long, hnew, (unsigned long)high_memory);
		paddr = __pa(lnew);
	}
	for (i = 0; i < MRDUMP_MINI_NR_SECTION; i++) {
		phdr = &mrdump_mini_ehdr->phdrs[i];
		if (phdr->p_type == PT_NULL)
			break;
		if (phdr->p_type != PT_LOAD)
			continue;
		laddr = phdr->p_vaddr;
		haddr = laddr + phdr->p_filesz;
		/* full overlap with exist */
		if (lnew >= laddr && hnew <= haddr)
			return;
		/* no overlap, new */
		if (lnew >= haddr || hnew <= laddr)
			continue;
		/* partial overlap with exist, joining */
		lnew = lnew < laddr ? lnew : laddr;
		hnew = hnew > haddr ? hnew : haddr;
		/* NOTE(review): __pa() on the merged base assumes a linear-map
		 * address; vmalloc windows sit above high_memory and should
		 * never partially overlap a linear segment -- confirm. */
		paddr = __pa(lnew);
		break;
	}
	/* i == MRDUMP_MINI_NR_SECTION: table full, entry silently dropped */
	if (i < MRDUMP_MINI_NR_SECTION)
		fill_elf_load_phdr(phdr, hnew - lnew, lnew, paddr);
}
/*
 * Add the task_struct, thread_info and (optionally) kernel stack of the
 * task running on @cpu to the dump.  The owning task is located
 * heuristically, in order of trust: thread_info derived from the saved SP,
 * then from the saved FP, then the runqueue's current task.  Each guess is
 * validated by checking ti == tsk->stack before falling through.
 * @stack: when non-zero, also sanity-check the stack bounds (the actual
 * per-word stack scan is currently compiled out below).
 */
static void mrdump_mini_add_tsk_ti(int cpu, struct pt_regs *regs, int stack)
{
	struct task_struct *tsk = NULL;
	struct thread_info *ti = NULL;
	unsigned long *bottom = NULL;
	unsigned long *top = NULL;
	/*unsigned long *p;*/

	/* guess 1: thread_info is at the base of the stack holding SP */
	if (virt_addr_valid(regs->reg_sp)) {
		ti = (struct thread_info *)(regs->reg_sp & ~(THREAD_SIZE - 1));
		tsk = ti->task;
		bottom = (unsigned long *)regs->reg_sp;
	}
	/* guess 2: same derivation from FP if guess 1 failed validation */
	if (!(virt_addr_valid(tsk) && ti == (struct thread_info *)tsk->stack)
	    && virt_addr_valid(regs->reg_fp)) {
		ti = (struct thread_info *)(regs->reg_fp & ~(THREAD_SIZE - 1));
		tsk = ti->task;
		bottom = (unsigned long *)regs->reg_fp;
	}
	/* guess 3: fall back to the runqueue's notion of current */
	if (!virt_addr_valid(tsk) || ti != (struct thread_info *)tsk->stack) {
		tsk = cpu_curr(cpu);
		if (virt_addr_valid(tsk)) {
			ti = (struct thread_info *)tsk->stack;
			bottom = (unsigned long *)((void *)ti + sizeof(struct thread_info));
		}
	}
	mrdump_mini_add_entry(regs->reg_sp, MRDUMP_MINI_SECTION_SIZE);
	mrdump_mini_add_entry((unsigned long)ti, MRDUMP_MINI_SECTION_SIZE);
	mrdump_mini_add_entry((unsigned long)tsk, MRDUMP_MINI_SECTION_SIZE);
	LOGE("mrdump: cpu[%d] tsk:%p ti:%p\n", cpu, tsk, ti);
	if (!stack)
		return;
	top = (unsigned long *)((void *)ti + THREAD_SIZE);
	if (!virt_addr_valid(ti) || !virt_addr_valid(top) || bottom < (unsigned long *)ti
	    || bottom > top)
		return;
	/*
	   for (p = (unsigned long *)ALIGN((unsigned long)bottom, sizeof(unsigned long)); p < top; p++) {
	   if (!virt_addr_valid(*p))
	   continue;
	   if (*p >= (unsigned long)ti && *p <= (unsigned long)top)
	   continue;
	   if (*p >= (unsigned long)_stext && *p <= (unsigned long)_etext)
	   continue;
	   mrdump_mini_add_entry(*p, MRDUMP_MINI_SECTION_SIZE);
	   }
	 */
}
  306. static int mrdump_mini_cpu_regs(int cpu, struct pt_regs *regs, int main)
  307. {
  308. char name[NOTE_NAME_SHORT];
  309. int id;
  310. if (mrdump_mini_ehdr == NULL)
  311. mrdump_mini_init();
  312. if (cpu >= NR_CPUS || mrdump_mini_ehdr == NULL)
  313. return -1;
  314. id = main ? 0 : cpu + 1;
  315. if (strncmp(mrdump_mini_ehdr->prstatus[id].name, "NA", 2))
  316. return -1;
  317. snprintf(name, NOTE_NAME_SHORT - 1, main ? "ke%d" : "core%d", cpu);
  318. fill_prstatus(&mrdump_mini_ehdr->prstatus[id].data, regs, 0, id ? id : (100 + cpu));
  319. fill_note_S(&mrdump_mini_ehdr->prstatus[id].note, name, NT_PRSTATUS,
  320. sizeof(struct elf_prstatus));
  321. return 0;
  322. }
/* Record a non-panicking core's register snapshot (slot cpu + 1). */
void mrdump_mini_per_cpu_regs(int cpu, struct pt_regs *regs)
{
	mrdump_mini_cpu_regs(cpu, regs, 0);
}
EXPORT_SYMBOL(mrdump_mini_per_cpu_regs);
/*
 * Capture the caller's register state into @regs without taking an
 * exception.  On aarch64 this stores x0..x30, sp and DAIF, with the pc
 * field set to local label 1 (adr x30, 1b) and the saved x29 value
 * reloaded from the frame record; x0/x1 are spilled to the stack first
 * and restored last.  On arm it stores r0-r15 and CPSR.
 * The asm is layout- and order-sensitive: do not reorder or edit.
 */
static inline void ipanic_save_regs(struct pt_regs *regs)
{
#ifdef __aarch64__
	__asm__ volatile ("stp x0, x1, [sp,#-16]!\n\t"
			  "1: mov x1, %0\n\t"
			  "add x0, x1, #16\n\t"
			  "stp x2, x3, [x0],#16\n\t"
			  "stp x4, x5, [x0],#16\n\t"
			  "stp x6, x7, [x0],#16\n\t"
			  "stp x8, x9, [x0],#16\n\t"
			  "stp x10, x11, [x0],#16\n\t"
			  "stp x12, x13, [x0],#16\n\t"
			  "stp x14, x15, [x0],#16\n\t"
			  "stp x16, x17, [x0],#16\n\t"
			  "stp x18, x19, [x0],#16\n\t"
			  "stp x20, x21, [x0],#16\n\t"
			  "stp x22, x23, [x0],#16\n\t"
			  "stp x24, x25, [x0],#16\n\t"
			  "stp x26, x27, [x0],#16\n\t"
			  "ldr x1, [x29]\n\t"
			  "stp x28, x1, [x0],#16\n\t"
			  "mov x1, sp\n\t"
			  "stp x30, x1, [x0],#16\n\t"
			  "mrs x1, daif\n\t"
			  "adr x30, 1b\n\t"
			  "stp x30, x1, [x0],#16\n\t"
			  "sub x1, x0, #272\n\t"
			  "ldr x0, [sp]\n\t"
			  "str x0, [x1]\n\t"
			  "ldr x0, [sp, #8]\n\t"
			  "str x0, [x1, #8]\n\t" "ldp x0, x1, [sp],#16\n\t" : : "r" (regs) : "cc");
#else
	asm volatile ("stmia %1, {r0 - r15}\n\t" "mrs %0, cpsr\n":"=r" (regs->uregs[16]) : "r"(regs) : "memory");
#endif
}
  363. void mrdump_mini_build_task_info(struct pt_regs *regs)
  364. {
  365. #define MAX_STACK_TRACE_DEPTH 32
  366. unsigned long ipanic_stack_entries[MAX_STACK_TRACE_DEPTH];
  367. char symbol[96];
  368. int sz;
  369. int off, plen;
  370. struct stack_trace trace;
  371. int i;
  372. struct task_struct *tsk, *cur;
  373. struct aee_process_info *cur_proc;
  374. if (!virt_addr_valid(current_thread_info())) {
  375. LOGE("current thread info invalid\n");
  376. return;
  377. }
  378. cur = current_thread_info()->task;
  379. tsk = cur;
  380. if (!virt_addr_valid(tsk)) {
  381. LOGE("tsk invalid\n");
  382. return;
  383. }
  384. cur_proc = (struct aee_process_info *)((void *)mrdump_mini_ehdr + MRDUMP_MINI_HEADER_SIZE);
  385. /* Current panic user tasks */
  386. sz = 0;
  387. do {
  388. if (!tsk) {
  389. LOGE("No tsk info\n");
  390. memset_io(cur_proc, 0x0, sizeof(struct aee_process_info));
  391. break;
  392. }
  393. /* FIXME: Check overflow ? */
  394. sz += snprintf(symbol + sz, 96 - sz, "[%s, %d]", tsk->comm, tsk->pid);
  395. tsk = tsk->real_parent;
  396. } while (tsk && (tsk->pid != 0) && (tsk->pid != 1));
  397. if (strncmp(cur_proc->process_path, symbol, sz) == 0) {
  398. LOGE("same process path\n");
  399. return;
  400. }
  401. memset_io(cur_proc, 0, sizeof(struct aee_process_info));
  402. memcpy(cur_proc->process_path, symbol, sz);
  403. /* Grab kernel task stack trace */
  404. trace.nr_entries = 0;
  405. trace.max_entries = MAX_STACK_TRACE_DEPTH;
  406. trace.entries = ipanic_stack_entries;
  407. trace.skip = 8;
  408. save_stack_trace_tsk(cur, &trace);
  409. /* Skip the entries - ipanic_save_current_tsk_info/save_stack_trace_tsk */
  410. for (i = 0; i < trace.nr_entries; i++) {
  411. off = strlen(cur_proc->backtrace);
  412. plen = AEE_BACKTRACE_LENGTH - ALIGN(off, 8);
  413. if (plen > 16) {
  414. sz = snprintf(symbol, 96, "[<%p>] %pS\n",
  415. (void *)ipanic_stack_entries[i],
  416. (void *)ipanic_stack_entries[i]);
  417. if (ALIGN(sz, 8) - sz) {
  418. memset_io(symbol + sz - 1, ' ', ALIGN(sz, 8) - sz);
  419. memset_io(symbol + ALIGN(sz, 8) - 1, '\n', 1);
  420. }
  421. if (ALIGN(sz, 8) <= plen)
  422. memcpy(cur_proc->backtrace + ALIGN(off, 8), symbol, ALIGN(sz, 8));
  423. }
  424. }
  425. if (regs) {
  426. cur_proc->ke_frame.pc = (__u64) regs->reg_pc;
  427. cur_proc->ke_frame.lr = (__u64) regs->reg_lr;
  428. } else {
  429. /* in case panic() is called without die */
  430. /* Todo: a UT for this */
  431. cur_proc->ke_frame.pc = ipanic_stack_entries[0];
  432. cur_proc->ke_frame.lr = ipanic_stack_entries[1];
  433. }
  434. snprintf(cur_proc->ke_frame.pc_symbol, AEE_SZ_SYMBOL_S, "[<%p>] %pS",
  435. (void *)(unsigned long)cur_proc->ke_frame.pc,
  436. (void *)(unsigned long)cur_proc->ke_frame.pc);
  437. snprintf(cur_proc->ke_frame.lr_symbol, AEE_SZ_SYMBOL_L, "[<%p>] %pS",
  438. (void *)(unsigned long)cur_proc->ke_frame.lr,
  439. (void *)(unsigned long)cur_proc->ke_frame.lr);
  440. }
  441. int mrdump_task_info(unsigned char *buffer, size_t sz_buf)
  442. {
  443. if (sz_buf < sizeof(struct aee_process_info))
  444. return -1;
  445. memcpy(buffer, (void *)mrdump_mini_ehdr + MRDUMP_MINI_HEADER_SIZE,
  446. sizeof(struct aee_process_info));
  447. return sizeof(struct aee_process_info);
  448. }
static void mrdump_mini_add_loads(void);
/*
 * Entry point for the panicking ("ke") core.  If no register set was
 * handed in (e.g. panic() without a preceding die()), capture the current
 * state in-place.  Order matters: regs are recorded first so that
 * mrdump_mini_add_loads() can walk them, then the task-info text is built.
 */
void mrdump_mini_ke_cpu_regs(struct pt_regs *regs)
{
	int cpu;
	struct pt_regs context;

	if (!regs) {
		regs = &context;
		ipanic_save_regs(regs);
	}
	cpu = get_HW_cpuid();
	mrdump_mini_cpu_regs(cpu, regs, 1);
	mrdump_mini_add_loads();
	mrdump_mini_build_task_info(regs);
}
EXPORT_SYMBOL(mrdump_mini_ke_cpu_regs);
  464. static void mrdump_mini_build_elf_misc(void)
  465. {
  466. int i;
  467. struct mrdump_mini_elf_misc misc;
  468. char log_type[][16] = { "_MAIN_LOG_", "_EVENTS_LOG_", "_RADIO_LOG_", "_SYSTEM_LOG_" };
  469. unsigned long task_info_va =
  470. (unsigned long)((void *)mrdump_mini_ehdr + MRDUMP_MINI_HEADER_SIZE);
  471. unsigned long task_info_pa =
  472. MRDUMP_MINI_BUF_PADDR ? (MRDUMP_MINI_BUF_PADDR +
  473. MRDUMP_MINI_HEADER_SIZE) : __pa(task_info_va);
  474. mrdump_mini_add_misc_pa(task_info_va, task_info_pa, sizeof(struct aee_process_info), 0,
  475. "PROC_CUR_TSK");
  476. memset_io(&misc, 0, sizeof(struct mrdump_mini_elf_misc));
  477. get_kernel_log_buffer(&misc.vaddr, &misc.size, &misc.start);
  478. mrdump_mini_add_misc(misc.vaddr, misc.size, misc.start, "_KERNEL_LOG_");
  479. memset_io(&misc, 0, sizeof(struct mrdump_mini_elf_misc));
  480. get_disp_err_buffer(&misc.vaddr, &misc.size, &misc.start);
  481. mrdump_mini_add_misc(misc.vaddr, misc.size, misc.start, "_DISP_ERR_");
  482. memset_io(&misc, 0, sizeof(struct mrdump_mini_elf_misc));
  483. get_disp_dump_buffer(&misc.vaddr, &misc.size, &misc.start);
  484. mrdump_mini_add_misc(misc.vaddr, misc.size, misc.start, "_DISP_DUMP_");
  485. memset_io(&misc, 0, sizeof(struct mrdump_mini_elf_misc));
  486. get_disp_fence_buffer(&misc.vaddr, &misc.size, &misc.start);
  487. mrdump_mini_add_misc(misc.vaddr, misc.size, misc.start, "_DISP_FENCE_");
  488. memset_io(&misc, 0, sizeof(struct mrdump_mini_elf_misc));
  489. get_disp_dbg_buffer(&misc.vaddr, &misc.size, &misc.start);
  490. mrdump_mini_add_misc(misc.vaddr, misc.size, misc.start, "_DISP_DBG_");
  491. for (i = 0; i < 4; i++) {
  492. memset_io(&misc, 0, sizeof(struct mrdump_mini_elf_misc));
  493. get_android_log_buffer(&misc.vaddr, &misc.size, &misc.start, i + 1);
  494. mrdump_mini_add_misc(misc.vaddr, misc.size, misc.start, log_type[i]);
  495. }
  496. }
/*
 * Walk every claimed prstatus slot and add the memory each register set
 * points at, plus per-cpu data, mem_map, and (optionally) every CPU's
 * current task.  pr_pid encodes the slot kind (see fill_prstatus /
 * mrdump_mini_cpu_regs): >= 100 is the panicking core (cpu = pr_pid - 100,
 * full stack dump), 1..NR_CPUS are other cores (cpu = pr_pid - 1).
 */
static void mrdump_mini_add_loads(void)
{
	int cpu, i, id;
	struct pt_regs regs;
	struct elf_prstatus *prstatus;
	struct task_struct *tsk = NULL;
	struct thread_info *ti = NULL;

	if (mrdump_mini_ehdr == NULL)
		return;
	for (id = 0; id < NR_CPUS + 1; id++) {
		/* slot still named "NA" -> never filled, skip */
		if (!strncmp(mrdump_mini_ehdr->prstatus[id].name, "NA", 2))
			continue;
		prstatus = &mrdump_mini_ehdr->prstatus[id].data;
		memcpy(&regs, &prstatus->pr_reg, sizeof(prstatus->pr_reg));
		if (prstatus->pr_pid >= 100) {
			/* ke core: dump memory behind every general register */
			for (i = 0; i < ELF_NGREG; i++)
				mrdump_mini_add_entry(((unsigned long *)&regs)[i],
						      MRDUMP_MINI_SECTION_SIZE);
			cpu = prstatus->pr_pid - 100;
			mrdump_mini_add_tsk_ti(cpu, &regs, 1);
			mrdump_mini_add_entry((unsigned long)cpu_rq(cpu), MRDUMP_MINI_SECTION_SIZE);
		} else if (prstatus->pr_pid <= NR_CPUS) {
			cpu = prstatus->pr_pid - 1;
			mrdump_mini_add_tsk_ti(cpu, &regs, 0);
		} else {
			LOGE("mrdump: wrong pr_pid: %d\n", prstatus->pr_pid);
		}
	}
	mrdump_mini_add_entry((unsigned long)__per_cpu_offset, MRDUMP_MINI_SECTION_SIZE);
	mrdump_mini_add_entry((unsigned long)&mem_map, MRDUMP_MINI_SECTION_SIZE);
	mrdump_mini_add_entry((unsigned long)mem_map, MRDUMP_MINI_SECTION_SIZE);
	if (dump_all_cpus) {
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			tsk = cpu_curr(cpu);
			if (virt_addr_valid(tsk))
				ti = (struct thread_info *)tsk->stack;
			else
				ti = NULL;
			mrdump_mini_add_entry((unsigned long)cpu_rq(cpu), MRDUMP_MINI_SECTION_SIZE);
			mrdump_mini_add_entry((unsigned long)tsk, MRDUMP_MINI_SECTION_SIZE);
			mrdump_mini_add_entry((unsigned long)ti, MRDUMP_MINI_SECTION_SIZE);
		}
	}
#if 0
	if (logbuf_lock.owner_cpu < NR_CPUS) {
		tsk = cpu_curr(logbuf_lock.owner_cpu);
		if (virt_addr_valid(tsk))
			ti = (struct thread_info *)tsk->stack;
		else
			ti = NULL;
		mrdump_mini_add_entry((unsigned long)tsk, MRDUMP_MINI_SECTION_SIZE);
		mrdump_mini_add_entry((unsigned long)ti, MRDUMP_MINI_SECTION_SIZE);
	}
	mrdump_mini_add_entry((unsigned long)stack_trace, 256 * 1024);
#endif
}
  553. static void mrdump_mini_dump_loads(loff_t offset, mrdump_write write)
  554. {
  555. int errno;
  556. unsigned long start, size;
  557. int i;
  558. struct elf_phdr *phdr;
  559. loff_t pos = MRDUMP_MINI_HEADER_SIZE;
  560. for (i = 0; i < MRDUMP_MINI_NR_SECTION; i++) {
  561. phdr = &mrdump_mini_ehdr->phdrs[i];
  562. if (phdr->p_type == PT_NULL)
  563. break;
  564. if (phdr->p_type == PT_LOAD) {
  565. /* mrdump_mini_dump_phdr(phdr, &pos); */
  566. start = phdr->p_vaddr;
  567. size = ALIGN(phdr->p_filesz, SZ_512);
  568. phdr->p_offset = pos;
  569. errno = write((void *)start, pos + offset, size, 1);
  570. pos += size;
  571. if (IS_ERR(ERR_PTR(errno)))
  572. LOGD("mirdump: write fail");
  573. }
  574. }
  575. }
/*
 * Emit the mini core dump: first all PT_LOAD payloads (which also fixes up
 * their file offsets), then the ELF header itself at @sd_offset.
 * @reboot_mode/@msg/@ap are accepted for interface compatibility but unused.
 * Returns the fixed mini-dump buffer size.
 * NOTE(review): the header write()'s return value is not checked.
 */
int mrdump_mini_create_oops_dump(AEE_REBOOT_MODE reboot_mode, mrdump_write write,
				 loff_t sd_offset, const char *msg, va_list ap)
{
	mrdump_mini_dump_loads(sd_offset, write);
	write((void *)mrdump_mini_ehdr, sd_offset, MRDUMP_MINI_HEADER_SIZE, 1);
	return MRDUMP_MINI_BUF_SIZE;
}
EXPORT_SYMBOL(mrdump_mini_create_oops_dump);
/*
 * Invalidate the ELF magic so a stale mini-dump is not parsed again after
 * the current one has been consumed.
 * NOTE(review): assumes mrdump_mini_ehdr is non-NULL by this point.
 */
void mrdump_mini_ipanic_done(void)
{
	mrdump_mini_ehdr->ehdr.e_ident[0] = 0;
}
  588. static void __init *remap_lowmem(phys_addr_t start, phys_addr_t size)
  589. {
  590. struct page **pages;
  591. phys_addr_t page_start;
  592. unsigned int page_count;
  593. pgprot_t prot;
  594. unsigned int i;
  595. void *vaddr;
  596. page_start = start - offset_in_page(start);
  597. page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
  598. prot = pgprot_noncached(PAGE_KERNEL);
  599. pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
  600. if (!pages) {
  601. LOGE("%s: Failed to allocate array for %u pages\n", __func__, page_count);
  602. return NULL;
  603. }
  604. for (i = 0; i < page_count; i++) {
  605. phys_addr_t addr = page_start + i * PAGE_SIZE;
  606. pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
  607. }
  608. vaddr = vmap(pages, page_count, VM_MAP, prot);
  609. kfree(pages);
  610. if (!vaddr) {
  611. LOGE("%s: Failed to map %u pages\n", __func__, page_count);
  612. return NULL;
  613. }
  614. return vaddr + offset_in_page(start);
  615. }
  616. #define TASK_INFO_SIZE PAGE_SIZE
  617. #define PSTORE_SIZE 0x8000
  618. static void __init mrdump_mini_elf_header_init(void)
  619. {
  620. if (MRDUMP_MINI_BUF_PADDR)
  621. mrdump_mini_ehdr =
  622. remap_lowmem(MRDUMP_MINI_BUF_PADDR,
  623. MRDUMP_MINI_HEADER_SIZE + TASK_INFO_SIZE + PSTORE_SIZE);
  624. else
  625. mrdump_mini_ehdr = kmalloc(MRDUMP_MINI_HEADER_SIZE, GFP_KERNEL);
  626. if (mrdump_mini_ehdr == NULL) {
  627. LOGE("mrdump mini reserve buffer fail");
  628. return;
  629. }
  630. LOGE("mirdump: reserved %x+%lx->%p", MRDUMP_MINI_BUF_PADDR,
  631. (unsigned long)MRDUMP_MINI_HEADER_SIZE, mrdump_mini_ehdr);
  632. memset_io(mrdump_mini_ehdr, 0, MRDUMP_MINI_HEADER_SIZE + sizeof(struct aee_process_info));
  633. fill_elf_header(&mrdump_mini_ehdr->ehdr, MRDUMP_MINI_NR_SECTION);
  634. }
  635. int mrdump_mini_init(void)
  636. {
  637. int i;
  638. unsigned long size, offset;
  639. struct pt_regs regs;
  640. mrdump_mini_elf_header_init();
  641. fill_psinfo(&mrdump_mini_ehdr->psinfo.data);
  642. fill_note_S(&mrdump_mini_ehdr->psinfo.note, "vmlinux", NT_PRPSINFO,
  643. sizeof(struct elf_prpsinfo));
  644. memset_io(&regs, 0, sizeof(struct pt_regs));
  645. for (i = 0; i < NR_CPUS + 1; i++) {
  646. fill_prstatus(&mrdump_mini_ehdr->prstatus[i].data, &regs, 0, i);
  647. fill_note_S(&mrdump_mini_ehdr->prstatus[i].note, "NA", NT_PRSTATUS,
  648. sizeof(struct elf_prstatus));
  649. }
  650. offset = offsetof(struct mrdump_mini_elf_header, psinfo);
  651. size = sizeof(mrdump_mini_ehdr->psinfo) + sizeof(mrdump_mini_ehdr->prstatus);
  652. fill_elf_note_phdr(&mrdump_mini_ehdr->phdrs[0], size, offset);
  653. for (i = 0; i < MRDUMP_MINI_NR_MISC; i++)
  654. fill_note_L(&mrdump_mini_ehdr->misc[i].note, "NA", 0,
  655. sizeof(struct mrdump_mini_elf_misc));
  656. mrdump_mini_build_elf_misc();
  657. fill_elf_note_phdr(&mrdump_mini_ehdr->phdrs[1], sizeof(mrdump_mini_ehdr->misc),
  658. offsetof(struct mrdump_mini_elf_header, misc));
  659. return 0;
  660. }
  661. module_init(mrdump_mini_init);
/*
 * Reserve the physical carve-out for the mini dump early in boot so the
 * page allocator never hands it out.  The size matches what
 * mrdump_mini_elf_header_init() later remaps.
 */
void mrdump_mini_reserve_memory(void)
{
	if (MRDUMP_MINI_BUF_PADDR)
		memblock_reserve(MRDUMP_MINI_BUF_PADDR,
				 MRDUMP_MINI_HEADER_SIZE + TASK_INFO_SIZE + PSTORE_SIZE);
}
/*
 * Device-tree hook for the "mediatek,minirdump" reserved-memory node:
 * only logs the range (the actual reservation is done by the core
 * reserved-memory code before this callback runs).
 */
int mini_rdump_reserve_memory(struct reserved_mem *rmem)
{
	pr_alert("[memblock]%s: 0x%llx - 0x%llx (0x%llx)\n", "mini_rdump",
		 (unsigned long long)rmem->base,
		 (unsigned long long)rmem->base + (unsigned long long)rmem->size,
		 (unsigned long long)rmem->size);
	return 0;
}
RESERVEDMEM_OF_DECLARE(reserve_memory_minirdump, "mediatek,minirdump",
		       mini_rdump_reserve_memory);
/* runtime-writable: include every CPU's current task in the dump */
module_param(dump_all_cpus, bool, S_IRUGO | S_IWUSR);