aed-main.c 53 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129
  1. /*
  2. * (C) Copyright 2010
  3. * MediaTek <www.MediaTek.com>
  4. *
  5. * Android Exception Device
  6. *
  7. */
  8. #include <linux/cdev.h>
  9. #include <linux/delay.h>
  10. #include <linux/device.h>
  11. #include <linux/fs.h>
  12. #include <linux/hardirq.h>
  13. #include <linux/init.h>
  14. #include <linux/kallsyms.h>
  15. #include <linux/miscdevice.h>
  16. #include <linux/module.h>
  17. #include <linux/poll.h>
  18. #include <linux/proc_fs.h>
  19. #include <linux/wait.h>
  20. #include <linux/sched.h>
  21. #include <linux/vmalloc.h>
  22. #include <disp_assert_layer.h>
  23. #include <linux/slab.h>
  24. #include <linux/spinlock.h>
  25. #include <linux/semaphore.h>
  26. #include <linux/workqueue.h>
  27. #include <linux/kthread.h>
  28. #include <linux/stacktrace.h>
  29. #include <linux/compat.h>
  30. #include <mt-plat/aee.h>
  31. #include <linux/seq_file.h>
  32. #include <linux/completion.h>
  33. #include "aed.h"
/*
 * A FIFO of pending exception reports, shared between the reporting
 * context and the worker that drains it.
 */
struct aee_req_queue {
	struct list_head list;	/* queued records (aee_oops / aed_eerec) */
	spinlock_t lock;	/* protects .list; taken with IRQs disabled */
};
/* Kernel-exception (KE) request queue and the work item that drains it. */
static struct aee_req_queue ke_queue;
static struct work_struct ke_work;
/* Signalled when userspace closes the KE log; ke_gen_ind_msg() waits on it. */
static DECLARE_COMPLETION(aed_ke_com);
/* External-exception (EE) request queue and its work item. */
static struct aee_req_queue ee_queue;
static struct work_struct ee_work;
static DECLARE_COMPLETION(aed_ee_com);
/*
 * may be accessed from irq
 */
static spinlock_t aed_device_lock;
int aee_mode = AEE_MODE_NOT_INIT;	/* current AEE operating mode */
static int force_red_screen = AEE_FORCE_NOT_SET;
static struct proc_dir_entry *aed_proc_dir;	/* /proc/aed directory */
/* Upper bounds for the userspace stack / maps snapshots copied to clients. */
#define MaxStackSize 8100
#define MaxMapsSize 8100
  53. /******************************************************************************
  54. * DEBUG UTILITIES
  55. *****************************************************************************/
  56. void msg_show(const char *prefix, struct AE_Msg *msg)
  57. {
  58. const char *cmd_type = NULL;
  59. const char *cmd_id = NULL;
  60. if (msg == NULL) {
  61. LOGD("%s: EMPTY msg\n", prefix);
  62. return;
  63. }
  64. switch (msg->cmdType) {
  65. case AE_REQ:
  66. cmd_type = "REQ";
  67. break;
  68. case AE_RSP:
  69. cmd_type = "RESPONSE";
  70. break;
  71. case AE_IND:
  72. cmd_type = "IND";
  73. break;
  74. default:
  75. cmd_type = "UNKNOWN";
  76. break;
  77. }
  78. switch (msg->cmdId) {
  79. case AE_REQ_IDX:
  80. cmd_id = "IDX";
  81. break;
  82. case AE_REQ_CLASS:
  83. cmd_id = "CLASS";
  84. break;
  85. case AE_REQ_TYPE:
  86. cmd_id = "TYPE";
  87. break;
  88. case AE_REQ_MODULE:
  89. cmd_id = "MODULE";
  90. break;
  91. case AE_REQ_PROCESS:
  92. cmd_id = "PROCESS";
  93. break;
  94. case AE_REQ_DETAIL:
  95. cmd_id = "DETAIL";
  96. break;
  97. case AE_REQ_BACKTRACE:
  98. cmd_id = "BACKTRACE";
  99. break;
  100. case AE_REQ_COREDUMP:
  101. cmd_id = "COREDUMP";
  102. break;
  103. case AE_IND_EXP_RAISED:
  104. cmd_id = "EXP_RAISED";
  105. break;
  106. case AE_IND_WRN_RAISED:
  107. cmd_id = "WARN_RAISED";
  108. break;
  109. case AE_IND_REM_RAISED:
  110. cmd_id = "REMIND_RAISED";
  111. break;
  112. case AE_IND_FATAL_RAISED:
  113. cmd_id = "FATAL_RAISED";
  114. break;
  115. case AE_IND_LOG_CLOSE:
  116. cmd_id = "CLOSE";
  117. break;
  118. case AE_REQ_USERSPACEBACKTRACE:
  119. cmd_id = "USERBACKTRACE";
  120. break;
  121. case AE_REQ_USER_REG:
  122. cmd_id = "USERREG";
  123. break;
  124. default:
  125. cmd_id = "UNKNOWN";
  126. break;
  127. }
  128. LOGD("%s: cmdType=%s[%d] cmdId=%s[%d] seq=%d arg=%x len=%d\n", prefix, cmd_type,
  129. msg->cmdType, cmd_id, msg->cmdId, msg->seq, msg->arg, msg->len);
  130. }
  131. /******************************************************************************
  132. * CONSTANT DEFINITIONS
  133. *****************************************************************************/
/* Names of the /proc/aed entries exposed for each log source. */
#define CURRENT_KE_CONSOLE "current-ke-console"
#define CURRENT_EE_COREDUMP "current-ee-coredump"
#define CURRENT_KE_ANDROID_MAIN "current-ke-android_main"
#define CURRENT_KE_ANDROID_RADIO "current-ke-android_radio"
#define CURRENT_KE_ANDROID_SYSTEM "current-ke-android_system"
#define CURRENT_KE_USERSPACE_INFO "current-ke-userspace_info"
#define CURRENT_KE_MMPROFILE "current-ke-mmprofile"
#define MAX_EE_COREDUMP 0x800000	/* 8 MB cap on an EE coredump */
  142. /******************************************************************************
  143. * STRUCTURE DEFINITIONS
  144. *****************************************************************************/
struct aed_eerec { /* external exception record */
	struct list_head list;		/* linkage on ee_queue.list */
	char assert_type[32];		/* e.g. "modem", "md32" */
	char exp_filename[512];		/* source file, or free-form text */
	unsigned int exp_linenum;	/* 0 when not a file/line assert */
	unsigned int fatal1;		/* extra error codes for fatal asserts */
	unsigned int fatal2;
	int *ee_log;			/* raw exception log (kmalloc'd) */
	int ee_log_size;		/* size of ee_log in bytes */
	int *ee_phy;			/* memory dump (vmalloc'd) */
	int ee_phy_size;		/* size of ee_phy in bytes */
	char *msg;			/* current reply buffer (msg_create) */
	int db_opt;			/* DB_OPT_* dump options */
};
struct aed_kerec { /* TODO: kernel exception record */
	char *msg;			/* current reply buffer (msg_create) */
	struct aee_oops *lastlog;	/* the KE being reported, NULL if none */
};
/* Device-wide state: one EE record, one KE record, plus reader wait queues. */
struct aed_dev {
	struct aed_eerec *eerec;	/* current EE record, NULL if none */
	wait_queue_head_t eewait;	/* EE readers block here */
	struct aed_kerec kerec;		/* current KE record */
	wait_queue_head_t kewait;	/* KE readers block here */
};
  169. /******************************************************************************
  170. * FUNCTION PROTOTYPES
  171. *****************************************************************************/
/* ioctl handler, defined later in this file */
static long aed_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
/******************************************************************************
 * GLOBAL DATA
 *****************************************************************************/
/* The single device instance shared by all open file handles. */
static struct aed_dev aed_dev;
  177. /******************************************************************************
  178. * Message Utilities
  179. *****************************************************************************/
  180. inline void msg_destroy(char **ppmsg)
  181. {
  182. if (*ppmsg != NULL) {
  183. vfree(*ppmsg);
  184. *ppmsg = NULL;
  185. }
  186. }
  187. inline struct AE_Msg *msg_create(char **ppmsg, int extra_size)
  188. {
  189. int size;
  190. msg_destroy(ppmsg);
  191. size = sizeof(struct AE_Msg) + extra_size;
  192. *ppmsg = vzalloc(size);
  193. if (*ppmsg == NULL) {
  194. LOGE("%s : kzalloc() fail\n", __func__);
  195. return NULL;
  196. }
  197. ((struct AE_Msg *) (*ppmsg))->len = extra_size;
  198. return (struct AE_Msg *) *ppmsg;
  199. }
/*
 * Copy the current reply message (struct AE_Msg header + payload) to
 * userspace, honouring *f_pos so the message can be consumed across
 * multiple read() calls.
 *
 * Returns bytes copied, 0 at end-of-message, or -EFAULT when the request
 * overruns the message or copy_to_user() fails.
 */
static ssize_t msg_copy_to_user(const char *prefix, char *msg, char __user *buf,
		size_t count, loff_t *f_pos)
{
	ssize_t ret = 0;
	int len;
	char *msg_tmp = NULL;

	if (msg == NULL)
		return 0;
	msg_show(prefix, (struct AE_Msg *) msg);
	/* snapshot the message so a concurrent writer cannot change it mid-copy */
	msg_tmp = kzalloc(((struct AE_Msg *)msg)->len + sizeof(struct AE_Msg), GFP_KERNEL);
	if (msg_tmp != NULL) {
		memcpy(msg_tmp, msg, ((struct AE_Msg *)msg)->len + sizeof(struct AE_Msg));
	} else {
		LOGE("%s : kzalloc() fail!\n", __func__);
		/* best effort: fall back to copying from the live buffer */
		msg_tmp = msg;
	}
	/* sanity-check the command type before exposing the buffer */
	if (msg_tmp == NULL || ((struct AE_Msg *)msg_tmp)->cmdType < AE_REQ
	    || ((struct AE_Msg *)msg_tmp)->cmdType > AE_CMD_TYPE_END)
		goto out;
	len = ((struct AE_Msg *) msg_tmp)->len + sizeof(struct AE_Msg);
	if (*f_pos >= len) {
		/* everything already consumed */
		ret = 0;
		goto out;
	}
	/* TODO: semaphore */
	if ((*f_pos + count) > len) {
		/* reader asked for more than remains; rejected as an error */
		LOGE("read size overflow, count=%zx, *f_pos=%llx\n", count, *f_pos);
		count = len - *f_pos;
		ret = -EFAULT;
		goto out;
	}
	if (copy_to_user(buf, msg_tmp + *f_pos, count)) {
		LOGE("copy_to_user failed\n");
		ret = -EFAULT;
		goto out;
	}
	*f_pos += count;
	ret = count;
out:
	/* only free the snapshot, never the live buffer we fell back to */
	if (msg_tmp != msg)
		kfree(msg_tmp);
	return ret;
}
  243. /******************************************************************************
  244. * Kernel message handlers
  245. *****************************************************************************/
  246. static void ke_gen_notavail_msg(void)
  247. {
  248. struct AE_Msg *rep_msg;
  249. LOGD("%s\n", __func__);
  250. rep_msg = msg_create(&aed_dev.kerec.msg, 0);
  251. if (rep_msg == NULL)
  252. return;
  253. rep_msg->cmdType = AE_RSP;
  254. rep_msg->arg = AE_NOT_AVAILABLE;
  255. rep_msg->len = 0;
  256. }
  257. static void ke_gen_class_msg(void)
  258. {
  259. #define KE_CLASS_STR "Kernel (KE)"
  260. #define KE_CLASS_SIZE 12
  261. struct AE_Msg *rep_msg;
  262. char *data;
  263. LOGD("%s\n", __func__);
  264. rep_msg = msg_create(&aed_dev.kerec.msg, KE_CLASS_SIZE);
  265. if (rep_msg == NULL)
  266. return;
  267. data = (char *)rep_msg + sizeof(struct AE_Msg);
  268. rep_msg->cmdType = AE_RSP;
  269. rep_msg->cmdId = AE_REQ_CLASS;
  270. rep_msg->len = KE_CLASS_SIZE;
  271. strncpy(data, KE_CLASS_STR, KE_CLASS_SIZE);
  272. }
  273. static void ke_gen_type_msg(void)
  274. {
  275. #define KE_TYPE_STR "PANIC"
  276. #define KE_TYPE_SIZE 6
  277. struct AE_Msg *rep_msg;
  278. char *data;
  279. LOGD("%s\n", __func__);
  280. rep_msg = msg_create(&aed_dev.kerec.msg, KE_TYPE_SIZE);
  281. if (rep_msg == NULL)
  282. return;
  283. data = (char *)rep_msg + sizeof(struct AE_Msg);
  284. rep_msg->cmdType = AE_RSP;
  285. rep_msg->cmdId = AE_REQ_TYPE;
  286. rep_msg->len = KE_TYPE_SIZE;
  287. strncpy(data, KE_TYPE_STR, KE_TYPE_SIZE);
  288. }
  289. static void ke_gen_module_msg(void)
  290. {
  291. struct AE_Msg *rep_msg;
  292. char *data;
  293. LOGD("%s\n", __func__);
  294. rep_msg = msg_create(&aed_dev.kerec.msg, strlen(aed_dev.kerec.lastlog->module) + 1);
  295. if (rep_msg == NULL)
  296. return;
  297. data = (char *)rep_msg + sizeof(struct AE_Msg);
  298. rep_msg->cmdType = AE_RSP;
  299. rep_msg->cmdId = AE_REQ_MODULE;
  300. rep_msg->len = strlen(aed_dev.kerec.lastlog->module) + 1;
  301. strlcpy(data, aed_dev.kerec.lastlog->module, sizeof(aed_dev.kerec.lastlog->module));
  302. }
  303. static void ke_gen_detail_msg(const struct AE_Msg *req_msg)
  304. {
  305. struct AE_Msg *rep_msg;
  306. char *data;
  307. LOGD("ke_gen_detail_msg is called\n");
  308. LOGD("%s req_msg arg:%d\n", __func__, req_msg->arg);
  309. rep_msg = msg_create(&aed_dev.kerec.msg, aed_dev.kerec.lastlog->detail_len + 1);
  310. if (rep_msg == NULL)
  311. return;
  312. data = (char *)rep_msg + sizeof(struct AE_Msg);
  313. rep_msg->cmdType = AE_RSP;
  314. rep_msg->cmdId = AE_REQ_DETAIL;
  315. rep_msg->len = aed_dev.kerec.lastlog->detail_len + 1;
  316. if (aed_dev.kerec.lastlog->detail != NULL)
  317. strlcpy(data, aed_dev.kerec.lastlog->detail, aed_dev.kerec.lastlog->detail_len);
  318. data[aed_dev.kerec.lastlog->detail_len] = 0;
  319. LOGD("ke_gen_detail_msg is return: %s\n", data);
  320. }
  321. static void ke_gen_process_msg(void)
  322. {
  323. struct AE_Msg *rep_msg;
  324. char *data;
  325. LOGD("%s\n", __func__);
  326. rep_msg = msg_create(&aed_dev.kerec.msg, AEE_PROCESS_NAME_LENGTH);
  327. if (rep_msg == NULL)
  328. return;
  329. data = (char *)rep_msg + sizeof(struct AE_Msg);
  330. rep_msg->cmdType = AE_RSP;
  331. rep_msg->cmdId = AE_REQ_PROCESS;
  332. strncpy(data, aed_dev.kerec.lastlog->process_path, AEE_PROCESS_NAME_LENGTH);
  333. /* Count into the NUL byte at end of string */
  334. rep_msg->len = strlen(data) + 1;
  335. }
  336. static void ke_gen_backtrace_msg(void)
  337. {
  338. struct AE_Msg *rep_msg;
  339. char *data;
  340. LOGD("%s\n", __func__);
  341. rep_msg = msg_create(&aed_dev.kerec.msg, AEE_BACKTRACE_LENGTH);
  342. if (rep_msg == NULL)
  343. return;
  344. data = (char *)rep_msg + sizeof(struct AE_Msg);
  345. rep_msg->cmdType = AE_RSP;
  346. rep_msg->cmdId = AE_REQ_BACKTRACE;
  347. strcpy(data, aed_dev.kerec.lastlog->backtrace);
  348. /* Count into the NUL byte at end of string */
  349. rep_msg->len = strlen(data) + 1;
  350. }
  351. static void ke_gen_userbacktrace_msg(void)
  352. {
  353. struct AE_Msg *rep_msg;
  354. char *data;
  355. int userinfo_len = 0;
  356. userinfo_len = aed_dev.kerec.lastlog->userthread_stack.StackLength + sizeof(pid_t)+sizeof(int);
  357. rep_msg = msg_create(&aed_dev.kerec.msg, MaxStackSize);
  358. if (rep_msg == NULL)
  359. return;
  360. data = (char *)rep_msg + sizeof(struct AE_Msg);
  361. rep_msg->cmdType = AE_RSP;
  362. rep_msg->cmdId = AE_REQ_USERSPACEBACKTRACE;
  363. rep_msg->len = userinfo_len;
  364. LOGD("%s rep_msg->len:%lx,\n", __func__, (long)rep_msg->len);
  365. memcpy(data, (char *) &(aed_dev.kerec.lastlog->userthread_stack), sizeof(pid_t) + sizeof(int));
  366. LOGD("len(pid+int):%lx\n", (long)(sizeof(pid_t)+sizeof(int)));
  367. LOGD("des :%lx\n", (long)(data + sizeof(pid_t)+sizeof(int)));
  368. LOGD("src addr :%lx\n", (long)((char *)(aed_dev.kerec.lastlog->userthread_stack.Userthread_Stack)));
  369. memcpy((data + sizeof(pid_t)+sizeof(int)),
  370. (char *)(aed_dev.kerec.lastlog->userthread_stack.Userthread_Stack),
  371. aed_dev.kerec.lastlog->userthread_stack.StackLength);
  372. #if 0 /* for debug */
  373. {
  374. int i = 0;
  375. for (i = 0; i < 64; i++)
  376. LOGD("%x\n ", data[i]);
  377. }
  378. #endif
  379. LOGD("%s +++\n", __func__);
  380. }
  381. static void ke_gen_usermaps_msg(void)
  382. {
  383. struct AE_Msg *rep_msg;
  384. char *data;
  385. int userinfo_len = 0;
  386. userinfo_len = aed_dev.kerec.lastlog->userthread_maps.Userthread_mapsLength + sizeof(pid_t)+sizeof(int);
  387. rep_msg = msg_create(&aed_dev.kerec.msg, MaxMapsSize);
  388. if (rep_msg == NULL)
  389. return;
  390. data = (char *)rep_msg + sizeof(struct AE_Msg);
  391. rep_msg->cmdType = AE_RSP;
  392. rep_msg->cmdId = AE_REQ_USER_MAPS;
  393. rep_msg->len = userinfo_len;
  394. LOGD("%s rep_msg->len:%lx,\n", __func__, (long)rep_msg->len);
  395. memcpy(data, (char *) &(aed_dev.kerec.lastlog->userthread_maps), sizeof(pid_t) + sizeof(int));
  396. LOGD("len(pid+int):%lx\n", (long)(sizeof(pid_t)+sizeof(int)));
  397. LOGD("des :%lx\n", (long)(data + sizeof(pid_t)+sizeof(int)));
  398. LOGD("src addr :%lx\n", (long)((char *)(aed_dev.kerec.lastlog->userthread_maps.Userthread_maps)));
  399. memcpy((data + sizeof(pid_t)+sizeof(int)),
  400. (char *)(aed_dev.kerec.lastlog->userthread_maps.Userthread_maps),
  401. aed_dev.kerec.lastlog->userthread_maps.Userthread_mapsLength);
  402. LOGD("%s +++\n", __func__);
  403. }
  404. static void ke_gen_user_reg_msg(void)
  405. {
  406. struct AE_Msg *rep_msg;
  407. char *data;
  408. rep_msg = msg_create(&aed_dev.kerec.msg, sizeof(struct aee_thread_reg));
  409. if (rep_msg == NULL)
  410. return;
  411. data = (char *)rep_msg + sizeof(struct AE_Msg);
  412. rep_msg->cmdType = AE_RSP;
  413. rep_msg->cmdId = AE_REQ_USER_REG;
  414. /* Count into the NUL byte at end of string */
  415. rep_msg->len = sizeof(struct aee_thread_reg);
  416. memcpy(data, (char *) &(aed_dev.kerec.lastlog->userthread_reg), sizeof(struct aee_thread_reg));
  417. #if 0 /* for debug */
  418. #ifdef __aarch64__ /* 64bit kernel+32 u */
  419. if (is_compat_task()) { /* K64_U32 */
  420. LOGE(" K64+ U32 pc/lr/sp 0x%16lx/0x%16lx/0x%16lx\n",
  421. (long)(aed_dev.kerec.lastlog->userthread_reg.regs.user_regs.pc),
  422. (long)(aed_dev.kerec.lastlog->userthread_reg.regs.regs[14]),
  423. (long)(aed_dev.kerec.lastlog->userthread_reg.regs.regs[13]));
  424. }
  425. #endif
  426. #endif
  427. LOGD("%s +++\n", __func__);
  428. }
  429. static int ke_gen_ind_msg(struct aee_oops *oops)
  430. {
  431. unsigned long flags = 0;
  432. LOGD("%s oops %p\n", __func__, oops);
  433. if (oops == NULL)
  434. return -1;
  435. spin_lock_irqsave(&aed_device_lock, flags);
  436. if (aed_dev.kerec.lastlog == NULL) {
  437. aed_dev.kerec.lastlog = oops;
  438. } else {
  439. /*
  440. * waaa.. Two ke api at the same time
  441. * or ke api during aed process is still busy at ke
  442. * discard the new oops!
  443. * Code should NEVER come here now!!!
  444. */
  445. LOGW("%s: BUG!!! More than one kernel message queued, AEE does not support concurrent KE dump\n",
  446. __func__);
  447. aee_oops_free(oops);
  448. spin_unlock_irqrestore(&aed_device_lock, flags);
  449. return -1;
  450. }
  451. spin_unlock_irqrestore(&aed_device_lock, flags);
  452. if (aed_dev.kerec.lastlog != NULL) {
  453. struct AE_Msg *rep_msg;
  454. rep_msg = msg_create(&aed_dev.kerec.msg, 0);
  455. if (rep_msg == NULL)
  456. return 0;
  457. rep_msg->cmdType = AE_IND;
  458. switch (oops->attr) {
  459. case AE_DEFECT_REMINDING:
  460. rep_msg->cmdId = AE_IND_REM_RAISED;
  461. break;
  462. case AE_DEFECT_WARNING:
  463. rep_msg->cmdId = AE_IND_WRN_RAISED;
  464. break;
  465. case AE_DEFECT_EXCEPTION:
  466. rep_msg->cmdId = AE_IND_EXP_RAISED;
  467. break;
  468. case AE_DEFECT_FATAL:
  469. rep_msg->cmdId = AE_IND_FATAL_RAISED;
  470. break;
  471. default:
  472. /* Huh... something wrong, just go to exception */
  473. rep_msg->cmdId = AE_IND_EXP_RAISED;
  474. break;
  475. }
  476. rep_msg->arg = oops->clazz;
  477. rep_msg->len = 0;
  478. rep_msg->dbOption = oops->dump_option;
  479. init_completion(&aed_ke_com);
  480. wake_up(&aed_dev.kewait);
  481. /* wait until current ke work is done, then aed_dev is available,
  482. add a 60s timeout in case of debuggerd quit abnormally */
  483. if (wait_for_completion_timeout(&aed_ke_com, msecs_to_jiffies(5 * 60 * 1000)))
  484. LOGE("%s: TIMEOUT, not receive close event, skip\n", __func__);
  485. }
  486. return 0;
  487. }
  488. static void ke_destroy_log(void)
  489. {
  490. LOGD("%s\n", __func__);
  491. msg_destroy(&aed_dev.kerec.msg);
  492. if (aed_dev.kerec.lastlog) {
  493. if (strncmp
  494. (aed_dev.kerec.lastlog->module, IPANIC_MODULE_TAG,
  495. strlen(IPANIC_MODULE_TAG)) == 0) {
  496. ipanic_oops_free(aed_dev.kerec.lastlog, 0);
  497. } else {
  498. aee_oops_free(aed_dev.kerec.lastlog);
  499. }
  500. aed_dev.kerec.lastlog = NULL;
  501. }
  502. }
  503. static int ke_log_avail(void)
  504. {
  505. if (aed_dev.kerec.lastlog != NULL) {
  506. #ifdef __aarch64__
  507. if (is_compat_task() != ((aed_dev.kerec.lastlog->dump_option & DB_OPT_AARCH64) == 0))
  508. return 0;
  509. #endif
  510. LOGI("AEE api log available\n");
  511. return 1;
  512. }
  513. return 0;
  514. }
  515. static void ke_queue_request(struct aee_oops *oops)
  516. {
  517. unsigned long flags = 0;
  518. int ret;
  519. spin_lock_irqsave(&ke_queue.lock, flags);
  520. list_add_tail(&oops->list, &ke_queue.list);
  521. spin_unlock_irqrestore(&ke_queue.lock, flags);
  522. ret = queue_work(system_wq, &ke_work);
  523. LOGI("%s: add new ke work, status %d\n", __func__, ret);
  524. }
  525. static void ke_worker(struct work_struct *work)
  526. {
  527. int ret = 0;
  528. struct aee_oops *oops, *n;
  529. unsigned long flags = 0;
  530. list_for_each_entry_safe(oops, n, &ke_queue.list, list) {
  531. if (oops == NULL) {
  532. LOGE("%s:Invalid aee_oops struct\n", __func__);
  533. return;
  534. }
  535. ret = ke_gen_ind_msg(oops);
  536. spin_lock_irqsave(&ke_queue.lock, flags);
  537. if (!ret)
  538. list_del(&oops->list);
  539. spin_unlock_irqrestore(&ke_queue.lock, flags);
  540. ke_destroy_log();
  541. }
  542. }
  543. /******************************************************************************
  544. * EE message handlers
  545. *****************************************************************************/
  546. static void ee_gen_notavail_msg(void)
  547. {
  548. struct AE_Msg *rep_msg;
  549. LOGD("%s\n", __func__);
  550. rep_msg = msg_create(&aed_dev.eerec->msg, 0);
  551. if (rep_msg == NULL)
  552. return;
  553. rep_msg->cmdType = AE_RSP;
  554. rep_msg->arg = AE_NOT_AVAILABLE;
  555. rep_msg->len = 0;
  556. }
  557. static void ee_gen_class_msg(void)
  558. {
  559. #define EX_CLASS_EE_STR "External (EE)"
  560. #define EX_CLASS_EE_SIZE 14
  561. struct AE_Msg *rep_msg;
  562. char *data;
  563. LOGD("%s\n", __func__);
  564. rep_msg = msg_create(&aed_dev.eerec->msg, EX_CLASS_EE_SIZE);
  565. if (rep_msg == NULL)
  566. return;
  567. data = (char *)rep_msg + sizeof(struct AE_Msg);
  568. rep_msg->cmdType = AE_RSP;
  569. rep_msg->cmdId = AE_REQ_CLASS;
  570. rep_msg->len = EX_CLASS_EE_SIZE;
  571. strncpy(data, EX_CLASS_EE_STR, EX_CLASS_EE_SIZE);
  572. }
  573. static void ee_gen_type_msg(void)
  574. {
  575. struct AE_Msg *rep_msg;
  576. char *data;
  577. struct aed_eerec *eerec = aed_dev.eerec;
  578. LOGD("%s\n", __func__);
  579. rep_msg =
  580. msg_create(&eerec->msg, strlen((char const *)&eerec->assert_type) + 1);
  581. if (rep_msg == NULL)
  582. return;
  583. data = (char *)rep_msg + sizeof(struct AE_Msg);
  584. rep_msg->cmdType = AE_RSP;
  585. rep_msg->cmdId = AE_REQ_TYPE;
  586. rep_msg->len = strlen((char const *)&eerec->assert_type) + 1;
  587. strncpy(data, (char const *)&eerec->assert_type,
  588. strlen((char const *)&eerec->assert_type));
  589. }
  590. static void ee_gen_process_msg(void)
  591. {
  592. #define PROCESS_STRLEN 512
  593. int n = 0;
  594. struct AE_Msg *rep_msg;
  595. char *data;
  596. struct aed_eerec *eerec = aed_dev.eerec;
  597. LOGD("%s\n", __func__);
  598. rep_msg = msg_create(&eerec->msg, PROCESS_STRLEN);
  599. if (rep_msg == NULL)
  600. return;
  601. data = (char *)rep_msg + sizeof(struct AE_Msg);
  602. if (eerec->exp_linenum != 0) {
  603. /* for old aed_md_exception1() */
  604. n = sprintf(data, "%s", eerec->assert_type);
  605. if (eerec->exp_filename[0] != 0) {
  606. n += sprintf(data + n, ", filename=%s,line=%d", eerec->exp_filename,
  607. eerec->exp_linenum);
  608. } else if (eerec->fatal1 != 0 && eerec->fatal2 != 0) {
  609. n += sprintf(data + n, ", err1=%d,err2=%d", eerec->fatal1,
  610. eerec->fatal2);
  611. }
  612. } else {
  613. LOGD("ee_gen_process_msg else\n");
  614. n = sprintf(data, "%s", eerec->exp_filename);
  615. }
  616. rep_msg->cmdType = AE_RSP;
  617. rep_msg->cmdId = AE_REQ_PROCESS;
  618. rep_msg->len = n + 1;
  619. }
/*
 * Weak stub, overridden by the CCCI modem driver when present.
 * Returning -1 makes callers fall back to "no modem debug info".
 */
__weak int aee_dump_ccci_debug_info(int md_id, void **addr, int *size)
{
	return -1;
}
/*
 * Build the DETAIL reply for the current external exception.
 *
 * For "md32" exceptions the stored log is plain text and is emitted
 * directly; otherwise the log is hex-dumped a word-quadruple per line,
 * optionally followed by modem CCCI debug data.  The reply is flagged
 * AE_PASS_BY_MEM and its length set to the text written plus the NUL.
 *
 * NOTE(review): only two of the snprintf accumulations are guarded with
 * min(); the unguarded "n += snprintf(...)" calls can push n past
 * msgsize on truncation, making later (msgsize - n) negative — presumed
 * unreachable with the current sizing, but worth confirming.
 */
static void ee_gen_detail_msg(void)
{
	int i, n = 0, l = 0;
	struct AE_Msg *rep_msg;
	char *data;
	int *mem;
	int md_id;
	int msgsize;
	char *ccci_log = NULL;
	int ccci_log_size = 0;
	struct aed_eerec *eerec = aed_dev.eerec;

	LOGD("%s\n", __func__);
	if (strncmp(eerec->assert_type, "md32", 4) == 0) {
		/* md32 log is already text: emit it verbatim */
		msgsize = eerec->ee_log_size + 128;
		rep_msg = msg_create(&eerec->msg, msgsize);
		if (rep_msg == NULL)
			return;
		data = (char *)rep_msg + sizeof(struct AE_Msg);
		/* n += snprintf(data + n, msgsize - n, "== EXTERNAL EXCEPTION LOG ==\n"); */
		/* n += snprintf(data + n, msgsize - n, "%s\n", (char *)eerec->ee_log); */
		l = snprintf(data + n, msgsize - n, "== EXTERNAL EXCEPTION LOG ==\n%s\n", (char *)eerec->ee_log);
		if (l >= msgsize - n)
			LOGE("ee_log may overflow! %d >= %d\n", l, msgsize - n);
		n += min(l, msgsize - n);
	} else {
		/* modem exceptions may carry extra CCCI debug data */
		if (strncmp(eerec->assert_type, "modem", 5) == 0) {
			if (1 == sscanf(eerec->exp_filename, "md%d:", &md_id)) {
				if (aee_dump_ccci_debug_info(md_id, (void **)&ccci_log, &ccci_log_size)) {
					ccci_log = NULL;
					ccci_log_size = 0;
				}
			}
		}
		/* *4 leaves room for the hex expansion of each raw byte */
		msgsize = (eerec->ee_log_size + ccci_log_size) * 4 + 128;
		rep_msg = msg_create(&eerec->msg, msgsize);
		if (rep_msg == NULL)
			return;
		data = (char *)rep_msg + sizeof(struct AE_Msg);
		n += snprintf(data + n, msgsize - n, "== EXTERNAL EXCEPTION LOG ==\n");
		mem = (int *)eerec->ee_log;
		if (mem) {
			/* hex-dump the raw log, four 32-bit words per line */
			for (i = 0; i < eerec->ee_log_size / 4; i += 4) {
				n += snprintf(data + n, msgsize - n, "0x%08X 0x%08X 0x%08X 0x%08X\n",
					      mem[i], mem[i + 1], mem[i + 2], mem[i + 3]);
			}
		} else {
			n += snprintf(data + n, msgsize - n, "kmalloc fail, no log available\n");
		}
	}
	l = snprintf(data + n, msgsize - n, "== MEM DUMP(%d) ==\n", eerec->ee_phy_size);
	n += min(l, msgsize - n);
	if (ccci_log) {
		n += snprintf(data + n, msgsize - n, "== CCCI LOG ==\n");
		mem = (int *)ccci_log;
		for (i = 0; i < ccci_log_size / 4; i += 4) {
			n += snprintf(data + n, msgsize - n, "0x%08X 0x%08X 0x%08X 0x%08X\n",
				      mem[i], mem[i + 1], mem[i + 2], mem[i + 3]);
		}
		n += snprintf(data + n, msgsize - n, "== MEM DUMP(%d) ==\n", ccci_log_size);
	}
	rep_msg->cmdType = AE_RSP;
	rep_msg->cmdId = AE_REQ_DETAIL;
	rep_msg->arg = AE_PASS_BY_MEM;
	rep_msg->len = n + 1;
}
  689. static void ee_gen_coredump_msg(void)
  690. {
  691. struct AE_Msg *rep_msg;
  692. char *data;
  693. LOGD("%s\n", __func__);
  694. rep_msg = msg_create(&aed_dev.eerec->msg, 256);
  695. if (rep_msg == NULL)
  696. return;
  697. data = (char *)rep_msg + sizeof(struct AE_Msg);
  698. rep_msg->cmdType = AE_RSP;
  699. rep_msg->cmdId = AE_REQ_COREDUMP;
  700. rep_msg->arg = 0;
  701. sprintf(data, "/proc/aed/%s", CURRENT_EE_COREDUMP);
  702. rep_msg->len = strlen(data) + 1;
  703. }
  704. static void ee_destroy_log(void)
  705. {
  706. struct aed_eerec *eerec = aed_dev.eerec;
  707. LOGD("%s\n", __func__);
  708. if (eerec == NULL)
  709. return;
  710. aed_dev.eerec = NULL;
  711. msg_destroy(&eerec->msg);
  712. if (eerec->ee_phy != NULL) {
  713. vfree(eerec->ee_phy);
  714. eerec->ee_phy = NULL;
  715. }
  716. eerec->ee_log_size = 0;
  717. eerec->ee_phy_size = 0;
  718. if (eerec->ee_log != NULL) {
  719. kfree(eerec->ee_log);
  720. /*after this, another ee can enter */
  721. eerec->ee_log = NULL;
  722. }
  723. kfree(eerec);
  724. }
  725. static int ee_log_avail(void)
  726. {
  727. return (aed_dev.eerec != NULL);
  728. }
  729. static char *ee_msg_avail(void)
  730. {
  731. if (aed_dev.eerec)
  732. return aed_dev.eerec->msg;
  733. return NULL;
  734. }
  735. static void ee_gen_ind_msg(struct aed_eerec *eerec)
  736. {
  737. unsigned long flags = 0;
  738. struct AE_Msg *rep_msg;
  739. LOGD("%s\n", __func__);
  740. if (eerec == NULL)
  741. return;
  742. /*
  743. Don't lock the whole function for the time is uncertain.
  744. we rely on the fact that ee_rec is not null if race here!
  745. */
  746. spin_lock_irqsave(&aed_device_lock, flags);
  747. if (aed_dev.eerec == NULL) {
  748. aed_dev.eerec = eerec;
  749. } else {
  750. /* should never come here, skip*/
  751. spin_unlock_irqrestore(&aed_device_lock, flags);
  752. LOGW("%s: More than one EE message queued\n", __func__);
  753. return;
  754. }
  755. spin_unlock_irqrestore(&aed_device_lock, flags);
  756. rep_msg = msg_create(&aed_dev.eerec->msg, 0);
  757. if (rep_msg == NULL)
  758. return;
  759. rep_msg->cmdType = AE_IND;
  760. rep_msg->cmdId = AE_IND_EXP_RAISED;
  761. rep_msg->arg = AE_EE;
  762. rep_msg->len = 0;
  763. rep_msg->dbOption = eerec->db_opt;
  764. init_completion(&aed_ee_com);
  765. wake_up(&aed_dev.eewait);
  766. if (wait_for_completion_timeout(&aed_ee_com, msecs_to_jiffies(5 * 60 * 1000)))
  767. LOGE("%s: TIMEOUT, not receive close event, skip\n", __func__);
  768. }
  769. static void ee_queue_request(struct aed_eerec *eerec)
  770. {
  771. int ret;
  772. unsigned long flags = 0;
  773. spin_lock_irqsave(&ee_queue.lock, flags);
  774. list_add_tail(&eerec->list, &ee_queue.list);
  775. spin_unlock_irqrestore(&ee_queue.lock, flags);
  776. ret = queue_work(system_wq, &ee_work);
  777. LOGI("%s: add new ee work, status %d\n", __func__, ret);
  778. }
/*
 * Workqueue handler: drain the EE queue. For each record, publish the
 * indication message (ee_gen_ind_msg() blocks until the daemon closes the
 * log or its internal timeout fires), then unlink the record and free it
 * via ee_destroy_log().
 *
 * NOTE(review): the list is traversed without ee_queue.lock held; only
 * list_del() is locked. This is safe only if a single instance of this
 * work runs at a time — confirm before queueing it on a reentrant path.
 */
static void ee_worker(struct work_struct *work)
{
	struct aed_eerec *eerec, *tmp;
	unsigned long flags = 0;

	list_for_each_entry_safe(eerec, tmp, &ee_queue.list, list) {
		/* defensive check; a list entry should never be NULL */
		if (eerec == NULL) {
			LOGE("%s:null eerec\n", __func__);
			return;
		}
		ee_gen_ind_msg(eerec);
		spin_lock_irqsave(&ee_queue.lock, flags);
		list_del(&eerec->list);
		spin_unlock_irqrestore(&ee_queue.lock, flags);
		/* frees aed_dev.eerec, which ee_gen_ind_msg() set to eerec */
		ee_destroy_log();
	}
}
  795. /******************************************************************************
  796. * AED EE File operations
  797. *****************************************************************************/
  798. static int aed_ee_open(struct inode *inode, struct file *filp)
  799. {
  800. LOGD("%s:%d:%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
  801. return 0;
  802. }
  803. static int aed_ee_release(struct inode *inode, struct file *filp)
  804. {
  805. LOGD("%s:%d:%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
  806. return 0;
  807. }
  808. static unsigned int aed_ee_poll(struct file *file, struct poll_table_struct *ptable)
  809. {
  810. /* LOGD("%s\n", __func__); */
  811. if (ee_log_avail() && ee_msg_avail())
  812. return POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
  813. poll_wait(file, &aed_dev.eewait, ptable);
  814. return 0;
  815. }
  816. static ssize_t aed_ee_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
  817. {
  818. return msg_copy_to_user(__func__, aed_dev.eerec->msg, buf, count, f_pos);
  819. }
/*
 * EE device write: the daemon sends an AE_Msg request and this handler
 * generates the matching response message (read back via aed_ee_read).
 * Returns @count on success, -1 on malformed input or missing record.
 */
static ssize_t aed_ee_write(struct file *filp, const char __user *buf, size_t count,
		loff_t *f_pos)
{
	struct AE_Msg msg;
	int rsize;
	struct aed_eerec *eerec = aed_dev.eerec;

	/* receiving a new request means the previous response is unavailable */
	/* 1. set position to be zero */
	/* 2. destroy the previous response message */
	*f_pos = 0;
	if (!eerec)
		return -1;
	msg_destroy(&eerec->msg);
	/* the request must be exactly one struct AE_Msg */
	if (count != sizeof(struct AE_Msg)) {
		LOGD("%s: ERR, aed_write count=%zx\n", __func__, count);
		return -1;
	}
	rsize = copy_from_user(&msg, buf, count);
	if (rsize != 0) {
		LOGE("%s: ERR, copy_from_user rsize=%d\n", __func__, rsize);
		return -1;
	}
	msg_show(__func__, &msg);
	if (msg.cmdType == AE_REQ) {
		if (!ee_log_avail()) {
			ee_gen_notavail_msg();
			return count;
		}
		/* dispatch to the matching response generator */
		switch (msg.cmdId) {
		case AE_REQ_CLASS:
			ee_gen_class_msg();
			break;
		case AE_REQ_TYPE:
			ee_gen_type_msg();
			break;
		case AE_REQ_DETAIL:
			ee_gen_detail_msg();
			break;
		case AE_REQ_PROCESS:
			ee_gen_process_msg();
			break;
		case AE_REQ_BACKTRACE:
			/* no backtrace for external exceptions */
			ee_gen_notavail_msg();
			break;
		case AE_REQ_COREDUMP:
			ee_gen_coredump_msg();
			break;
		default:
			LOGD("Unknown command id %d\n", msg.cmdId);
			ee_gen_notavail_msg();
			break;
		}
	} else if (msg.cmdType == AE_IND) {
		switch (msg.cmdId) {
		case AE_IND_LOG_CLOSE:
			/* unblocks ee_gen_ind_msg() waiting on the daemon */
			complete(&aed_ee_com);
			break;
		default:
			/* IGNORE */
			break;
		}
	} else if (msg.cmdType == AE_RSP) {	/* IGNORE */
	}
	return count;
}
  886. /******************************************************************************
  887. * AED KE File operations
  888. *****************************************************************************/
  889. static int aed_ke_open(struct inode *inode, struct file *filp)
  890. {
  891. struct aee_oops *oops_open = NULL;
  892. int major = MAJOR(inode->i_rdev);
  893. int minor = MINOR(inode->i_rdev);
  894. unsigned char *devname = filp->f_path.dentry->d_iname;
  895. LOGD("%s:(%s)%d:%d\n", __func__, devname, major, minor);
  896. if (strstr(devname, "aed1")) { /* aed_ke_open is also used by other device */
  897. oops_open = ipanic_oops_copy();
  898. if (oops_open == NULL)
  899. return 0;
  900. /* The panic log only occur on system startup, so check it now */
  901. ke_queue_request(oops_open);
  902. }
  903. return 0;
  904. }
  905. static int aed_ke_release(struct inode *inode, struct file *filp)
  906. {
  907. LOGD("%s:%d:%d\n", __func__, MAJOR(inode->i_rdev), MINOR(inode->i_rdev));
  908. return 0;
  909. }
  910. static unsigned int aed_ke_poll(struct file *file, struct poll_table_struct *ptable)
  911. {
  912. if (ke_log_avail())
  913. return POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
  914. poll_wait(file, &aed_dev.kewait, ptable);
  915. return 0;
  916. }
/* View of one saved KE log payload streamed through seq_file. */
struct current_ke_buffer {
	void *data;	/* start of the payload; borrowed from the oops record */
	ssize_t size;	/* payload length in bytes */
};
  921. static void *current_ke_start(struct seq_file *m, loff_t *pos)
  922. {
  923. struct current_ke_buffer *ke_buffer;
  924. int index;
  925. ke_buffer = m->private;
  926. if (ke_buffer == NULL)
  927. return NULL;
  928. index = *pos * (PAGE_SIZE - 1);
  929. if (index < ke_buffer->size)
  930. return ke_buffer->data + index;
  931. return NULL;
  932. }
  933. static void *current_ke_next(struct seq_file *m, void *p, loff_t *pos)
  934. {
  935. struct current_ke_buffer *ke_buffer;
  936. int index;
  937. ke_buffer = m->private;
  938. if (ke_buffer == NULL)
  939. return NULL;
  940. ++*pos;
  941. index = *pos * (PAGE_SIZE - 1);
  942. if (index < ke_buffer->size)
  943. return ke_buffer->data + index;
  944. return NULL;
  945. }
/* seq_file .stop: nothing to release per-iteration; required member. */
static void current_ke_stop(struct seq_file *m, void *p)
{
}
  949. static int current_ke_show(struct seq_file *m, void *p)
  950. {
  951. unsigned long len;
  952. struct current_ke_buffer *ke_buffer;
  953. ke_buffer = m->private;
  954. if (ke_buffer == NULL)
  955. return 0;
  956. if ((unsigned long)p >= (unsigned long)ke_buffer->data + ke_buffer->size)
  957. return 0;
  958. len = (unsigned long)ke_buffer->data + ke_buffer->size - (unsigned long)p;
  959. len = len < PAGE_SIZE ? len : (PAGE_SIZE - 1);
  960. if (seq_write(m, p, len)) {
  961. len = 0;
  962. return -1;
  963. }
  964. return 0;
  965. }
/* Iterator that streams a saved KE log in (PAGE_SIZE - 1) slices. */
static const struct seq_operations current_ke_op = {
	.start = current_ke_start,
	.next = current_ke_next,
	.stop = current_ke_stop,
	.show = current_ke_show
};
/*
 * AED_CURRENT_KE_OPEN(ENTRY): generates current_ke_<ENTRY>_open(), the
 * seq_file open handler that points a current_ke_buffer at the lastlog's
 * ENTRY payload (oops->ENTRY pointer plus oops->ENTRY_len length). No copy
 * is taken, so the oops record must outlive the open file.
 */
#define AED_CURRENT_KE_OPEN(ENTRY) \
static int current_ke_##ENTRY##_open(struct inode *inode, struct file *file) \
{ \
	int ret; \
	struct aee_oops *oops; \
	struct seq_file *m; \
	struct current_ke_buffer *ke_buffer; \
	ret = seq_open_private(file, &current_ke_op, sizeof(struct current_ke_buffer)); \
	if (ret == 0) { \
		oops = aed_dev.kerec.lastlog; \
		m = file->private_data; \
		if (!oops) \
			return ret; \
		ke_buffer = (struct current_ke_buffer *)m->private; \
		ke_buffer->data = oops->ENTRY; \
		ke_buffer->size = oops->ENTRY##_len;\
	} \
	return ret; \
}
/*
 * AED_PROC_CURRENT_KE_FOPS(ENTRY): companion file_operations table wired
 * to the open handler generated by AED_CURRENT_KE_OPEN(ENTRY).
 */
#define AED_PROC_CURRENT_KE_FOPS(ENTRY) \
static const struct file_operations proc_current_ke_##ENTRY##_fops = { \
	.open = current_ke_##ENTRY##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = seq_release_private, \
}
  998. static ssize_t aed_ke_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
  999. {
  1000. return msg_copy_to_user(__func__, aed_dev.kerec.msg, buf, count, f_pos);
  1001. }
/*
 * KE device write: the daemon sends an AE_Msg request and this handler
 * generates the matching response message (read back via aed_ke_read).
 * Returns @count on success, -1 on malformed input.
 */
static ssize_t aed_ke_write(struct file *filp, const char __user *buf, size_t count,
		loff_t *f_pos)
{
	struct AE_Msg msg;
	int rsize;

	/* receiving a new request means the previous response is unavailable */
	/* 1. set position to be zero */
	/* 2. destroy the previous response message */
	*f_pos = 0;
	msg_destroy(&aed_dev.kerec.msg);
	/* the request must be exactly one struct AE_Msg */
	if (count != sizeof(struct AE_Msg)) {
		LOGD("ERR: aed_write count=%zx\n", count);
		return -1;
	}
	rsize = copy_from_user(&msg, buf, count);
	if (rsize != 0) {
		LOGD("copy_from_user rsize=%d\n", rsize);
		return -1;
	}
	msg_show(__func__, &msg);
	if (msg.cmdType == AE_REQ) {
		if (!ke_log_avail()) {
			ke_gen_notavail_msg();
			return count;
		}
		/* dispatch to the matching response generator */
		switch (msg.cmdId) {
		case AE_REQ_CLASS:
			ke_gen_class_msg();
			break;
		case AE_REQ_TYPE:
			ke_gen_type_msg();
			break;
		case AE_REQ_MODULE:
			ke_gen_module_msg();
			break;
		case AE_REQ_DETAIL:
			ke_gen_detail_msg(&msg);
			break;
		case AE_REQ_PROCESS:
			ke_gen_process_msg();
			break;
		case AE_REQ_BACKTRACE:
			ke_gen_backtrace_msg();
			break;
		case AE_REQ_USERSPACEBACKTRACE:
			ke_gen_userbacktrace_msg();
			break;
		case AE_REQ_USER_REG:
			ke_gen_user_reg_msg();
			break;
		case AE_REQ_USER_MAPS:
			ke_gen_usermaps_msg();
			break;
		default:
			ke_gen_notavail_msg();
			break;
		}
	} else if (msg.cmdType == AE_IND) {
		switch (msg.cmdId) {
		case AE_IND_LOG_CLOSE:
			/* real release operation move to ke_worker(): ke_destroy_log(); */
			complete(&aed_ke_com);
			break;
		default:
			/* IGNORE */
			break;
		}
	} else if (msg.cmdType == AE_RSP) {	/* IGNORE */
	}
	return count;
}
/*
 * AEEIOCTL_GET_PROCESS_BT handler: capture a backtrace for the pid passed
 * in the aee_ioctl control block, write the control block back with the
 * frame count, then copy the frame array to ioctl.out in userspace.
 * Returns 0 on success or a negative errno.
 */
static long aed_ioctl_bt(unsigned long arg)
{
	int ret = 0;
	struct aee_ioctl ioctl;
	struct aee_process_bt bt;

	if (copy_from_user(&ioctl, (struct aee_ioctl __user *)arg, sizeof(struct aee_ioctl))) {
		ret = -EFAULT;
		return ret;
	}
	bt.pid = ioctl.pid;
	ret = aed_get_process_bt(&bt);
	if (ret == 0) {
		/* magic marker plus the number of captured frames */
		ioctl.detail = 0xAEE00001;
		ioctl.size = bt.nr_entries;
		if (copy_to_user((struct aee_ioctl __user *)arg, &ioctl, sizeof(struct aee_ioctl))) {
			ret = -EFAULT;
			return ret;
		}
		if (!ioctl.out) {
			ret = -EFAULT;
		} else
			/* always copies the full AEE_NR_FRAME array, not just nr_entries */
			if (copy_to_user
			    ((struct aee_bt_frame __user *)(unsigned long)ioctl.out,
			     (const void *)bt.entries, sizeof(struct aee_bt_frame) * AEE_NR_FRAME)) {
				ret = -EFAULT;
			}
	}
	return ret;
}
  1103. /*
  1104. * aed process daemon and other command line may access me
  1105. * concurrently
  1106. */
  1107. DEFINE_SEMAPHORE(aed_dal_sem);
/*
 * Main AEE ioctl entry point. AEEIOCTL_GET_PROCESS_BT is dispatched before
 * taking the semaphore; every other command runs under aed_dal_sem because
 * the aed daemon and command-line tools may call in concurrently.
 * Returns 0 on success or a negative errno.
 */
static long aed_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret = 0;

	if (cmd == AEEIOCTL_GET_PROCESS_BT)
		return aed_ioctl_bt(arg);
	if (down_interruptible(&aed_dal_sem) < 0)
		return -ERESTARTSYS;
	switch (cmd) {
	case AEEIOCTL_SET_AEE_MODE:
		{
			/* overwrite the global mode directly from userspace */
			if (copy_from_user(&aee_mode, (void __user *)arg, sizeof(aee_mode))) {
				ret = -EFAULT;
				goto EXIT;
			}
			LOGD("set aee mode = %d\n", aee_mode);
			break;
		}
	case AEEIOCTL_DAL_SHOW:
		{
			/*It's troublesome to allocate more than 1KB size on stack */
			struct aee_dal_show *dal_show = kzalloc(sizeof(struct aee_dal_show),
					GFP_KERNEL);
			if (dal_show == NULL) {
				ret = -EFAULT;
				goto EXIT;
			}
			if (copy_from_user(dal_show, (struct aee_dal_show __user *)arg,
					sizeof(struct aee_dal_show))) {
				ret = -EFAULT;
				goto OUT;
			}
			/* on-screen display is disabled in customer modes */
			if (aee_mode >= AEE_MODE_CUSTOMER_ENG) {
				LOGD("DAL_SHOW not allowed (mode %d)\n", aee_mode);
				goto OUT;
			}
			/* Try to prevent overrun */
			dal_show->msg[sizeof(dal_show->msg) - 1] = 0;
#ifdef CONFIG_MTK_FB
			LOGD("AEE CALL DAL_Printf now\n");
			DAL_Printf("%s", dal_show->msg);
#endif
 OUT:
			kfree(dal_show);
			dal_show = NULL;
			goto EXIT;
		}
	case AEEIOCTL_DAL_CLEAN:
		{
			/* set default bgcolor to red, it will be used in DAL_Clean */
			struct aee_dal_setcolor dal_setcolor;

			dal_setcolor.foreground = 0x00ff00;	/*green */
			dal_setcolor.background = 0xff0000;	/*red */
#ifdef CONFIG_MTK_FB
			LOGD("AEE CALL DAL_SetColor now\n");
			DAL_SetColor(dal_setcolor.foreground, dal_setcolor.background);
			LOGD("AEE CALL DAL_Clean now\n");
			DAL_Clean();
#endif
			break;
		}
	case AEEIOCTL_SETCOLOR:
		{
			struct aee_dal_setcolor dal_setcolor;

			/* color changes are disabled in customer modes */
			if (aee_mode >= AEE_MODE_CUSTOMER_ENG) {
				LOGD("SETCOLOR not allowed (mode %d)\n", aee_mode);
				goto EXIT;
			}
			if (copy_from_user(&dal_setcolor, (struct aee_dal_setcolor __user *)arg,
					sizeof(struct aee_dal_setcolor))) {
				ret = -EFAULT;
				goto EXIT;
			}
#ifdef CONFIG_MTK_FB
			LOGD("AEE CALL DAL_SetColor now\n");
			DAL_SetColor(dal_setcolor.foreground, dal_setcolor.background);
			LOGD("AEE CALL DAL_SetScreenColor now\n");
			DAL_SetScreenColor(dal_setcolor.screencolor);
#endif
			break;
		}
	case AEEIOCTL_GET_THREAD_REG:
		{
			struct aee_thread_reg *tmp;

			LOGD("%s: get thread registers ioctl\n", __func__);
			tmp = kzalloc(sizeof(struct aee_thread_reg), GFP_KERNEL);
			if (tmp == NULL) {
				ret = -ENOMEM;
				goto EXIT;
			}
			if (copy_from_user
			    (tmp, (struct aee_thread_reg __user *)arg,
			     sizeof(struct aee_thread_reg))) {
				kfree(tmp);
				ret = -EFAULT;
				goto EXIT;
			}
			if (tmp->tid > 0) {
				struct task_struct *task;
				struct pt_regs *user_ret = NULL;

				task = find_task_by_vpid(tmp->tid);
				if (task == NULL) {
					kfree(tmp);
					ret = -EINVAL;
					goto EXIT;
				}
				user_ret = task_pt_regs(task);
				if (NULL == user_ret) {
					kfree(tmp);
					ret = -EINVAL;
					goto EXIT;
				}
				/* snapshot the thread's saved user registers */
				memcpy(&(tmp->regs), user_ret, sizeof(struct pt_regs));
				if (copy_to_user
				    ((struct aee_thread_reg __user *)arg, tmp,
				     sizeof(struct aee_thread_reg))) {
					kfree(tmp);
					ret = -EFAULT;
					goto EXIT;
				}
			} else {
				LOGD("%s: get thread registers ioctl tid invalid\n", __func__);
				kfree(tmp);
				ret = -EINVAL;
				goto EXIT;
			}
			kfree(tmp);
			break;
		}
	case AEEIOCTL_USER_IOCTL_TO_KERNEL_WANING:	/* get current user space reg when call aee_kernel_warning_api */
		{
			LOGD("%s: AEEIOCTL_USER_IOCTL_TO_KERNEL_WANING,call kthread create ,is ok\n", __func__);
			/* kthread_create(Dstate_test, NULL, "D-state"); */
			aee_kernel_warning_api(__FILE__, __LINE__, DB_OPT_DEFAULT|DB_OPT_NATIVE_BACKTRACE,
					"AEEIOCTL_USER_IOCTL_TO_KERNEL_WANING",
					"Trigger Kernel warning");
			break;
		}
	case AEEIOCTL_CHECK_SUID_DUMPABLE:
		{
			int pid;

			LOGD("%s: check suid dumpable ioctl\n", __func__);
			if (copy_from_user(&pid, (void __user *)arg, sizeof(int))) {
				ret = -EFAULT;
				goto EXIT;
			}
			if (pid > 0) {
				struct task_struct *task;
				int dumpable = -1;

				task = find_task_by_vpid(pid);
				if (task == NULL) {
					LOGD("%s: process:%d task null\n", __func__, pid);
					ret = -EINVAL;
					goto EXIT;
				}
				if (task->mm == NULL) {
					LOGD("%s: process:%d task mm null\n", __func__, pid);
					ret = -EINVAL;
					goto EXIT;
				}
				/* force the process dumpable so a coredump can be taken */
				dumpable = get_dumpable(task->mm);
				if (dumpable == 0) {
					LOGD("%s: set process:%d dumpable\n", __func__, pid);
					set_dumpable(task->mm, 1);
				} else
					LOGD("%s: get process:%d dumpable:%d\n", __func__, pid,
							dumpable);
			} else {
				LOGD("%s: check suid dumpable ioctl pid invalid\n", __func__);
				ret = -EINVAL;
			}
			break;
		}
	case AEEIOCTL_SET_FORECE_RED_SCREEN:
		{
			if (copy_from_user
			    (&force_red_screen, (void __user *)arg, sizeof(force_red_screen))) {
				ret = -EFAULT;
				goto EXIT;
			}
			LOGD("force aee red screen = %d\n", force_red_screen);
			break;
		}
	default:
		ret = -EINVAL;
	}
EXIT:
	up(&aed_dal_sem);
	return ret;
}
  1297. static void aed_get_traces(char *msg)
  1298. {
  1299. struct stack_trace trace;
  1300. unsigned long stacks[32];
  1301. int i;
  1302. int offset;
  1303. trace.entries = stacks;
  1304. /*save backtraces */
  1305. trace.nr_entries = 0;
  1306. trace.max_entries = 32;
  1307. trace.skip = 2;
  1308. save_stack_trace_tsk(current, &trace);
  1309. offset = strlen(msg);
  1310. for (i = 0; i < trace.nr_entries; i++) {
  1311. offset += snprintf(msg + offset, AEE_BACKTRACE_LENGTH - offset, "[<%p>] %pS\n",
  1312. (void *)trace.entries[i], (void *)trace.entries[i]);
  1313. }
  1314. }
  1315. void Log2Buffer(struct aee_oops *oops, const char *fmt, ...)
  1316. {
  1317. char buf[256];
  1318. int len = 0;
  1319. va_list ap;
  1320. va_start(ap, fmt);
  1321. len = strlen(oops->userthread_maps.Userthread_maps);
  1322. if ((len + sizeof(buf)) < MaxMapsSize) {
  1323. vsnprintf(&oops->userthread_maps.Userthread_maps[len], sizeof(buf), fmt, ap);
  1324. oops->userthread_maps.Userthread_mapsLength = len + sizeof(buf);
  1325. }
  1326. va_end(ap);
  1327. }
/*
 * Capture the calling task's native (userspace) context into @oops:
 * saved user registers, a textual map of its VMAs (via Log2Buffer), and a
 * copy of its user stack bounded by MaxStackSize. Handles three register
 * layouts: 32-bit kernel, 64-bit kernel with compat (32-bit) task, and
 * 64-bit kernel with 64-bit task. Always returns 0.
 */
int DumpThreadNativeInfo(struct aee_oops *oops)
{
	struct task_struct *current_task;
	struct pt_regs *user_ret;
	struct vm_area_struct *vma;
	unsigned long userstack_start = 0;
	unsigned long userstack_end = 0, length = 0;
	int mapcount = 0;
	struct file *file;
	int flags;	/* NOTE(review): vm_flags is unsigned long; int truncates on 64-bit */
	struct mm_struct *mm;
	int ret = 0;

	current_task = get_current();
	user_ret = task_pt_regs(current_task);
	/* CurrentUserPid=current_task->pid; //Thread id */
	oops->userthread_reg.tid = current_task->tgid;
	oops->userthread_stack.tid = current_task->tgid;
	oops->userthread_maps.tid = current_task->tgid;
	/* snapshot the saved user-mode register frame */
	memcpy(&oops->userthread_reg.regs, user_ret, sizeof(struct pt_regs));
	LOGE(" pid:%d /// tgid:%d, stack:0x%08lx\n",
			current_task->pid, current_task->tgid,
			(long)oops->userthread_stack.Userthread_Stack);
	/* nothing to dump for pure kernel threads or tasks without an mm */
	if (!user_mode(user_ret))
		return 0;
	if (current_task->mm == NULL)
		return 0;
#if 1
	/* walk the VMA list and log each mapping in /proc/<pid>/maps style */
	vma = current_task->mm->mmap;
	while (vma && (mapcount < current_task->mm->map_count)) {
		file = vma->vm_file;
		flags = vma->vm_flags;
		if (file) {
			LOGE("%08lx-%08lx %c%c%c%c %s\n", vma->vm_start, vma->vm_end,
					flags & VM_READ ? 'r' : '-',
					flags & VM_WRITE ? 'w' : '-',
					flags & VM_EXEC ? 'x' : '-',
					flags & VM_MAYSHARE ? 's' : 'p', (unsigned char *)(file->f_path.dentry->d_iname));
			Log2Buffer(oops, "%08lx-%08lx %c%c%c%c %s\n", vma->vm_start, vma->vm_end,
					flags & VM_READ ? 'r' : '-',
					flags & VM_WRITE ? 'w' : '-',
					flags & VM_EXEC ? 'x' : '-',
					flags & VM_MAYSHARE ? 's' : 'p',
					(unsigned char *)(file->f_path.dentry->d_iname));
		} else {
			/* anonymous mapping: label it heap/stack/vdso where possible */
			const char *name = arch_vma_name(vma);

			mm = vma->vm_mm;
			if (!name) {
				if (mm) {
					if (vma->vm_start <= mm->start_brk &&
							vma->vm_end >= mm->brk) {
						name = "[heap]";
					} else if (vma->vm_start <= mm->start_stack &&
							vma->vm_end >= mm->start_stack) {
						name = "[stack]";
					}
				} else {
					name = "[vdso]";
				}
			}
			/* if (name) */
			{
				LOGE("%08lx-%08lx %c%c%c%c %s\n", vma->vm_start, vma->vm_end,
						flags & VM_READ ? 'r' : '-',
						flags & VM_WRITE ? 'w' : '-',
						flags & VM_EXEC ? 'x' : '-',
						flags & VM_MAYSHARE ? 's' : 'p', name);
				Log2Buffer(oops, "%08lx-%08lx %c%c%c%c %s\n", vma->vm_start, vma->vm_end,
						flags & VM_READ ? 'r' : '-',
						flags & VM_WRITE ? 'w' : '-',
						flags & VM_EXEC ? 'x' : '-',
						flags & VM_MAYSHARE ? 's' : 'p', name);
			}
		}
		vma = vma->vm_next;
		mapcount++;
	}
#endif
	LOGE("maps addr(0x%08lx), maps len:%d\n",
			(long)oops->userthread_maps.Userthread_maps,
			oops->userthread_maps.Userthread_mapsLength);
#ifndef __aarch64__ /* 32bit */
	LOGE(" pc/lr/sp 0x%08lx/0x%08lx/0x%08lx\n", user_ret->ARM_pc, user_ret->ARM_lr,
			user_ret->ARM_sp);
	userstack_start = (unsigned long)user_ret->ARM_sp;
	/* find the VMA containing the user stack pointer */
	vma = current_task->mm->mmap;
	while (vma != NULL) {
		if (vma->vm_start <= userstack_start && vma->vm_end >= userstack_start) {
			userstack_end = vma->vm_end;
			break;
		}
		vma = vma->vm_next;
		if (vma == current_task->mm->mmap)
			break;
	}
	if (userstack_end == 0) {
		LOGE("Dump native stack failed:\n");
		return 0;
	}
	LOGE("Dump stack range (0x%08lx:0x%08lx)\n", userstack_start, userstack_end);
	/* clamp the copy to MaxStackSize - 1 bytes */
	length = ((userstack_end - userstack_start) <
			(MaxStackSize-1)) ? (userstack_end - userstack_start) : (MaxStackSize-1);
	oops->userthread_stack.StackLength = length;
	ret = copy_from_user((void *)(oops->userthread_stack.Userthread_Stack),
			(const void __user *)(userstack_start), length);
	LOGE("u+k 32 copy_from_user ret(0x%08x),len:%lx\n", ret, length);
	LOGE("end dump native stack:\n");
#else /* 64bit, First deal with K64+U64, the last time to deal with K64+U32 */
	if (is_compat_task()) {	/* K64_U32 */
		/* compat task: lr is r14, sp is r13 in the user_regs array */
		LOGE(" K64+ U32 pc/lr/sp 0x%16lx/0x%16lx/0x%16lx\n",
				(long)(user_ret->user_regs.pc),
				(long)(user_ret->user_regs.regs[14]),
				(long)(user_ret->user_regs.regs[13]));
		userstack_start = (unsigned long)user_ret->user_regs.regs[13];
		vma = current_task->mm->mmap;
		while (vma != NULL) {
			if (vma->vm_start <= userstack_start && vma->vm_end >= userstack_start) {
				userstack_end = vma->vm_end;
				break;
			}
			vma = vma->vm_next;
			if (vma == current_task->mm->mmap)
				break;
		}
		if (userstack_end == 0) {
			LOGE("Dump native stack failed:\n");
			return 0;
		}
		LOGE("Dump stack range (0x%08lx:0x%08lx)\n", userstack_start, userstack_end);
		length = ((userstack_end - userstack_start) <
				(MaxStackSize-1)) ? (userstack_end - userstack_start) : (MaxStackSize-1);
		oops->userthread_stack.StackLength = length;
		ret = copy_from_user((void *)(oops->userthread_stack.Userthread_Stack),
				(const void __user *)(userstack_start), length);
		LOGE("copy_from_user ret(0x%16x),len:%lx\n", ret, length);
	} else { /*K64+U64*/
		/* native 64-bit task: lr is x30, sp has its own slot */
		LOGE(" K64+ U64 pc/lr/sp 0x%16lx/0x%16lx/0x%16lx\n",
				(long)(user_ret->user_regs.pc),
				(long)(user_ret->user_regs.regs[30]),
				(long)(user_ret->user_regs.sp));
		userstack_start = (unsigned long)user_ret->user_regs.sp;
		vma = current_task->mm->mmap;
		while (vma != NULL) {
			if (vma->vm_start <= userstack_start && vma->vm_end >= userstack_start) {
				userstack_end = vma->vm_end;
				break;
			}
			vma = vma->vm_next;
			if (vma == current_task->mm->mmap)
				break;
		}
		if (userstack_end == 0) {
			LOGE("Dump native stack failed:\n");
			return 0;
		}
		LOGE("Dump stack range (0x%16lx:0x%16lx)\n", userstack_start, userstack_end);
		length = ((userstack_end - userstack_start) <
				(MaxStackSize-1)) ? (userstack_end - userstack_start) : (MaxStackSize-1);
		oops->userthread_stack.StackLength = length;
		ret = copy_from_user((void *)(oops->userthread_stack.Userthread_Stack),
				(const void __user *)(userstack_start), length);
		LOGE("copy_from_user ret(0x%08x),len:%lx\n", ret, length);
	}
#endif
	return 0;
}
  1493. static void kernel_reportAPI(const AE_DEFECT_ATTR attr, const int db_opt, const char *module,
  1494. const char *msg)
  1495. {
  1496. struct aee_oops *oops;
  1497. int n = 0;
  1498. if (aee_mode == AEE_MODE_CUSTOMER_USER || (aee_mode == AEE_MODE_CUSTOMER_ENG && attr == AE_DEFECT_WARNING))
  1499. return;
  1500. oops = aee_oops_create(attr, AE_KERNEL_PROBLEM_REPORT, module);
  1501. if (NULL != oops) {
  1502. n += snprintf(oops->backtrace, AEE_BACKTRACE_LENGTH, msg);
  1503. snprintf(oops->backtrace + n, AEE_BACKTRACE_LENGTH - n, "\nBacktrace:\n");
  1504. aed_get_traces(oops->backtrace);
  1505. oops->detail = (char *)(oops->backtrace);
  1506. oops->detail_len = strlen(oops->backtrace) + 1;
  1507. oops->dump_option = db_opt;
  1508. #ifdef __aarch64__
  1509. if ((db_opt & DB_OPT_NATIVE_BACKTRACE) && !is_compat_task())
  1510. oops->dump_option |= DB_OPT_AARCH64;
  1511. #endif
  1512. if (db_opt & DB_OPT_NATIVE_BACKTRACE) {
  1513. oops->userthread_stack.Userthread_Stack = vzalloc(MaxStackSize);
  1514. if (oops->userthread_stack.Userthread_Stack == NULL) {
  1515. LOGE("%s: oops->userthread_stack.Userthread_Stack Vmalloc fail", __func__);
  1516. return;
  1517. }
  1518. oops->userthread_maps.Userthread_maps = vzalloc(MaxMapsSize);
  1519. if (oops->userthread_maps.Userthread_maps == NULL) {
  1520. LOGE("%s: oops->userthread_maps.Userthread_maps Vmalloc fail", __func__);
  1521. return;
  1522. }
  1523. LOGE("%s: oops->userthread_stack.Userthread_Stack :0x%08lx,maps:0x%08lx",
  1524. __func__,
  1525. (long)oops->userthread_stack.Userthread_Stack,
  1526. (long)oops->userthread_maps.Userthread_maps);
  1527. oops->userthread_stack.StackLength = MaxStackSize;
  1528. oops->userthread_maps.Userthread_mapsLength = MaxMapsSize;
  1529. DumpThreadNativeInfo(oops);
  1530. }
  1531. LOGI("%s,%s,%s,0x%x\n", __func__, module, msg, db_opt);
  1532. ke_queue_request(oops);
  1533. }
  1534. }
  1535. #ifndef PARTIAL_BUILD
/*
 * Show @msg on the DAL (Display Abstraction Layer) warning screen, tagged
 * with its source location. Skipped in interrupt context (the path may
 * sleep on aed_dal_sem and allocates with GFP_KERNEL). The screen is only
 * painted in MTK engineering mode, or when a red screen is being forced in
 * pre-customer-eng modes.
 */
void aee_kernel_dal_api(const char *file, const int line, const char *msg)
{
	LOGW("aee_kernel_dal_api : <%s:%d> %s ", file, line, msg);
	if (in_interrupt()) {
		LOGE("aee_kernel_dal_api: in interrupt context, skip");
		return;
	}
#if defined(CONFIG_MTK_AEE_AED) && defined(CONFIG_MTK_FB)
	if (down_interruptible(&aed_dal_sem) < 0) {
		LOGI("ERROR : aee_kernel_dal_api() get aed_dal_sem fail ");
		return;
	}
	if (msg != NULL) {
		struct aee_dal_setcolor dal_setcolor;
		/* heap allocation: aee_dal_show is too large for the stack */
		struct aee_dal_show *dal_show = kzalloc(sizeof(struct aee_dal_show), GFP_KERNEL);

		if (dal_show == NULL) {
			LOGI("ERROR : aee_kernel_dal_api() kzalloc fail\n ");
			up(&aed_dal_sem);
			return;
		}
		if (((aee_mode == AEE_MODE_MTK_ENG) && (force_red_screen == AEE_FORCE_NOT_SET))
		    || ((aee_mode < AEE_MODE_CUSTOMER_ENG)
			&& (force_red_screen == AEE_FORCE_RED_SCREEN))) {
			dal_setcolor.foreground = 0xff00ff;	/* fg: purple */
			dal_setcolor.background = 0x00ff00;	/* bg: green */
			LOGD("AEE CALL DAL_SetColor now\n");
			DAL_SetColor(dal_setcolor.foreground, dal_setcolor.background);
			dal_setcolor.screencolor = 0xff0000;	/* screen:red */
			LOGD("AEE CALL DAL_SetScreenColor now\n");
			DAL_SetScreenColor(dal_setcolor.screencolor);
			/* bounded copy with guaranteed NUL termination */
			strncpy(dal_show->msg, msg, sizeof(dal_show->msg) - 1);
			dal_show->msg[sizeof(dal_show->msg) - 1] = 0;
			LOGD("AEE CALL DAL_Printf now\n");
			DAL_Printf("%s", dal_show->msg);
		} else {
			LOGD("DAL not allowed (mode %d)\n", aee_mode);
		}
		kfree(dal_show);
	}
	up(&aed_dal_sem);
#endif
}
  1578. #else
/* PARTIAL_BUILD stub: log the request but render nothing on screen. */
void aee_kernel_dal_api(const char *file, const int line, const char *msg)
{
	LOGW("aee_kernel_dal_api : <%s:%d> %s ", file, line, msg);
}
  1583. #endif
  1584. EXPORT_SYMBOL(aee_kernel_dal_api);
  1585. static void external_exception(const char *assert_type, const int *log, int log_size,
  1586. const int *phy, int phy_size, const char *detail, const int db_opt)
  1587. {
  1588. int *ee_log = NULL;
  1589. struct aed_eerec *eerec;
  1590. LOGD("%s : [%s] log ptr %p size %d, phy ptr %p size %d\n", __func__,
  1591. assert_type, log, log_size, phy, phy_size);
  1592. if (aee_mode >= AEE_MODE_CUSTOMER_USER)
  1593. return;
  1594. eerec = kzalloc(sizeof(struct aed_eerec), GFP_ATOMIC);
  1595. if (eerec == NULL) {
  1596. LOGE("%s: kmalloc fail", __func__);
  1597. return;
  1598. }
  1599. if ((log_size > 0) && (log != NULL)) {
  1600. eerec->ee_log_size = log_size;
  1601. ee_log = kmalloc(log_size, GFP_ATOMIC);
  1602. if (NULL != ee_log) {
  1603. eerec->ee_log = ee_log;
  1604. memcpy(ee_log, log, log_size);
  1605. }
  1606. } else {
  1607. eerec->ee_log_size = 16;
  1608. ee_log = kzalloc(eerec->ee_log_size, GFP_ATOMIC);
  1609. eerec->ee_log = ee_log;
  1610. }
  1611. if (NULL == ee_log) {
  1612. LOGE("%s : memory alloc() fail\n", __func__);
  1613. kfree(eerec);
  1614. return;
  1615. }
  1616. memset(eerec->assert_type, 0, sizeof(eerec->assert_type));
  1617. strncpy(eerec->assert_type, assert_type, sizeof(eerec->assert_type) - 1);
  1618. memset(eerec->exp_filename, 0, sizeof(eerec->exp_filename));
  1619. strncpy(eerec->exp_filename, detail, sizeof(eerec->exp_filename) - 1);
  1620. LOGD("EE [%s]\n", eerec->assert_type);
  1621. eerec->exp_linenum = 0;
  1622. eerec->fatal1 = 0;
  1623. eerec->fatal2 = 0;
  1624. /* Check if we can dump memory */
  1625. if (in_interrupt()) {
  1626. /* kernel vamlloc cannot be used in interrupt context */
  1627. LOGD("External exception occur in interrupt context, no coredump");
  1628. phy_size = 0;
  1629. } else if ((phy < 0) || (phy_size > MAX_EE_COREDUMP)) {
  1630. LOGD("EE Physical memory size(%d) too large or invalid", phy_size);
  1631. phy_size = 0;
  1632. }
  1633. if (phy_size > 0) {
  1634. eerec->ee_phy = (int *)vmalloc_user(phy_size);
  1635. if (eerec->ee_phy != NULL) {
  1636. memcpy(eerec->ee_phy, phy, phy_size);
  1637. eerec->ee_phy_size = phy_size;
  1638. } else {
  1639. LOGD("Losing ee phy mem due to vmalloc return NULL\n");
  1640. eerec->ee_phy_size = 0;
  1641. }
  1642. } else {
  1643. eerec->ee_phy = NULL;
  1644. eerec->ee_phy_size = 0;
  1645. }
  1646. eerec->db_opt = db_opt;
  1647. ee_queue_request(eerec);
  1648. LOGD("external_exception out\n");
  1649. }
/* Exposed as a module parameter so userspace can flag a ramdump report. */
static bool rr_reported;
module_param(rr_reported, bool, S_IRUSR | S_IWUSR);

/*
 * Callback table handed to the AEE core via aee_register_api().
 * All external subsystems (modem, MD32, SCP, combo) funnel into the
 * same external_exception() handler.
 */
static struct aee_kernel_api kernel_api = {
	.kernel_reportAPI = kernel_reportAPI,
	.md_exception = external_exception,
	.md32_exception = external_exception,
	.scp_exception = external_exception,
	.combo_exception = external_exception
};
/*
 * Generate the seq_file open function and file_operations for each
 * /proc/aed/current-ke-* entry (one pair of macro expansions per entry).
 */
AED_CURRENT_KE_OPEN(console);
AED_PROC_CURRENT_KE_FOPS(console);
AED_CURRENT_KE_OPEN(userspace_info);
AED_PROC_CURRENT_KE_FOPS(userspace_info);
AED_CURRENT_KE_OPEN(android_main);
AED_PROC_CURRENT_KE_FOPS(android_main);
AED_CURRENT_KE_OPEN(android_radio);
AED_PROC_CURRENT_KE_FOPS(android_radio);
AED_CURRENT_KE_OPEN(android_system);
AED_PROC_CURRENT_KE_FOPS(android_system);
AED_CURRENT_KE_OPEN(mmprofile);
AED_PROC_CURRENT_KE_FOPS(mmprofile);
AED_CURRENT_KE_OPEN(mini_rdump);
AED_PROC_CURRENT_KE_FOPS(mini_rdump);
  1673. static int current_ke_ee_coredump_open(struct inode *inode, struct file *file)
  1674. {
  1675. int ret = seq_open_private(file, &current_ke_op, sizeof(struct current_ke_buffer));
  1676. if (ret == 0) {
  1677. struct aed_eerec *eerec = aed_dev.eerec;
  1678. struct seq_file *m = file->private_data;
  1679. struct current_ke_buffer *ee_buffer;
  1680. if (!eerec)
  1681. return ret;
  1682. ee_buffer = (struct current_ke_buffer *)m->private;
  1683. ee_buffer->data = eerec->ee_phy;
  1684. ee_buffer->size = eerec->ee_phy_size;
  1685. }
  1686. return ret;
  1687. }
/* Open function is hand-written above, so only the fops macro is expanded. */
/* AED_CURRENT_KE_OPEN(ee_coredump); */
AED_PROC_CURRENT_KE_FOPS(ee_coredump);
/*
 * Create the /proc/aed directory and all of its entries.
 * Returns 0 on success, -ENOMEM if the directory itself cannot be created.
 * NOTE(review): AED_PROC_ENTRY presumably handles per-entry creation
 * failure itself — confirm against the macro definition.
 */
static int aed_proc_init(void)
{
	aed_proc_dir = proc_mkdir("aed", NULL);
	if (aed_proc_dir == NULL) {
		LOGE("aed proc_mkdir failed\n");
		return -ENOMEM;
	}
	AED_PROC_ENTRY(current-ke-console, current_ke_console, S_IRUSR);
	AED_PROC_ENTRY(current-ke-userspace_info, current_ke_userspace_info, S_IRUSR);
	AED_PROC_ENTRY(current-ke-android_system, current_ke_android_system, S_IRUSR);
	AED_PROC_ENTRY(current-ke-android_radio, current_ke_android_radio, S_IRUSR);
	AED_PROC_ENTRY(current-ke-android_main, current_ke_android_main, S_IRUSR);
	AED_PROC_ENTRY(current-ke-mmprofile, current_ke_mmprofile, S_IRUSR);
	AED_PROC_ENTRY(current-ke-mini_rdump, current_ke_mini_rdump, S_IRUSR);
	AED_PROC_ENTRY(current-ee-coredump, current_ke_ee_coredump, S_IRUSR);
	/* Sub-features install their own entries under the same directory. */
	aee_rr_proc_init(aed_proc_dir);
	aed_proc_debug_init(aed_proc_dir);
	dram_console_init(aed_proc_dir);
	return 0;
}
  1710. static int aed_proc_done(void)
  1711. {
  1712. remove_proc_entry(CURRENT_KE_CONSOLE, aed_proc_dir);
  1713. remove_proc_entry(CURRENT_EE_COREDUMP, aed_proc_dir);
  1714. aed_proc_debug_done(aed_proc_dir);
  1715. dram_console_done(aed_proc_dir);
  1716. remove_proc_entry("aed", NULL);
  1717. return 0;
  1718. }
  1719. /******************************************************************************
  1720. * Module related
  1721. *****************************************************************************/
/* File operations for the external-exception device (/dev/aed0). */
static const struct file_operations aed_ee_fops = {
	.owner = THIS_MODULE,
	.open = aed_ee_open,
	.release = aed_ee_release,
	.poll = aed_ee_poll,
	.read = aed_ee_read,
	.write = aed_ee_write,
	.unlocked_ioctl = aed_ioctl,
#ifdef CONFIG_COMPAT
	/* NOTE(review): reuses the native ioctl for 32-bit callers — assumes
	 * all ioctl argument layouts are compat-safe; confirm. */
	.compat_ioctl = aed_ioctl,
#endif
};
/* File operations for the kernel-exception device (/dev/aed1). */
static const struct file_operations aed_ke_fops = {
	.owner = THIS_MODULE,
	.open = aed_ke_open,
	.release = aed_ke_release,
	.poll = aed_ke_poll,
	.read = aed_ke_read,
	.write = aed_ke_write,
	.unlocked_ioctl = aed_ioctl,
#ifdef CONFIG_COMPAT
	/* NOTE(review): reuses the native ioctl for 32-bit callers — assumes
	 * all ioctl argument layouts are compat-safe; confirm. */
	.compat_ioctl = aed_ioctl,
#endif
};
  1746. /* QHQ RT Monitor end */
/* Misc character devices: aed0 carries external exceptions (EE),
 * aed1 carries kernel exceptions (KE); minors assigned dynamically. */
static struct miscdevice aed_ee_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "aed0",
	.fops = &aed_ee_fops,
};
static struct miscdevice aed_ke_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "aed1",
	.fops = &aed_ke_fops,
};
  1757. static int __init aed_init(void)
  1758. {
  1759. int err = 0;
  1760. err = aed_proc_init();
  1761. if (err != 0)
  1762. return err;
  1763. err = ksysfs_bootinfo_init();
  1764. if (err != 0)
  1765. return err;
  1766. spin_lock_init(&ke_queue.lock);
  1767. spin_lock_init(&ee_queue.lock);
  1768. INIT_LIST_HEAD(&ke_queue.list);
  1769. INIT_LIST_HEAD(&ee_queue.list);
  1770. init_waitqueue_head(&aed_dev.eewait);
  1771. memset(&aed_dev.kerec, 0, sizeof(struct aed_kerec));
  1772. init_waitqueue_head(&aed_dev.kewait);
  1773. INIT_WORK(&ke_work, ke_worker);
  1774. INIT_WORK(&ee_work, ee_worker);
  1775. aee_register_api(&kernel_api);
  1776. spin_lock_init(&aed_device_lock);
  1777. err = misc_register(&aed_ee_dev);
  1778. if (unlikely(err)) {
  1779. LOGE("aee: failed to register aed0(ee) device!\n");
  1780. return err;
  1781. }
  1782. err = misc_register(&aed_ke_dev);
  1783. if (unlikely(err)) {
  1784. LOGE("aee: failed to register aed1(ke) device!\n");
  1785. return err;
  1786. }
  1787. return err;
  1788. }
  1789. static void __exit aed_exit(void)
  1790. {
  1791. int err;
  1792. err = misc_deregister(&aed_ee_dev);
  1793. if (unlikely(err))
  1794. LOGE("xLog: failed to unregister aed(ee) device!\n");
  1795. err = misc_deregister(&aed_ke_dev);
  1796. if (unlikely(err))
  1797. LOGE("xLog: failed to unregister aed(ke) device!\n");
  1798. ee_destroy_log();
  1799. ke_destroy_log();
  1800. aed_proc_done();
  1801. ksysfs_bootinfo_exit();
  1802. }
  1803. module_init(aed_init);
  1804. module_exit(aed_exit);
  1805. MODULE_LICENSE("GPL");
  1806. MODULE_DESCRIPTION("MediaTek AED Driver");
  1807. MODULE_AUTHOR("MediaTek Inc.");