trace_functions_graph.c

/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"
static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}
/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}
/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

/* Flag options */
#define TRACE_GRAPH_PRINT_FLAT		0x80

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debug purpose) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU ? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display Overhead ? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Use standard trace formatting rather than hierarchical */
	{ TRACER_OPT(funcgraph-flat, TRACE_GRAPH_PRINT_FLAT) },
	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS,
	.opts = trace_opts
};

static struct trace_array *graph_array;
/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags);
/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	/*
	 * The curr_ret_stack is an index into the ftrace return stack of
	 * the current task. Its value should be in [0, FTRACE_RETFUNC_DEPTH)
	 * when the function graph tracer is used. To support filtering out
	 * specific functions, it makes the index negative by subtracting a
	 * huge value (FTRACE_NOTRACE_DEPTH), so when ftrace sees a negative
	 * index it will ignore the record. The index gets recovered when
	 * returning from the filtered function, by adding FTRACE_NOTRACE_DEPTH
	 * back, and recording then continues normally.
	 *
	 * The curr_ret_stack is initialized to -1 and gets increased in this
	 * function. So it can be less than -1 only if it was filtered out
	 * via ftrace_graph_notrace_addr(), which can be set from the
	 * set_graph_notrace file in debugfs by the user.
	 */
	if (current->curr_ret_stack < -1)
		return -EBUSY;

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	if (ftrace_graph_notrace_addr(func))
		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
	current->ret_stack[index].subtime = 0;
	current->ret_stack[index].fp = frame_pointer;
	*depth = current->curr_ret_stack;

	return 0;
}
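
/*
 * Worked example of the notrace offset arithmetic above (an editorial,
 * illustrative sketch; it assumes FTRACE_NOTRACE_DEPTH is 65536, its
 * value in trace.h of this era):
 *
 *	push of a notrace'd function at depth 3:
 *		index = ++curr_ret_stack;		-> 3, entry recorded
 *		curr_ret_stack -= FTRACE_NOTRACE_DEPTH;	-> -65533
 *
 * every push below it now sees curr_ret_stack < -1 and bails out with
 * -EBUSY, so the filtered function's children go unrecorded; on return,
 * ftrace_pop_return_trace() sees index < 0 and adds FTRACE_NOTRACE_DEPTH
 * back, recovering index 3 and the saved return address.
 */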
/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	/*
	 * A negative index here means that it's just returned from a
	 * notrace'd function. Recover the index to get the original
	 * return address. See ftrace_push_return_trace().
	 *
	 * TODO: Need to check whether the stack gets corrupted.
	 */
	if (index < 0)
		index += FTRACE_NOTRACE_DEPTH;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the place holder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
}
/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	barrier();
	current->curr_ret_stack--;

	/*
	 * The curr_ret_stack can be less than -1 only if it was
	 * filtered out and it's about to return from the function.
	 * Recover the index and continue to trace normal functions.
	 */
	if (current->curr_ret_stack < -1) {
		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
		return ret;
	}

	/*
	 * The trace should run after decrementing the ret counter
	 * in case an interrupt were to come in. We don't want to
	 * lose the interrupt if max_depth is set.
	 */
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}
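
/*
 * Big-picture note (editorial summary, not from the original file): the
 * arch entry code rewrites each traced function's return address to point
 * at a trampoline, which ends up in ftrace_return_to_handler() above. The
 * ftrace_push_return_trace()/ftrace_pop_return_trace() pair therefore
 * brackets every traced call, which is what lets this tracer emit matching
 * entry/exit events with timestamps.
 */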
int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return 0;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), flags, pc);
	if (!event)
		return 0;
	entry = ring_buffer_event_data(event);
	entry->graph_ent = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_irq();
}
int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int ret;
	int cpu;
	int pc;

	if (!ftrace_trace_task(current))
		return 0;

	/* trace it when it is nested in an enabled function, or is one itself */
	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
	    (max_depth && trace->depth >= max_depth))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions. But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func))
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		ret = __trace_graph_entry(tr, trace, flags, pc);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
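
/*
 * Editorial note on the pattern above: the atomic_inc_return(&data->disabled)
 * == 1 test acts as a per-cpu reentrancy guard. If this CPU is already
 * inside the tracer (or tracing is disabled for it), the counter is greater
 * than 1 and the event is silently dropped rather than recursing into the
 * ring buffer.
 */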
static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
{
	if (tracing_thresh)
		return 1;
	else
		return trace_graph_entry(trace);
}

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long flags, int pc)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, flags, pc);
	__trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned long flags, int pc)
{
	__trace_graph_function(tr, ip, flags, pc);
}
void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned long flags,
				int pc)
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ret = *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
		__trace_graph_return(tr, trace, flags, pc);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */
	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}
static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&trace_graph_thresh_return,
					    &trace_graph_thresh_entry);
	else
		ret = register_ftrace_graph(&trace_graph_return,
					    &trace_graph_entry);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}
static int max_bytes_for_cpu;

static enum print_line_t
print_graph_cpu(struct trace_seq *s, int cpu)
{
	int ret;

	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
#define TRACE_GRAPH_PROCINFO_LENGTH	14

static enum print_line_t
print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int ret;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	/* Truncate the comm to 7 characters so the proc info fits its column */
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%s-%s", comm, pid_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
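
/*
 * Worked example of the centering above (illustrative): for comm "sshd"
 * and pid 1755, "sshd-1755" is 9 characters, so spaces = 14 - 9 = 5;
 * two spaces go in front (5 / 2) and three behind, yielding the
 * 14-character field "  sshd-1755   ".
 */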
static enum print_line_t
print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	if (!trace_seq_putc(s, ' '))
		return 0;

	return trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static enum print_line_t
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;
	int ret;

	if (!data)
		return TRACE_TYPE_HANDLED;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return TRACE_TYPE_HANDLED;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return TRACE_TYPE_HANDLED;
	/*
	 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

	 */
	ret = trace_seq_puts(s,
		" ------------------------------------------\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_cpu(s, cpu);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, prev_pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s, " => ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_proc(s, pid);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_seq_puts(s,
		"\n ------------------------------------------\n\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {
		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is a return type, we can safely
			 * copy the entire event. Otherwise we only need to
			 * record what type it is.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
	    curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}
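
/*
 * Illustrative effect of the leaf pairing above (a sketch, not verbatim
 * trace output): when an entry event is immediately followed by its own
 * return event, the two are merged into one line,
 *
 *	1)   0.321 us    |    rcu_read_lock();
 *
 * instead of the nested form used for functions with children,
 *
 *	1)               |    do_fault() {
 *	1)   1.234 us    |    }
 */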
static int print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	return trace_seq_printf(s, "%5lu.%06lu |  ",
			(unsigned long)t, usecs_rem);
}
static enum print_line_t
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	int ret;
	struct trace_seq *s = &iter->seq;

	if (addr < (unsigned long)__irqentry_text_start ||
	    addr >= (unsigned long)__irqentry_text_end)
		return TRACE_TYPE_UNHANDLED;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
			ret = print_graph_abs_time(iter->ts, s);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU) {
			ret = print_graph_cpu(s, cpu);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
		}

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			ret = print_graph_proc(s, pid);
			if (ret == TRACE_TYPE_PARTIAL_LINE)
				return TRACE_TYPE_PARTIAL_LINE;
			ret = trace_seq_puts(s, " | ");
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
	}

	/* No overhead */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_START);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	if (type == TRACE_GRAPH_ENT)
		ret = trace_seq_puts(s, "==========>");
	else
		ret = trace_seq_puts(s, "<==========");

	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = print_graph_duration(0, s, flags | FLAGS_FILL_END);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
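
/*
 * Sketch of the resulting annotation (illustrative; spacing approximate):
 * an interrupt that fires while a function is being traced shows up
 * bracketed by arrow markers in place of a duration, e.g.
 *
 *	1)               |    ==========>
 *	1)   6.486 us    |    smp_apic_timer_interrupt();
 *	1)               |    <==========
 */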
enum print_line_t
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char msecs_str[21];
	char nsecs_str[5];
	int ret, len;
	int i;

	sprintf(msecs_str, "%lu", (unsigned long) duration);

	/* Print msecs */
	ret = trace_seq_printf(s, "%s", msecs_str);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	len = strlen(msecs_str);

	/* Print nsecs (we don't want to exceed 7 numbers) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		ret = trace_seq_printf(s, ".%s", nsecs_str);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
		len += strlen(nsecs_str);
	}

	ret = trace_seq_puts(s, " us ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 7; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}
	return TRACE_TYPE_HANDLED;
}
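
/*
 * Worked example of the formatting above (illustrative): a duration of
 * 12345 ns gives do_div(duration, 1000) -> duration = 12, nsecs_rem = 345,
 * which prints as "12.345 us " padded out to the 7-character column.
 * Despite its name, msecs_str actually holds the microsecond part.
 */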
static enum print_line_t
print_graph_duration(unsigned long long duration, struct trace_seq *s,
		     u32 flags)
{
	int ret = -1;

	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return TRACE_TYPE_HANDLED;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		ret = trace_seq_puts(s, "              |  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_START:
		ret = trace_seq_puts(s, "  ");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	case FLAGS_FILL_END:
		ret = trace_seq_puts(s, " |");
		return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
	}

	/* Signal an overhead of time execution to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD) {
		/* Duration exceeded 100 usecs */
		if (duration > 100000ULL)
			ret = trace_seq_puts(s, "! ");
		/* Duration exceeded 10 usecs */
		else if (duration > 10000ULL)
			ret = trace_seq_puts(s, "+ ");
	}

	/*
	 * The -1 means we either did not exceed the duration thresholds
	 * or we don't want to print out the overhead. Either way we need
	 * to fill out the space.
	 */
	if (ret == -1)
		ret = trace_seq_puts(s, "  ");

	/* Catch here any failure that happened above */
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	ret = trace_print_graph_duration(duration, s);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	ret = trace_seq_puts(s, "|  ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int ret;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps();\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	int ret;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH)
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * We already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
static enum print_line_t
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	int cpu = iter->cpu;
	int ret;

	/* Pid */
	if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	if (type) {
		/* Interrupt */
		ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return 0;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME) {
		ret = print_graph_abs_time(iter->ts, s);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU) {
		ret = print_graph_cpu(s, cpu);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		ret = print_graph_proc(s, ent->pid);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;

		ret = trace_seq_puts(s, " | ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Latency format */
	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		ret = print_graph_lat_fmt(s, ent);
		if (ret == TRACE_TYPE_PARTIAL_LINE)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	return 0;
}
/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set (irqs are being displayed)
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}
/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set (irqs are being displayed)
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}
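
/*
 * Illustrative walk-through of the depth_irq bookkeeping above (a sketch):
 * with funcgraph-irqs off, an irq entry at depth 2 sets *depth_irq = 2;
 * every nested entry/return event is then suppressed (return value 1)
 * until a return event with depth <= 2 arrives, which resets *depth_irq
 * to -1 and hands normal output back to the task's call graph.
 */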
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * a note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int ret;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function; we now want the comments
		 * to display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Overhead and duration */
	ret = print_graph_duration(duration, s, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) {
		ret = trace_seq_putc(s, ' ');
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) {
		ret = trace_seq_puts(s, "}\n");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	} else {
		ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN) {
		ret = trace_seq_printf(s, " (Overruns: %lu)\n",
					trace->overrun);
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			      cpu, pid, flags);
	if (ret == TRACE_TYPE_PARTIAL_LINE)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	if (print_graph_prologue(iter, s, 0, 0, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	/* No time */
	ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL);
	if (ret != TRACE_TYPE_HANDLED)
		return ret;

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) {
			ret = trace_seq_putc(s, ' ');
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}

	/* The comment */
	ret = trace_seq_puts(s, "/* ");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	switch (iter->ent->type) {
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Strip ending newline */
	if (s->buffer[s->len - 1] == '\n') {
		s->buffer[s->len - 1] = '\0';
		s->len--;
	}

	ret = trace_seq_puts(s, " */\n");
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (flags & TRACE_GRAPH_PRINT_FLAT)
		return TRACE_TYPE_UNHANDLED;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * so it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}
static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}
static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "  TASK/PID       ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "  DURATION   ");
	seq_printf(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_printf(s, "#");
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_printf(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_printf(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_printf(s, "   |    |        ");
	if (lat)
		seq_printf(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_printf(s, "   |   |      ");
	seq_printf(s, "               |   |   |   |\n");
}
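
/*
 * With the default flags (cpu, overhead, duration, irqs) the two header
 * lines printed above look roughly like this (editorial illustration,
 * spacing approximate):
 *
 *	# CPU  DURATION                  FUNCTION CALLS
 *	# |     |   |                     |   |   |   |
 */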
static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;

	if (flags & TRACE_GRAPH_PRINT_FLAT) {
		trace_default_header(s);
		return;
	}

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(s, flags);
}
void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warning("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}
static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	return 0;
}

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
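
/*
 * Example usage from userspace (a sketch; the debugfs mount point is
 * typically /sys/kernel/debug):
 *
 *	# echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *	# echo 3 > /sys/kernel/debug/tracing/max_graph_depth
 *	# cat /sys/kernel/debug/tracing/trace
 *
 * limits the recorded call graph to three levels of nesting.
 */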
static __init int init_graph_debugfs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_debugfs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);