  1. /*
  2. * trace_output.c
  3. *
  4. * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
  5. *
  6. */
  7. #include <linux/module.h>
  8. #include <linux/mutex.h>
  9. #include <linux/ftrace.h>
  10. #include "trace_output.h"
  11. /* must be a power of 2 */
  12. #define EVENT_HASHSIZE 128
  13. DECLARE_RWSEM(trace_event_sem);
  14. static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
  15. static int next_event_type = __TRACE_LAST_TYPE + 1;
  16. enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
  17. {
  18. struct trace_seq *s = &iter->seq;
  19. struct trace_entry *entry = iter->ent;
  20. struct bputs_entry *field;
  21. int ret;
  22. trace_assign_type(field, entry);
  23. ret = trace_seq_puts(s, field->str);
  24. if (!ret)
  25. return TRACE_TYPE_PARTIAL_LINE;
  26. return TRACE_TYPE_HANDLED;
  27. }
  28. enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
  29. {
  30. struct trace_seq *s = &iter->seq;
  31. struct trace_entry *entry = iter->ent;
  32. struct bprint_entry *field;
  33. int ret;
  34. trace_assign_type(field, entry);
  35. ret = trace_seq_bprintf(s, field->fmt, field->buf);
  36. if (!ret)
  37. return TRACE_TYPE_PARTIAL_LINE;
  38. return TRACE_TYPE_HANDLED;
  39. }
  40. enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
  41. {
  42. struct trace_seq *s = &iter->seq;
  43. struct trace_entry *entry = iter->ent;
  44. struct print_entry *field;
  45. int ret;
  46. trace_assign_type(field, entry);
  47. ret = trace_seq_puts(s, field->buf);
  48. if (!ret)
  49. return TRACE_TYPE_PARTIAL_LINE;
  50. return TRACE_TYPE_HANDLED;
  51. }
/*
 * ftrace_print_flags_seq - decode a bitmask into delimited flag names.
 * @p:          scratch trace_seq the result is rendered into
 * @delim:      separator printed between names (may be NULL)
 * @flags:      bitmask to decode
 * @flag_array: name/mask table, terminated by a NULL name
 *
 * Returns a pointer to the start of the rendered, NUL-terminated string
 * inside @p's buffer.
 */
const char *
ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
		       unsigned long flags,
		       const struct trace_print_flags *flag_array)
{
	unsigned long mask;
	const char *str;
	const char *ret = trace_seq_buffer_ptr(p);
	int i, first = 1;

	/* Stop early once every bit has been consumed. */
	for (i = 0; flag_array[i].name && flags; i++) {

		mask = flag_array[i].mask;
		/* A flag matches only when all bits of its mask are set. */
		if ((flags & mask) != mask)
			continue;

		str = flag_array[i].name;
		flags &= ~mask;
		if (!first && delim)
			trace_seq_puts(p, delim);
		else
			first = 0;
		trace_seq_puts(p, str);
	}

	/* check for left over flags */
	if (flags) {
		if (!first && delim)
			trace_seq_puts(p, delim);
		trace_seq_printf(p, "0x%lx", flags);
	}

	/* NUL-terminate so the caller can use the buffer as a C string. */
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_flags_seq);
  83. const char *
  84. ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
  85. const struct trace_print_flags *symbol_array)
  86. {
  87. int i;
  88. const char *ret = trace_seq_buffer_ptr(p);
  89. for (i = 0; symbol_array[i].name; i++) {
  90. if (val != symbol_array[i].mask)
  91. continue;
  92. trace_seq_puts(p, symbol_array[i].name);
  93. break;
  94. }
  95. if (ret == (const char *)(trace_seq_buffer_ptr(p)))
  96. trace_seq_printf(p, "0x%lx", val);
  97. trace_seq_putc(p, 0);
  98. return ret;
  99. }
  100. EXPORT_SYMBOL(ftrace_print_symbols_seq);
#if BITS_PER_LONG == 32
/*
 * u64 variant of ftrace_print_symbols_seq() for 32-bit kernels, where
 * "unsigned long" cannot hold a 64-bit mask.  Prints the matching name,
 * or the value in hex when no table entry matches.
 */
const char *
ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
			     const struct trace_print_flags_u64 *symbol_array)
{
	int i;
	const char *ret = trace_seq_buffer_ptr(p);

	for (i = 0; symbol_array[i].name; i++) {

		if (val != symbol_array[i].mask)
			continue;

		trace_seq_puts(p, symbol_array[i].name);
		break;
	}

	/* Buffer pointer unchanged: nothing matched, fall back to hex. */
	if (ret == (const char *)(trace_seq_buffer_ptr(p)))
		trace_seq_printf(p, "0x%llx", val);

	/* NUL-terminate for the caller. */
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL(ftrace_print_symbols_seq_u64);
#endif
/*
 * Render the bitmask at @bitmask_ptr (@bitmask_size bytes) via
 * trace_seq_bitmask() and return the start of the NUL-terminated
 * result inside @p's buffer.
 */
const char *
ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
			 unsigned int bitmask_size)
{
	const char *ret = trace_seq_buffer_ptr(p);

	/* bitmask_size is in bytes; trace_seq_bitmask() takes bits. */
	trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
	trace_seq_putc(p, 0);

	return ret;
}
EXPORT_SYMBOL_GPL(ftrace_print_bitmask_seq);
  131. const char *
  132. ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
  133. {
  134. int i;
  135. const char *ret = trace_seq_buffer_ptr(p);
  136. for (i = 0; i < buf_len; i++)
  137. trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);
  138. trace_seq_putc(p, 0);
  139. return ret;
  140. }
  141. EXPORT_SYMBOL(ftrace_print_hex_seq);
/*
 * ftrace_raw_output_prep - common prologue for TRACE_EVENT output handlers.
 *
 * Checks that the iterator's current entry really belongs to @trace_event,
 * resets the scratch seq (iter->tmp_seq) and prints the "<event name>: "
 * prefix into the output seq.
 *
 * Returns 0 on success, TRACE_TYPE_UNHANDLED on a type mismatch, or
 * TRACE_TYPE_PARTIAL_LINE when the output buffer fills up.
 */
int ftrace_raw_output_prep(struct trace_iterator *iter,
			   struct trace_event *trace_event)
{
	struct ftrace_event_call *event;
	struct trace_seq *s = &iter->seq;
	struct trace_seq *p = &iter->tmp_seq;
	struct trace_entry *entry;
	int ret;

	/* The trace_event is embedded inside the ftrace_event_call. */
	event = container_of(trace_event, struct ftrace_event_call, event);
	entry = iter->ent;

	if (entry->type != event->event.type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	/* Fresh scratch buffer for the caller's field formatting. */
	trace_seq_init(p);
	ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event));
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return 0;
}
EXPORT_SYMBOL(ftrace_raw_output_prep);
  163. static int ftrace_output_raw(struct trace_iterator *iter, char *name,
  164. char *fmt, va_list ap)
  165. {
  166. struct trace_seq *s = &iter->seq;
  167. int ret;
  168. ret = trace_seq_printf(s, "%s: ", name);
  169. if (!ret)
  170. return TRACE_TYPE_PARTIAL_LINE;
  171. ret = trace_seq_vprintf(s, fmt, ap);
  172. if (!ret)
  173. return TRACE_TYPE_PARTIAL_LINE;
  174. return TRACE_TYPE_HANDLED;
  175. }
  176. int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...)
  177. {
  178. va_list ap;
  179. int ret;
  180. va_start(ap, fmt);
  181. ret = ftrace_output_raw(iter, name, fmt, ap);
  182. va_end(ap);
  183. return ret;
  184. }
  185. EXPORT_SYMBOL_GPL(ftrace_output_call);
#ifdef CONFIG_KRETPROBES
/*
 * Hide the kretprobe trampoline from output: a return address that
 * resolves to "kretprobe_trampoline" is not the real caller.
 */
static inline const char *kretprobed(const char *name)
{
	static const char tramp_name[] = "kretprobe_trampoline";
	int size = sizeof(tramp_name);

	/* size includes the NUL, so this is a full-string match. */
	if (strncmp(tramp_name, name, size) == 0)
		return "[unknown/kretprobe'd]";
	return name;
}
#else
/* Without kretprobes there is no trampoline to filter out. */
static inline const char *kretprobed(const char *name)
{
	return name;
}
#endif /* CONFIG_KRETPROBES */
/*
 * Print just the symbol name for @address using @fmt.  Without
 * CONFIG_KALLSYMS no lookup is possible: nothing is printed and
 * success is reported.
 */
static int
seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	kallsyms_lookup(address, NULL, NULL, NULL, str);

	/* Filter out the kretprobe trampoline. */
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	/* Only reached when CONFIG_KALLSYMS is not set. */
	return 1;
}
/*
 * Like seq_print_sym_short() but resolves @address with offset
 * information (via sprint_symbol()).  Without CONFIG_KALLSYMS nothing
 * is printed and success is reported.
 */
static int
seq_print_sym_offset(struct trace_seq *s, const char *fmt,
		     unsigned long address)
{
#ifdef CONFIG_KALLSYMS
	char str[KSYM_SYMBOL_LEN];
	const char *name;

	sprint_symbol(str, address);

	/* Filter out the kretprobe trampoline. */
	name = kretprobed(str);

	return trace_seq_printf(s, fmt, name);
#endif
	/* Only reached when CONFIG_KALLSYMS is not set. */
	return 1;
}
/* Address field width: 8 hex digits on 32-bit kernels, 16 on 64-bit. */
#ifndef CONFIG_64BIT
# define IP_FMT "%08lx"
#else
# define IP_FMT "%016lx"
#endif
/*
 * seq_print_user_ip - print a userspace address as "path[+offset]".
 *
 * With @mm available, @ip is resolved to the vma mapping it and the
 * backing file's path plus the offset from the vma start is printed.
 * The raw address is appended when TRACE_ITER_SYM_ADDR is set or no
 * file mapping was found.  Returns 0 on seq overflow.
 */
int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
		      unsigned long ip, unsigned long sym_flags)
{
	struct file *file = NULL;
	unsigned long vmstart = 0;
	int ret = 1;

	/* Buffer already overflowed: nothing more can be printed. */
	if (s->full)
		return 0;

	if (mm) {
		const struct vm_area_struct *vma;

		/* find_vma() must run under mmap_sem. */
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ip);
		if (vma) {
			file = vma->vm_file;
			vmstart = vma->vm_start;
		}
		if (file) {
			ret = trace_seq_path(s, &file->f_path);
			if (ret)
				ret = trace_seq_printf(s, "[+0x%lx]",
						       ip - vmstart);
		}
		up_read(&mm->mmap_sem);
	}
	if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
		ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
	return ret;
}
/*
 * Print each caller address of a userstack entry as a " => ..." line.
 * With TRACE_ITER_SYM_USEROBJ set, the task's mm is looked up via the
 * recorded tgid so addresses can be resolved to file mappings.
 * Returns 0 on seq overflow, non-zero otherwise.
 */
int
seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
		      unsigned long sym_flags)
{
	struct mm_struct *mm = NULL;
	int ret = 1;
	unsigned int i;

	if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(entry->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

	for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
		unsigned long ip = entry->caller[i];

		/* ULONG_MAX marks the end of the recorded stack. */
		if (ip == ULONG_MAX || !ret)
			break;
		if (ret)
			ret = trace_seq_puts(s, " => ");
		/* A zero entry is printed as "??". */
		if (!ip) {
			if (ret)
				ret = trace_seq_puts(s, "??");
			if (ret)
				ret = trace_seq_putc(s, '\n');
			continue;
		}
		if (!ret)
			break;
		if (ret)
			ret = seq_print_user_ip(s, mm, ip, sym_flags);
		ret = trace_seq_putc(s, '\n');
	}

	if (mm)
		mmput(mm);
	return ret;
}
  301. int
  302. seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
  303. {
  304. int ret;
  305. if (!ip)
  306. return trace_seq_putc(s, '0');
  307. if (sym_flags & TRACE_ITER_SYM_OFFSET)
  308. ret = seq_print_sym_offset(s, "%s", ip);
  309. else
  310. ret = seq_print_sym_short(s, "%s", ip);
  311. if (!ret)
  312. return 0;
  313. if (sym_flags & TRACE_ITER_SYM_ADDR)
  314. ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
  315. return ret;
  316. }
/**
 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
 * @s: trace seq struct to write to
 * @entry: The trace entry field from the ring buffer
 *
 * Prints the generic fields of irqs off, in hard or softirq, preempt
 * count.
 */
int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	char hardsoft_irq;
	char need_resched;
	char irqs_off;
	int hardirq;
	int softirq;
	int ret;

	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;

	/* 'd' = irqs disabled, 'X' = arch cannot record it, '.' = enabled */
	irqs_off =
		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
		'.';

	/* 'N' = both flags, 'n'/'p' = one of them, '.' = neither */
	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
				TRACE_FLAG_PREEMPT_RESCHED)) {
	case TRACE_FLAG_NEED_RESCHED | TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'N';
		break;
	case TRACE_FLAG_NEED_RESCHED:
		need_resched = 'n';
		break;
	case TRACE_FLAG_PREEMPT_RESCHED:
		need_resched = 'p';
		break;
	default:
		need_resched = '.';
		break;
	}

	/* 'H' = hardirq nested over softirq, else 'h'/'s'/'.' */
	hardsoft_irq =
		(hardirq && softirq) ? 'H' :
		hardirq ? 'h' :
		softirq ? 's' :
		'.';

	if (!trace_seq_printf(s, "%c%c%c",
			      irqs_off, need_resched, hardsoft_irq))
		return 0;

	/* Preempt count in hex, or '.' when zero. */
	if (entry->preempt_count)
		ret = trace_seq_printf(s, "%x", entry->preempt_count);
	else
		ret = trace_seq_putc(s, '.');

	return ret;
}
/*
 * Non-verbose latency header: "comm-pid cpu" followed by the
 * irq/preempt latency flags.  Returns 0 on seq overflow.
 */
static int
lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(entry->pid, comm);

	if (!trace_seq_printf(s, "%8.8s-%-5d %3d",
			      comm, entry->pid, cpu))
		return 0;

	return trace_print_lat_fmt(s, entry);
}
/* Relative delay (usecs) above which the '!' latency marker is shown. */
static unsigned long preempt_mark_thresh_us = 100;

/*
 * Print the latency-format timestamp: time since the buffer started
 * plus the delta to the next entry.  Verbose mode prints full
 * msec.usec values; compact mode prints " NNNNus<mark>: " where the
 * mark is '!', '+' or ' ' depending on the delta.  Counter-based
 * clocks (!TRACE_FILE_TIME_IN_NS) print raw counts instead.
 */
static int
lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{
	unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
	unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
	unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
	unsigned long long rel_ts = next_ts - iter->ts;
	struct trace_seq *s = &iter->seq;

	if (in_ns) {
		abs_ts = ns2usecs(abs_ts);
		rel_ts = ns2usecs(rel_ts);
	}

	if (verbose && in_ns) {
		/* do_div() divides in place and returns the remainder. */
		unsigned long abs_usec = do_div(abs_ts, USEC_PER_MSEC);
		unsigned long abs_msec = (unsigned long)abs_ts;
		unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC);
		unsigned long rel_msec = (unsigned long)rel_ts;

		return trace_seq_printf(
			s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ",
			ns2usecs(iter->ts),
			abs_msec, abs_usec,
			rel_msec, rel_usec);
	} else if (verbose && !in_ns) {
		return trace_seq_printf(
			s, "[%016llx] %lld (+%lld): ",
			iter->ts, abs_ts, rel_ts);
	} else if (!verbose && in_ns) {
		return trace_seq_printf(
			s, " %4lldus%c: ",
			abs_ts,
			rel_ts > preempt_mark_thresh_us ? '!' :
			rel_ts > 1 ? '+' : ' ');
	} else { /* !verbose && !in_ns */
		return trace_seq_printf(s, " %4lld: ", abs_ts);
	}
}
/*
 * trace_print_context - print the standard per-entry header:
 * "comm-pid [(tgid)] [cpu] [lat-flags] timestamp: ".
 * Returns 0 on seq overflow, non-zero otherwise.
 */
int trace_print_context(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	unsigned long long t;
	unsigned long secs, usec_rem;
	char comm[TASK_COMM_LEN];
	int ret;
	int tgid;

	trace_find_cmdline(entry->pid, comm);

	ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
	if (!ret)
		return 0;

	/* Optional "(tgid)" column; "(-----)" when no tgid is known. */
	if (trace_flags & TRACE_ITER_TGID) {
		tgid = trace_find_tgid(entry->pid);
		if (tgid < 0)
			ret = trace_seq_puts(s, "(-----) ");
		else
			ret = trace_seq_printf(s, "(%5d) ", tgid);
		if (!ret)
			return 0;
	}

	ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
	if (!ret)
		return 0;

	/* Optional irq/preempt flags column. */
	if (trace_flags & TRACE_ITER_IRQ_INFO) {
		ret = trace_print_lat_fmt(s, entry);
		if (!ret)
			return 0;
	}

	/* "sec.usec:" for ns clocks, raw counter value otherwise. */
	if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) {
		t = ns2usecs(iter->ts);
		usec_rem = do_div(t, USEC_PER_SEC);
		secs = (unsigned long)t;
		return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem);
	} else
		return trace_seq_printf(s, " %12llu: ", iter->ts);
}
/*
 * trace_print_lat_context - latency-format header for the current entry.
 *
 * Peeks at the next entry (restoring iter->ent_size afterwards) so a
 * relative timestamp can be printed.  Verbose mode dumps the raw entry
 * fields; otherwise the generic comm/pid/flags line is used.
 */
int trace_print_lat_context(struct trace_iterator *iter)
{
	u64 next_ts;
	int ret;
	/* trace_find_next_entry will reset ent_size */
	int ent_size = iter->ent_size;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent,
			   *next_entry = trace_find_next_entry(iter, NULL,
							       &next_ts);
	unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);

	/* Restore the original ent_size */
	iter->ent_size = ent_size;

	/* Last entry in the buffer: no delta to compute. */
	if (!next_entry)
		next_ts = iter->ts;

	if (verbose) {
		char comm[TASK_COMM_LEN];

		trace_find_cmdline(entry->pid, comm);

		ret = trace_seq_printf(
			s, "%16s %5d %3d %d %08x %08lx ",
			comm, entry->pid, iter->cpu, entry->flags,
			entry->preempt_count, iter->idx);
	} else {
		ret = lat_print_generic(s, entry, iter->cpu);
	}

	if (ret)
		ret = lat_print_timestamp(iter, next_ts);

	return ret;
}
/* One-letter codes for task states, indexed by state bit number (+1). */
static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;

static int task_state_char(unsigned long state)
{
	/* Index 0 is "running"; otherwise the lowest set bit, 1-based. */
	int bit = state ? __ffs(state) + 1 : 0;

	/* '?' for state bits beyond what the string describes. */
	return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
}
/**
 * ftrace_find_event - find a registered event
 * @type: the type of event to look for
 *
 * Returns an event of type @type otherwise NULL
 * Called with trace_event_read_lock() held.
 */
struct trace_event *ftrace_find_event(int type)
{
	struct trace_event *event;
	unsigned key;

	/* EVENT_HASHSIZE is a power of 2, so masking selects the bucket. */
	key = type & (EVENT_HASHSIZE - 1);

	hlist_for_each_entry(event, &event_hash[key], node) {
		if (event->type == type)
			return event;
	}

	return NULL;
}
/* Dynamically-allocated events, kept sorted by type number. */
static LIST_HEAD(ftrace_event_list);

/*
 * Find a free event type number by scanning for a gap in the sorted
 * ftrace_event_list.  On success *list is set to the node to insert
 * before (via list_add_tail) and the free type number is returned;
 * returns 0 when the whole type space is in use.
 */
static int trace_search_list(struct list_head **list)
{
	struct trace_event *e;
	int last = __TRACE_LAST_TYPE;

	if (list_empty(&ftrace_event_list)) {
		*list = &ftrace_event_list;
		return last + 1;
	}

	/*
	 * We used up all possible max events,
	 * lets see if somebody freed one.
	 */
	list_for_each_entry(e, &ftrace_event_list, list) {
		if (e->type != last + 1)
			break;
		last++;
	}

	/* Did we used up all 65 thousand events??? */
	if ((last + 1) > FTRACE_MAX_EVENT)
		return 0;

	*list = &e->list;
	return last + 1;
}
/* Reader side of trace_event_sem: pins the event hash/list for lookup. */
void trace_event_read_lock(void)
{
	down_read(&trace_event_sem);
}
/* Release the reader side taken by trace_event_read_lock(). */
void trace_event_read_unlock(void)
{
	up_read(&trace_event_sem);
}
/**
 * register_ftrace_event - register output for an event type
 * @event: the event type to register
 *
 * Event types are stored in a hash and this hash is used to
 * find a way to print an event. If the @event->type is set
 * then it will use that type, otherwise it will assign a
 * type to use.
 *
 * If you assign your own type, please make sure it is added
 * to the trace_type enum in trace.h, to avoid collisions
 * with the dynamic types.
 *
 * Returns the event type number or zero on error.
 */
int register_ftrace_event(struct trace_event *event)
{
	unsigned key;
	int ret = 0;

	down_write(&trace_event_sem);

	if (WARN_ON(!event))
		goto out;

	if (WARN_ON(!event->funcs))
		goto out;

	INIT_LIST_HEAD(&event->list);

	if (!event->type) {
		struct list_head *list = NULL;

		/* Sequential space exhausted: look for a freed slot. */
		if (next_event_type > FTRACE_MAX_EVENT) {

			event->type = trace_search_list(&list);
			if (!event->type)
				goto out;

		} else {

			event->type = next_event_type++;
			list = &ftrace_event_list;
		}

		if (WARN_ON(ftrace_find_event(event->type)))
			goto out;

		list_add_tail(&event->list, list);

	} else if (event->type > __TRACE_LAST_TYPE) {
		/* Static types must be declared in the trace_type enum. */
		printk(KERN_WARNING "Need to add type to trace.h\n");
		WARN_ON(1);
		goto out;
	} else {
		/* Is this event already used */
		if (ftrace_find_event(event->type))
			goto out;
	}

	/* Fill in any missing output methods with the nop printer. */
	if (event->funcs->trace == NULL)
		event->funcs->trace = trace_nop_print;
	if (event->funcs->raw == NULL)
		event->funcs->raw = trace_nop_print;
	if (event->funcs->hex == NULL)
		event->funcs->hex = trace_nop_print;
	if (event->funcs->binary == NULL)
		event->funcs->binary = trace_nop_print;

	key = event->type & (EVENT_HASHSIZE - 1);

	hlist_add_head(&event->node, &event_hash[key]);

	ret = event->type;
 out:
	up_write(&trace_event_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(register_ftrace_event);
/*
 * Used by module code with the trace_event_sem held for write.
 */
int __unregister_ftrace_event(struct trace_event *event)
{
	/* Drop from both the lookup hash and the ordered type list. */
	hlist_del(&event->node);
	list_del(&event->list);
	return 0;
}
/**
 * unregister_ftrace_event - remove a no longer used event
 * @event: the event to remove
 */
int unregister_ftrace_event(struct trace_event *event)
{
	/* Locked variant of __unregister_ftrace_event(). */
	down_write(&trace_event_sem);
	__unregister_ftrace_event(event);
	up_write(&trace_event_sem);

	return 0;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_event);
  622. /*
  623. * Standard events
  624. */
  625. enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags,
  626. struct trace_event *event)
  627. {
  628. if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type))
  629. return TRACE_TYPE_PARTIAL_LINE;
  630. return TRACE_TYPE_HANDLED;
  631. }
/* TRACE_FN */
/*
 * Human-readable function-trace line: "ip <-parent_ip\n", with symbols
 * resolved according to @flags.  The parent is shown only when
 * TRACE_ITER_PRINT_PARENT is set and a parent ip was recorded.
 */
static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	if (!seq_print_ip_sym(s, field->ip, flags))
		goto partial;

	if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) {
		if (!trace_seq_puts(s, " <-"))
			goto partial;
		if (!seq_print_ip_sym(s,
				      field->parent_ip,
				      flags))
			goto partial;
	}
	if (!trace_seq_putc(s, '\n'))
		goto partial;

	return TRACE_TYPE_HANDLED;

 partial:
	return TRACE_TYPE_PARTIAL_LINE;
}
  655. static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags,
  656. struct trace_event *event)
  657. {
  658. struct ftrace_entry *field;
  659. trace_assign_type(field, iter->ent);
  660. if (!trace_seq_printf(&iter->seq, "%lx %lx\n",
  661. field->ip,
  662. field->parent_ip))
  663. return TRACE_TYPE_PARTIAL_LINE;
  664. return TRACE_TYPE_HANDLED;
  665. }
/*
 * Hex output for TRACE_FN; the SEQ_PUT_HEX_FIELD_RET() macros return
 * TRACE_TYPE_PARTIAL_LINE from this function on seq overflow.
 */
static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}
/*
 * Binary output for TRACE_FN; the SEQ_PUT_FIELD_RET() macros return
 * TRACE_TYPE_PARTIAL_LINE from this function on seq overflow.
 */
static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags,
				      struct trace_event *event)
{
	struct ftrace_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ip);
	SEQ_PUT_FIELD_RET(s, field->parent_ip);

	return TRACE_TYPE_HANDLED;
}
/* Dispatch table binding TRACE_FN entries to the printers above. */
static struct trace_event_functions trace_fn_funcs = {
	.trace		= trace_fn_trace,
	.raw		= trace_fn_raw,
	.hex		= trace_fn_hex,
	.binary		= trace_fn_bin,
};

static struct trace_event trace_fn_event = {
	.type		= TRACE_FN,
	.funcs		= &trace_fn_funcs,
};
/* TRACE_GRAPH_ENT */
/*
 * Human-readable graph-entry line: "graph_ent: func=<sym>\n".
 */
static enum print_line_t trace_graph_ent_trace(struct trace_iterator *iter, int flags,
					       struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct ftrace_graph_ent_entry *field;

	trace_assign_type(field, iter->ent);

	if (!trace_seq_puts(s, "graph_ent: func="))
		return TRACE_TYPE_PARTIAL_LINE;

	if (!seq_print_ip_sym(s, field->graph_ent.func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_puts(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
  711. static enum print_line_t trace_graph_ent_raw(struct trace_iterator *iter, int flags,
  712. struct trace_event *event)
  713. {
  714. struct ftrace_graph_ent_entry *field;
  715. trace_assign_type(field, iter->ent);
  716. if (!trace_seq_printf(&iter->seq, "%lx %d\n",
  717. field->graph_ent.func,
  718. field->graph_ent.depth))
  719. return TRACE_TYPE_PARTIAL_LINE;
  720. return TRACE_TYPE_HANDLED;
  721. }
/*
 * Hex output for graph entry; SEQ_PUT_HEX_FIELD_RET() returns
 * TRACE_TYPE_PARTIAL_LINE from this function on seq overflow.
 */
static enum print_line_t trace_graph_ent_hex(struct trace_iterator *iter, int flags,
					     struct trace_event *event)
{
	struct ftrace_graph_ent_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->graph_ent.func);
	SEQ_PUT_HEX_FIELD_RET(s, field->graph_ent.depth);

	return TRACE_TYPE_HANDLED;
}
/*
 * Binary output for graph entry; SEQ_PUT_FIELD_RET() returns
 * TRACE_TYPE_PARTIAL_LINE from this function on seq overflow.
 */
static enum print_line_t trace_graph_ent_bin(struct trace_iterator *iter, int flags,
					     struct trace_event *event)
{
	struct ftrace_graph_ent_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->graph_ent.func);
	SEQ_PUT_FIELD_RET(s, field->graph_ent.depth);

	return TRACE_TYPE_HANDLED;
}
/* Dispatch table binding TRACE_GRAPH_ENT entries to the printers above. */
static struct trace_event_functions trace_graph_ent_funcs = {
	.trace		= trace_graph_ent_trace,
	.raw		= trace_graph_ent_raw,
	.hex		= trace_graph_ent_hex,
	.binary		= trace_graph_ent_bin,
};

static struct trace_event trace_graph_ent_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &trace_graph_ent_funcs,
};
/* TRACE_GRAPH_RET */
/*
 * Human-readable graph-return line: "graph_ret: func=<sym>\n".
 */
static enum print_line_t trace_graph_ret_trace(struct trace_iterator *iter, int flags,
					       struct trace_event *event)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry = iter->ent;
	struct ftrace_graph_ret_entry *field;

	trace_assign_type(field, entry);

	if (!trace_seq_puts(s, "graph_ret: func="))
		return TRACE_TYPE_PARTIAL_LINE;

	if (!seq_print_ip_sym(s, field->ret.func, flags))
		return TRACE_TYPE_PARTIAL_LINE;

	if (!trace_seq_puts(s, "\n"))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
  768. static enum print_line_t trace_graph_ret_raw(struct trace_iterator *iter, int flags,
  769. struct trace_event *event)
  770. {
  771. struct ftrace_graph_ret_entry *field;
  772. trace_assign_type(field, iter->ent);
  773. if (!trace_seq_printf(&iter->seq, "%lx %lld %lld %ld %d\n",
  774. field->ret.func,
  775. field->ret.calltime,
  776. field->ret.rettime,
  777. field->ret.overrun,
  778. field->ret.depth));
  779. return TRACE_TYPE_PARTIAL_LINE;
  780. return TRACE_TYPE_HANDLED;
  781. }
/*
 * Hex output for graph return; SEQ_PUT_HEX_FIELD_RET() returns
 * TRACE_TYPE_PARTIAL_LINE from this function on seq overflow.
 */
static enum print_line_t trace_graph_ret_hex(struct trace_iterator *iter, int flags,
					     struct trace_event *event)
{
	struct ftrace_graph_ret_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_HEX_FIELD_RET(s, field->ret.func);
	SEQ_PUT_HEX_FIELD_RET(s, field->ret.calltime);
	SEQ_PUT_HEX_FIELD_RET(s, field->ret.rettime);
	SEQ_PUT_HEX_FIELD_RET(s, field->ret.overrun);
	SEQ_PUT_HEX_FIELD_RET(s, field->ret.depth);

	return TRACE_TYPE_HANDLED;
}
/*
 * Binary output for graph return; SEQ_PUT_FIELD_RET() returns
 * TRACE_TYPE_PARTIAL_LINE from this function on seq overflow.
 */
static enum print_line_t trace_graph_ret_bin(struct trace_iterator *iter, int flags,
					     struct trace_event *event)
{
	struct ftrace_graph_ret_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->ret.func);
	SEQ_PUT_FIELD_RET(s, field->ret.calltime);
	SEQ_PUT_FIELD_RET(s, field->ret.rettime);
	SEQ_PUT_FIELD_RET(s, field->ret.overrun);
	SEQ_PUT_FIELD_RET(s, field->ret.depth);

	return TRACE_TYPE_HANDLED;
}
/* Dispatch table binding TRACE_GRAPH_RET entries to the printers above. */
static struct trace_event_functions trace_graph_ret_funcs = {
	.trace		= trace_graph_ret_trace,
	.raw		= trace_graph_ret_raw,
	.hex		= trace_graph_ret_hex,
	.binary		= trace_graph_ret_bin,
};

static struct trace_event trace_graph_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &trace_graph_ret_funcs,
};
/* TRACE_CTX and TRACE_WAKE */
/*
 * Shared printer for sched-switch/wakeup entries:
 * " prev_pid:prio:state <delim> [cpu] next_pid:prio:state comm\n".
 */
static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
					     char *delim)
{
	struct ctx_switch_entry *field;
	char comm[TASK_COMM_LEN];
	int S, T;

	trace_assign_type(field, iter->ent);

	T = task_state_char(field->next_state);
	S = task_state_char(field->prev_state);
	trace_find_cmdline(field->next_pid, comm);
	if (!trace_seq_printf(&iter->seq,
			      " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
			      field->prev_pid,
			      field->prev_prio,
			      S, delim,
			      field->next_cpu,
			      field->next_pid,
			      field->next_prio,
			      T, comm))
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
/* TRACE_CTX human-readable output: context switch shown as "prev ==> next". */
static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags,
					 struct trace_event *event)
{
	return trace_ctxwake_print(iter, "==>");
}
/* TRACE_WAKE human-readable output: wakeup shown with a "  +" delimiter. */
static enum print_line_t trace_wake_print(struct trace_iterator *iter,
					  int flags, struct trace_event *event)
{
	return trace_ctxwake_print(iter, "  +");
}
  851. static int trace_ctxwake_raw(struct trace_iterator *iter, char S)
  852. {
  853. struct ctx_switch_entry *field;
  854. int T;
  855. trace_assign_type(field, iter->ent);
  856. if (!S)
  857. S = task_state_char(field->prev_state);
  858. T = task_state_char(field->next_state);
  859. if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n",
  860. field->prev_pid,
  861. field->prev_prio,
  862. S,
  863. field->next_cpu,
  864. field->next_pid,
  865. field->next_prio,
  866. T))
  867. return TRACE_TYPE_PARTIAL_LINE;
  868. return TRACE_TYPE_HANDLED;
  869. }
/* TRACE_CTX raw output: state char computed from the entry (S == 0). */
static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_raw(iter, 0);
}
/* TRACE_WAKE raw output: state char forced to '+' to mark a wakeup. */
static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_raw(iter, '+');
}
  880. static int trace_ctxwake_hex(struct trace_iterator *iter, char S)
  881. {
  882. struct ctx_switch_entry *field;
  883. struct trace_seq *s = &iter->seq;
  884. int T;
  885. trace_assign_type(field, iter->ent);
  886. if (!S)
  887. S = task_state_char(field->prev_state);
  888. T = task_state_char(field->next_state);
  889. SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
  890. SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
  891. SEQ_PUT_HEX_FIELD_RET(s, S);
  892. SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
  893. SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
  894. SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
  895. SEQ_PUT_HEX_FIELD_RET(s, T);
  896. return TRACE_TYPE_HANDLED;
  897. }
/* TRACE_CTX hex output: state char computed from the entry (S == 0). */
static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	return trace_ctxwake_hex(iter, 0);
}
/* TRACE_WAKE hex output: state char forced to '+' to mark a wakeup. */
static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags,
					struct trace_event *event)
{
	return trace_ctxwake_hex(iter, '+');
}
/*
 * Binary output shared by TRACE_CTX and TRACE_WAKE: raw field values
 * straight into the seq buffer (note: next_cpu is not emitted here).
 *
 * NOTE(review): SEQ_PUT_FIELD_RET presumably returns
 * TRACE_TYPE_PARTIAL_LINE from this function on a full buffer —
 * confirm against the macro definition.
 */
static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter,
					   int flags, struct trace_event *event)
{
	struct ctx_switch_entry *field;
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);
	SEQ_PUT_FIELD_RET(s, field->prev_pid);
	SEQ_PUT_FIELD_RET(s, field->prev_prio);
	SEQ_PUT_FIELD_RET(s, field->prev_state);
	SEQ_PUT_FIELD_RET(s, field->next_pid);
	SEQ_PUT_FIELD_RET(s, field->next_prio);
	SEQ_PUT_FIELD_RET(s, field->next_state);
	return TRACE_TYPE_HANDLED;
}
/* Per-output-mode formatting callbacks for TRACE_CTX entries. */
static struct trace_event_functions trace_ctx_funcs = {
	.trace = trace_ctx_print,
	.raw = trace_ctx_raw,
	.hex = trace_ctx_hex,
	.binary = trace_ctxwake_bin,	/* binary path shared with TRACE_WAKE */
};

/* Binds the TRACE_CTX entry type to its formatting callbacks. */
static struct trace_event trace_ctx_event = {
	.type = TRACE_CTX,
	.funcs = &trace_ctx_funcs,
};
/* Per-output-mode formatting callbacks for TRACE_WAKE entries. */
static struct trace_event_functions trace_wake_funcs = {
	.trace = trace_wake_print,
	.raw = trace_wake_raw,
	.hex = trace_wake_hex,
	.binary = trace_ctxwake_bin,	/* binary path shared with TRACE_CTX */
};

/* Binds the TRACE_WAKE entry type to its formatting callbacks. */
static struct trace_event trace_wake_event = {
	.type = TRACE_WAKE,
	.funcs = &trace_wake_funcs,
};
  942. /* TRACE_STACK */
  943. static enum print_line_t trace_stack_print(struct trace_iterator *iter,
  944. int flags, struct trace_event *event)
  945. {
  946. struct stack_entry *field;
  947. struct trace_seq *s = &iter->seq;
  948. unsigned long *p;
  949. unsigned long *end;
  950. trace_assign_type(field, iter->ent);
  951. end = (unsigned long *)((long)iter->ent + iter->ent_size);
  952. if (!trace_seq_puts(s, "<stack trace>\n"))
  953. goto partial;
  954. for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) {
  955. if (!trace_seq_puts(s, " => "))
  956. goto partial;
  957. if (!seq_print_ip_sym(s, *p, flags))
  958. goto partial;
  959. if (!trace_seq_putc(s, '\n'))
  960. goto partial;
  961. }
  962. return TRACE_TYPE_HANDLED;
  963. partial:
  964. return TRACE_TYPE_PARTIAL_LINE;
  965. }
/* TRACE_STACK only has a human-readable form; other modes left unset. */
static struct trace_event_functions trace_stack_funcs = {
	.trace = trace_stack_print,
};

/* Binds the TRACE_STACK entry type to its formatting callback. */
static struct trace_event trace_stack_event = {
	.type = TRACE_STACK,
	.funcs = &trace_stack_funcs,
};
  973. /* TRACE_USER_STACK */
  974. static enum print_line_t trace_user_stack_print(struct trace_iterator *iter,
  975. int flags, struct trace_event *event)
  976. {
  977. struct userstack_entry *field;
  978. struct trace_seq *s = &iter->seq;
  979. trace_assign_type(field, iter->ent);
  980. if (!trace_seq_puts(s, "<user stack trace>\n"))
  981. goto partial;
  982. if (!seq_print_userip_objs(field, s, flags))
  983. goto partial;
  984. return TRACE_TYPE_HANDLED;
  985. partial:
  986. return TRACE_TYPE_PARTIAL_LINE;
  987. }
/* TRACE_USER_STACK only has a human-readable form; other modes left unset. */
static struct trace_event_functions trace_user_stack_funcs = {
	.trace = trace_user_stack_print,
};

/* Binds the TRACE_USER_STACK entry type to its formatting callback. */
static struct trace_event trace_user_stack_event = {
	.type = TRACE_USER_STACK,
	.funcs = &trace_user_stack_funcs,
};
  995. /* TRACE_BPUTS */
  996. static enum print_line_t
  997. trace_bputs_print(struct trace_iterator *iter, int flags,
  998. struct trace_event *event)
  999. {
  1000. struct trace_entry *entry = iter->ent;
  1001. struct trace_seq *s = &iter->seq;
  1002. struct bputs_entry *field;
  1003. trace_assign_type(field, entry);
  1004. if (!seq_print_ip_sym(s, field->ip, flags))
  1005. goto partial;
  1006. if (!trace_seq_puts(s, ": "))
  1007. goto partial;
  1008. if (!trace_seq_puts(s, field->str))
  1009. goto partial;
  1010. return TRACE_TYPE_HANDLED;
  1011. partial:
  1012. return TRACE_TYPE_PARTIAL_LINE;
  1013. }
  1014. static enum print_line_t
  1015. trace_bputs_raw(struct trace_iterator *iter, int flags,
  1016. struct trace_event *event)
  1017. {
  1018. struct bputs_entry *field;
  1019. struct trace_seq *s = &iter->seq;
  1020. trace_assign_type(field, iter->ent);
  1021. if (!trace_seq_printf(s, ": %lx : ", field->ip))
  1022. goto partial;
  1023. if (!trace_seq_puts(s, field->str))
  1024. goto partial;
  1025. return TRACE_TYPE_HANDLED;
  1026. partial:
  1027. return TRACE_TYPE_PARTIAL_LINE;
  1028. }
/* TRACE_BPUTS formatting callbacks; hex/binary modes left unset. */
static struct trace_event_functions trace_bputs_funcs = {
	.trace = trace_bputs_print,
	.raw = trace_bputs_raw,
};

/* Binds the TRACE_BPUTS entry type to its formatting callbacks. */
static struct trace_event trace_bputs_event = {
	.type = TRACE_BPUTS,
	.funcs = &trace_bputs_funcs,
};
  1037. /* TRACE_BPRINT */
  1038. static enum print_line_t
  1039. trace_bprint_print(struct trace_iterator *iter, int flags,
  1040. struct trace_event *event)
  1041. {
  1042. struct trace_entry *entry = iter->ent;
  1043. struct trace_seq *s = &iter->seq;
  1044. struct bprint_entry *field;
  1045. trace_assign_type(field, entry);
  1046. if (!seq_print_ip_sym(s, field->ip, flags))
  1047. goto partial;
  1048. if (!trace_seq_puts(s, ": "))
  1049. goto partial;
  1050. if (!trace_seq_bprintf(s, field->fmt, field->buf))
  1051. goto partial;
  1052. return TRACE_TYPE_HANDLED;
  1053. partial:
  1054. return TRACE_TYPE_PARTIAL_LINE;
  1055. }
  1056. static enum print_line_t
  1057. trace_bprint_raw(struct trace_iterator *iter, int flags,
  1058. struct trace_event *event)
  1059. {
  1060. struct bprint_entry *field;
  1061. struct trace_seq *s = &iter->seq;
  1062. trace_assign_type(field, iter->ent);
  1063. if (!trace_seq_printf(s, ": %lx : ", field->ip))
  1064. goto partial;
  1065. if (!trace_seq_bprintf(s, field->fmt, field->buf))
  1066. goto partial;
  1067. return TRACE_TYPE_HANDLED;
  1068. partial:
  1069. return TRACE_TYPE_PARTIAL_LINE;
  1070. }
/* TRACE_BPRINT formatting callbacks; hex/binary modes left unset. */
static struct trace_event_functions trace_bprint_funcs = {
	.trace = trace_bprint_print,
	.raw = trace_bprint_raw,
};

/* Binds the TRACE_BPRINT entry type to its formatting callbacks. */
static struct trace_event trace_bprint_event = {
	.type = TRACE_BPRINT,
	.funcs = &trace_bprint_funcs,
};
  1079. /* TRACE_PRINT */
  1080. static enum print_line_t trace_print_print(struct trace_iterator *iter,
  1081. int flags, struct trace_event *event)
  1082. {
  1083. struct print_entry *field;
  1084. struct trace_seq *s = &iter->seq;
  1085. trace_assign_type(field, iter->ent);
  1086. if (!seq_print_ip_sym(s, field->ip, flags))
  1087. goto partial;
  1088. if (!trace_seq_printf(s, ": %s", field->buf))
  1089. goto partial;
  1090. return TRACE_TYPE_HANDLED;
  1091. partial:
  1092. return TRACE_TYPE_PARTIAL_LINE;
  1093. }
  1094. static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags,
  1095. struct trace_event *event)
  1096. {
  1097. struct print_entry *field;
  1098. trace_assign_type(field, iter->ent);
  1099. if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf))
  1100. goto partial;
  1101. return TRACE_TYPE_HANDLED;
  1102. partial:
  1103. return TRACE_TYPE_PARTIAL_LINE;
  1104. }
/* TRACE_PRINT formatting callbacks; hex/binary modes left unset. */
static struct trace_event_functions trace_print_funcs = {
	.trace = trace_print_print,
	.raw = trace_print_raw,
};

/* Binds the TRACE_PRINT entry type to its formatting callbacks. */
static struct trace_event trace_print_event = {
	.type = TRACE_PRINT,
	.funcs = &trace_print_funcs,
};
/*
 * All statically-defined trace event descriptors, registered once at
 * early init by init_events().  __initdata: discarded after boot.
 */
static struct trace_event *events[] __initdata = {
	&trace_fn_event,
	&trace_graph_ent_event,
	&trace_graph_ret_event,
	&trace_ctx_event,
	&trace_wake_event,
	&trace_stack_event,
	&trace_user_stack_event,
	&trace_bputs_event,
	&trace_bprint_event,
	&trace_print_event,
	NULL	/* sentinel terminating the registration loop */
};
  1126. __init static int init_events(void)
  1127. {
  1128. struct trace_event *event;
  1129. int i, ret;
  1130. for (i = 0; events[i]; i++) {
  1131. event = events[i];
  1132. ret = register_ftrace_event(event);
  1133. if (!ret) {
  1134. printk(KERN_WARNING "event %d failed to register\n",
  1135. event->type);
  1136. WARN_ON_ONCE(1);
  1137. }
  1138. }
  1139. return 0;
  1140. }
  1141. early_initcall(init_events);