mtk_trace.c

#include <linux/ring_buffer.h>
#include <linux/ftrace_event.h>
#include <linux/kallsyms.h>	/* kallsyms_lookup_name() */
#include "mtk_ftrace.h"
#include "trace.h"

#ifdef CONFIG_MTK_KERNEL_MARKER
static unsigned long __read_mostly mark_addr;
static int kernel_marker_on;

/* Look up the address of tracing_mark_write() once, on first use. */
static inline void update_tracing_mark_write_addr(void)
{
	if (unlikely(mark_addr == 0))
		mark_addr = kallsyms_lookup_name("tracing_mark_write");
}
/* Open a named trace slice for the current thread: B|tgid|name */
inline void trace_begin(char *name)
{
	if (unlikely(kernel_marker_on) && name) {
		preempt_disable();
		event_trace_printk(mark_addr, "B|%d|%s\n",
				   current->tgid, name);
		preempt_enable();
	}
}
EXPORT_SYMBOL(trace_begin);

/* Log a sample of a named counter: C|tgid|name|count */
inline void trace_counter(char *name, int count)
{
	if (unlikely(kernel_marker_on) && name) {
		preempt_disable();
		event_trace_printk(mark_addr, "C|%d|%s|%d\n",
				   current->tgid, name, count);
		preempt_enable();
	}
}
EXPORT_SYMBOL(trace_counter);

/* Close the most recently opened slice on the current thread: E */
inline void trace_end(void)
{
	if (unlikely(kernel_marker_on)) {
		preempt_disable();
		event_trace_printk(mark_addr, "E\n");
		preempt_enable();
	}
}
EXPORT_SYMBOL(trace_end);
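/*
 * Usage sketch (hypothetical caller): once kernel_marker_on has been
 * set to 1, bracket a code region so it shows up as a named slice in a
 * systrace/atrace capture, or plot a value over time:
 *
 *	trace_begin("my_work");
 *	do_my_work();			// hypothetical workload
 *	trace_end();
 *
 *	trace_counter("queue_depth", depth);
 */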
static ssize_t
kernel_marker_on_simple_read(struct file *filp, char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = sprintf(buf, "%d\n", kernel_marker_on);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
kernel_marker_on_simple_write(struct file *filp, const char __user *ubuf,
			      size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	kernel_marker_on = !!val;
	if (kernel_marker_on)
		update_tracing_mark_write_addr();

	(*ppos)++;
	return cnt;
}

static const struct file_operations kernel_marker_on_simple_fops = {
	.open = tracing_open_generic,
	.read = kernel_marker_on_simple_read,
	.write = kernel_marker_on_simple_write,
	.llseek = default_llseek,
};
static __init int init_kernel_marker(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	/* Expose the switch as <debugfs>/tracing/kernel_marker_on. */
	trace_create_file("kernel_marker_on", 0644, d_tracer, NULL,
			  &kernel_marker_on_simple_fops);
	return 0;
}
fs_initcall(init_kernel_marker);
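/*
 * Example, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *	echo 1 > /sys/kernel/debug/tracing/kernel_marker_on
 *	cat /sys/kernel/debug/tracing/kernel_marker_on
 */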
#endif /* CONFIG_MTK_KERNEL_MARKER */
#if defined(CONFIG_MTK_HIBERNATION) && defined(CONFIG_MTK_SCHED_TRACERS)
/*
 * enable == 0: shrink the trace ring buffer to its minimum (e.g. before
 * taking the hibernation snapshot); enable != 0: mark it unexpanded so
 * tracing_update_buffers() re-allocates it at full size.
 */
int resize_ring_buffer_for_hibernation(int enable)
{
	int ret = 0;
	struct trace_array *tr = NULL;

	if (enable) {
		ring_buffer_expanded = 0;
		ret = tracing_update_buffers();
	} else {
		tr = top_trace_array();
		if (!tr)
			return -ENODEV;
		ret = tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
	}
	return ret;
}
#endif
#ifdef CONFIG_MTK_SCHED_TRACERS
bool boot_trace;
static unsigned long buf_size = 25165824UL;	/* 24 MB */

/* Turn on boot-time tracing when "boot_trace" is on the command line. */
static __init int boot_trace_cmdline(char *str)
{
	boot_trace = true;
	update_buf_size(buf_size);
	return 0;
}
__setup("boot_trace", boot_trace_cmdline);
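/*
 * Example kernel command line (illustrative; typically appended by the
 * bootloader):
 *
 *	console=ttyS0 ... boot_trace
 */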
/* Append the list of currently enabled trace events to the trace header. */
void print_enabled_events(struct seq_file *m)
{
	struct ftrace_event_call *call;
	struct ftrace_event_file *file;
	struct trace_array *tr;

	seq_puts(m, "# enabled events:");
	/* mutex_lock(&event_mutex); */
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		list_for_each_entry(file, &tr->events, list) {
			call = file->event_call;
			if (file->flags & FTRACE_EVENT_FL_ENABLED)
				seq_printf(m, " %s:%s", call->class->system,
					   ftrace_event_name(call));
		}
	}
	/* mutex_unlock(&event_mutex); */
	seq_puts(m, "\n");
}
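/*
 * Illustrative output, assuming the default event set below is active:
 *
 *	# enabled events: sched:sched_switch sched:sched_wakeup ...
 */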
/* ftrace's switch function for MTK solution */
static void ftrace_events_enable(int enable)
{
	if (enable) {
		trace_set_clr_event(NULL, "sched_switch", 1);
		trace_set_clr_event(NULL, "sched_wakeup", 1);
		trace_set_clr_event(NULL, "sched_wakeup_new", 1);
		trace_set_clr_event(NULL, "softirq_entry", 1);
		trace_set_clr_event(NULL, "softirq_exit", 1);
		trace_set_clr_event(NULL, "softirq_raise", 1);
#ifdef CONFIG_SMP
		trace_set_clr_event(NULL, "sched_migrate_task", 1);
#endif
		trace_set_clr_event(NULL, "workqueue_execute_start", 1);
		trace_set_clr_event(NULL, "workqueue_execute_end", 1);
		trace_set_clr_event(NULL, "cpu_frequency", 1);
		trace_set_clr_event(NULL, "block_bio_frontmerge", 1);
		trace_set_clr_event(NULL, "block_bio_backmerge", 1);
		trace_set_clr_event(NULL, "block_rq_issue", 1);
		trace_set_clr_event(NULL, "block_rq_insert", 1);
		trace_set_clr_event(NULL, "block_rq_complete", 1);
		trace_set_clr_event(NULL, "debug_allocate_large_pages", 1);
		trace_set_clr_event(NULL, "dump_allocate_large_pages", 1);
		trace_set_clr_event("mtk_events", NULL, 1);
		trace_set_clr_event("ipi", NULL, 1);
		trace_set_clr_event("met_bio", NULL, 1);
		trace_set_clr_event("met_fuse", NULL, 1);
		tracing_on();
	} else {
		tracing_off();
		trace_set_clr_event(NULL, NULL, 0);
	}
}
static __init int boot_ftrace(void)
{
	struct trace_array *tr;

	if (boot_trace) {
		tr = top_trace_array();
		if (!tr)
			return -ENODEV;
		tracing_update_buffers();
		ftrace_events_enable(1);
		/* Keep the earliest entries: stop when full, don't overwrite. */
		set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 0);
		pr_debug("[ftrace] boot-time profiling...\n");
	}
	return 0;
}
core_initcall(boot_ftrace);
#ifdef CONFIG_MTK_FTRACE_DEFAULT_ENABLE
static __init int enable_ftrace(void)
{
	if (!boot_trace) {
		/* Enable the ftrace facilities. */
		ftrace_events_enable(1);
		/*
		 * The ring buffer is only expanded earlier (at core_initcall
		 * time) when collecting a boot-time trace; otherwise the
		 * expansion is deferred to here so it does not lengthen boot.
		 */
		tracing_update_buffers();
		pr_debug("[ftrace] ftrace ready...\n");
	}
	return 0;
}
late_initcall(enable_ftrace);
#endif /* CONFIG_MTK_FTRACE_DEFAULT_ENABLE */
#endif /* CONFIG_MTK_SCHED_TRACERS */
#if defined(CONFIG_MTK_SCHED_TRACERS) && defined(CONFIG_HOTPLUG_CPU)
#include <linux/cpu.h>
#include <trace/events/mtk_events.h>

static DEFINE_PER_CPU(unsigned long long, last_event_ts);
static struct notifier_block hotplug_event_notifier;

/*
 * Emit a cpu_hotplug trace event when a CPU comes online or goes
 * offline, carrying the timestamp (in usecs) of that CPU's previous
 * transition.
 */
static int
hotplug_event_notify(struct notifier_block *self,
		     unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		trace_cpu_hotplug(cpu, 1, per_cpu(last_event_ts, cpu));
		per_cpu(last_event_ts, cpu) = ns2usecs(ftrace_now(cpu));
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		trace_cpu_hotplug(cpu, 0, per_cpu(last_event_ts, cpu));
		per_cpu(last_event_ts, cpu) = ns2usecs(ftrace_now(cpu));
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static __init int hotplug_events_init(void)
{
	hotplug_event_notifier.notifier_call = hotplug_event_notify;
	hotplug_event_notifier.priority = 0;
	register_cpu_notifier(&hotplug_event_notifier);
	return 0;
}
early_initcall(hotplug_events_init);
#endif /* CONFIG_MTK_SCHED_TRACERS && CONFIG_HOTPLUG_CPU */
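/*
 * The notifier can be exercised from user space via the standard sysfs
 * hotplug controls, assuming the CPU supports offlining:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu1/online
 *	echo 1 > /sys/devices/system/cpu/cpu1/online
 */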