  1. /*
  2. * kernel/power/tuxonice_alloc.c
  3. *
  4. * Copyright (C) 2008-2014 Nigel Cunningham (nigel at tuxonice net)
  5. *
  6. * This file is released under the GPLv2.
  7. *
  8. */
  9. #ifdef CONFIG_PM_DEBUG
  10. #include <linux/export.h>
  11. #include <linux/slab.h>
  12. #include "tuxonice_modules.h"
  13. #include "tuxonice_alloc.h"
  14. #include "tuxonice_sysfs.h"
  15. #include "tuxonice.h"
/* Number of distinct allocation call sites ("paths") tracked by this file. */
#define TOI_ALLOC_PATHS 40

/* Serializes updates to the cur/max byte accounting below when
 * TOI_GET_MAX_MEM_ALLOCD tracking is enabled. */
static DEFINE_MUTEX(toi_alloc_mutex);
static struct toi_module_ops toi_alloc_ops;

/* Sysfs-settable path index whose next allocation is forced to fail
 * (0 = no forced failure pending). */
static int toi_fail_num;

/* Per-path counters: successful allocations, frees, forced-failure
 * tests triggered, and genuine allocation failures. */
static atomic_t toi_alloc_count[TOI_ALLOC_PATHS],
toi_free_count[TOI_ALLOC_PATHS],
toi_test_count[TOI_ALLOC_PATHS],
toi_fail_count[TOI_ALLOC_PATHS];
/* Per-path outstanding allocation counts, and a snapshot of all paths
 * taken whenever the total outstanding bytes hit a new maximum. */
static int toi_cur_allocd[TOI_ALLOC_PATHS], toi_max_allocd[TOI_ALLOC_PATHS];
/* Total bytes currently outstanding, and the high-water mark seen. */
static int cur_allocd, max_allocd;
/* Human-readable description of each allocation path index; printed by
 * toi_alloc_print_debug_stats(). Index comments mark every fifth entry. */
static char *toi_alloc_desc[TOI_ALLOC_PATHS] = {
"", /* 0 */
"get_io_info_struct",
"extent",
"extent (loading chain)",
"userui channel",
"userui arg", /* 5 */
"attention list metadata",
"extra pagedir memory metadata",
"bdev metadata",
"extra pagedir memory",
"header_locations_read", /* 10 */
"bio queue",
"prepare_readahead",
"i/o buffer",
"writer buffer in bio_init",
"checksum buffer", /* 15 */
"compression buffer",
"filewriter signature op",
"set resume param alloc1",
"set resume param alloc2",
"debugging info buffer", /* 20 */
"check can resume buffer",
"write module config buffer",
"read module config buffer",
"write image header buffer",
"read pageset1 buffer", /* 25 */
"get_have_image_data buffer",
"checksum page",
"worker rw loop",
"get nonconflicting page",
"ps1 load addresses", /* 30 */
"remove swap image",
"swap image exists",
"swap parse sig location",
"sysfs kobj",
"swap mark resume attempted buffer", /* 35 */
"cluster member",
"boot kernel data buffer",
"setting swap signature",
"block i/o bdev struct"
};
  68. static inline int might_fail(int fail_num)
  69. {
  70. BUG_ON(fail_num >= TOI_ALLOC_PATHS);
  71. if (fail_num == toi_fail_num) {
  72. atomic_inc(&toi_test_count[fail_num]);
  73. toi_fail_num = 0;
  74. return 0;
  75. }
  76. return 1;
  77. }
  78. static void alloc_update_stats(int fail_num, void *result, int size)
  79. {
  80. if (!result) {
  81. atomic_inc(&toi_fail_count[fail_num]);
  82. return;
  83. }
  84. atomic_inc(&toi_alloc_count[fail_num]);
  85. if (unlikely(test_action_state(TOI_GET_MAX_MEM_ALLOCD))) {
  86. mutex_lock(&toi_alloc_mutex);
  87. toi_cur_allocd[fail_num]++;
  88. cur_allocd += size;
  89. if (unlikely(cur_allocd > max_allocd)) {
  90. int i;
  91. for (i = 0; i < TOI_ALLOC_PATHS; i++)
  92. toi_max_allocd[i] = toi_cur_allocd[i];
  93. max_allocd = cur_allocd;
  94. }
  95. mutex_unlock(&toi_alloc_mutex);
  96. }
  97. }
  98. static void free_update_stats(int fail_num, int size)
  99. {
  100. BUG_ON(fail_num >= TOI_ALLOC_PATHS);
  101. atomic_inc(&toi_free_count[fail_num]);
  102. if (unlikely(atomic_read(&toi_free_count[fail_num]) >
  103. atomic_read(&toi_alloc_count[fail_num])))
  104. dump_stack();
  105. if (unlikely(test_action_state(TOI_GET_MAX_MEM_ALLOCD))) {
  106. mutex_lock(&toi_alloc_mutex);
  107. cur_allocd -= size;
  108. toi_cur_allocd[fail_num]--;
  109. mutex_unlock(&toi_alloc_mutex);
  110. }
  111. }
  112. void *toi_kzalloc(int fail_num, size_t size, gfp_t flags)
  113. {
  114. void *result;
  115. if (toi_alloc_ops.enabled)
  116. if (might_fail(fail_num) == 0)
  117. return NULL;
  118. result = kzalloc(size, flags);
  119. if (toi_alloc_ops.enabled)
  120. alloc_update_stats(fail_num, result, size);
  121. if (fail_num == toi_trace_allocs)
  122. dump_stack();
  123. return result;
  124. }
  125. EXPORT_SYMBOL_GPL(toi_kzalloc);
  126. unsigned long toi_get_free_pages(int fail_num, gfp_t mask,
  127. unsigned int order)
  128. {
  129. unsigned long result;
  130. if (toi_alloc_ops.enabled)
  131. if (might_fail(fail_num) == 0)
  132. return 0;
  133. result = __get_free_pages(mask, order);
  134. if (toi_alloc_ops.enabled)
  135. alloc_update_stats(fail_num, (void *) result,
  136. PAGE_SIZE << order);
  137. if (fail_num == toi_trace_allocs)
  138. dump_stack();
  139. return result;
  140. }
  141. EXPORT_SYMBOL_GPL(toi_get_free_pages);
  142. struct page *toi_alloc_page(int fail_num, gfp_t mask)
  143. {
  144. struct page *result;
  145. if (toi_alloc_ops.enabled)
  146. if (might_fail(fail_num) == 0)
  147. return NULL;
  148. result = alloc_page(mask);
  149. if (toi_alloc_ops.enabled)
  150. alloc_update_stats(fail_num, (void *) result, PAGE_SIZE);
  151. if (fail_num == toi_trace_allocs)
  152. dump_stack();
  153. return result;
  154. }
  155. EXPORT_SYMBOL_GPL(toi_alloc_page);
  156. unsigned long toi_get_zeroed_page(int fail_num, gfp_t mask)
  157. {
  158. unsigned long result;
  159. if (toi_alloc_ops.enabled)
  160. if (might_fail(fail_num) == 0)
  161. return 0;
  162. result = get_zeroed_page(mask);
  163. if (toi_alloc_ops.enabled)
  164. alloc_update_stats(fail_num, (void *) result, PAGE_SIZE);
  165. if (fail_num == toi_trace_allocs)
  166. dump_stack();
  167. return result;
  168. }
  169. EXPORT_SYMBOL_GPL(toi_get_zeroed_page);
  170. void toi_kfree(int fail_num, const void *arg, int size)
  171. {
  172. if (arg && toi_alloc_ops.enabled)
  173. free_update_stats(fail_num, size);
  174. if (fail_num == toi_trace_allocs)
  175. dump_stack();
  176. kfree(arg);
  177. }
  178. EXPORT_SYMBOL_GPL(toi_kfree);
  179. void toi_free_page(int fail_num, unsigned long virt)
  180. {
  181. if (virt && toi_alloc_ops.enabled)
  182. free_update_stats(fail_num, PAGE_SIZE);
  183. if (fail_num == toi_trace_allocs)
  184. dump_stack();
  185. free_page(virt);
  186. }
  187. EXPORT_SYMBOL_GPL(toi_free_page);
  188. void toi__free_page(int fail_num, struct page *page)
  189. {
  190. if (page && toi_alloc_ops.enabled)
  191. free_update_stats(fail_num, PAGE_SIZE);
  192. if (fail_num == toi_trace_allocs)
  193. dump_stack();
  194. __free_page(page);
  195. }
  196. EXPORT_SYMBOL_GPL(toi__free_page);
  197. void toi_free_pages(int fail_num, struct page *page, int order)
  198. {
  199. if (page && toi_alloc_ops.enabled)
  200. free_update_stats(fail_num, PAGE_SIZE << order);
  201. if (fail_num == toi_trace_allocs)
  202. dump_stack();
  203. __free_pages(page, order);
  204. }
  205. void toi_alloc_print_debug_stats(void)
  206. {
  207. int i, header_done = 0;
  208. if (!toi_alloc_ops.enabled)
  209. return;
  210. for (i = 0; i < TOI_ALLOC_PATHS; i++)
  211. if (atomic_read(&toi_alloc_count[i]) !=
  212. atomic_read(&toi_free_count[i])) {
  213. if (!header_done) {
  214. pr_warn("Idx Allocs Frees Tests Fails Max Description\n");
  215. header_done = 1;
  216. }
  217. pri_warn("%3d %7d %7d %7d %7d %7d %s\n", i,
  218. atomic_read(&toi_alloc_count[i]),
  219. atomic_read(&toi_free_count[i]),
  220. atomic_read(&toi_test_count[i]),
  221. atomic_read(&toi_fail_count[i]),
  222. toi_max_allocd[i],
  223. toi_alloc_desc[i]);
  224. }
  225. }
  226. EXPORT_SYMBOL_GPL(toi_alloc_print_debug_stats);
  227. static int toi_alloc_initialise(int starting_cycle)
  228. {
  229. int i;
  230. if (!starting_cycle)
  231. return 0;
  232. if (toi_trace_allocs)
  233. dump_stack();
  234. for (i = 0; i < TOI_ALLOC_PATHS; i++) {
  235. atomic_set(&toi_alloc_count[i], 0);
  236. atomic_set(&toi_free_count[i], 0);
  237. atomic_set(&toi_test_count[i], 0);
  238. atomic_set(&toi_fail_count[i], 0);
  239. toi_cur_allocd[i] = 0;
  240. toi_max_allocd[i] = 0;
  241. };
  242. max_allocd = 0;
  243. cur_allocd = 0;
  244. return 0;
  245. }
/* Sysfs entries exposed under the module's "alloc" directory:
 * failure_test: path index to force-fail next (0-99, 0 = none);
 * trace: path index whose alloc/free call sites dump_stack();
 * find_max_mem_allocated: toggle high-water-mark tracking;
 * enabled: master switch for this debugging module. */
static struct toi_sysfs_data sysfs_params[] = {
SYSFS_INT("failure_test", SYSFS_RW, &toi_fail_num, 0, 99, 0, NULL),
SYSFS_INT("trace", SYSFS_RW, &toi_trace_allocs, 0, TOI_ALLOC_PATHS, 0,
NULL),
SYSFS_BIT("find_max_mem_allocated", SYSFS_RW, &toi_bkd.toi_action,
TOI_GET_MAX_MEM_ALLOCD, 0),
SYSFS_INT("enabled", SYSFS_RW, &toi_alloc_ops.enabled, 0, 1, 0,
NULL)
};
  255. static struct toi_module_ops toi_alloc_ops = {
  256. .type = MISC_HIDDEN_MODULE,
  257. .name = "allocation debugging",
  258. .directory = "alloc",
  259. .module = THIS_MODULE,
  260. .early = 1,
  261. .initialise = toi_alloc_initialise,
  262. .sysfs_data = sysfs_params,
  263. .num_sysfs_entries = sizeof(sysfs_params) /
  264. sizeof(struct toi_sysfs_data),
  265. };
  266. int toi_alloc_init(void)
  267. {
  268. int result = toi_register_module(&toi_alloc_ops);
  269. return result;
  270. }
/* Unregister the allocation-debugging module from the TuxOnIce core. */
void toi_alloc_exit(void)
{
toi_unregister_module(&toi_alloc_ops);
}
  275. #endif