/*
 * kernel/power/tuxonice_atomic_copy.c
 *
 * Copyright 2004-2014 Nigel Cunningham (nigel at tuxonice net)
 *
 * Distributed under GPLv2.
 *
 * Routines for doing the atomic save/restore.
 */

#include <linux/suspend.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/console.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <asm/suspend.h>
#include "tuxonice.h"
#include "tuxonice_storage.h"
#include "tuxonice_power_off.h"
#include "tuxonice_ui.h"
#include "tuxonice_io.h"
#include "tuxonice_prepare_image.h"
#include "tuxonice_pageflags.h"
#include "tuxonice_checksum.h"
#include "tuxonice_builtin.h"
#include "tuxonice_atomic_copy.h"
#include "tuxonice_alloc.h"
#include "tuxonice_modules.h"
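
/*
 * How much pageset1 grew between the initial image calculation and the
 * atomic copy; compared against extra_pd1_pages_allowance in
 * __toi_post_context_save() below.
 */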
unsigned long extra_pd1_pages_used;

/**
 * free_pbe_list - free page backup entries used by the atomic copy code.
 * @list: List to free.
 * @highmem: Whether the list is in highmem.
 *
 * Normally, this function isn't used. If, however, we need to abort before
 * doing the atomic copy, we use this to free the pbes previously allocated.
 **/
static void free_pbe_list(struct pbe **list, int highmem)
{
	while (*list) {
		int i;
		struct pbe *free_pbe, *next_page = NULL;
		struct page *page;

		if (highmem) {
			page = (struct page *)*list;
			free_pbe = (struct pbe *)kmap(page);
		} else {
			page = virt_to_page(*list);
			free_pbe = *list;
		}

		for (i = 0; i < PBES_PER_PAGE; i++) {
			if (!free_pbe)
				break;
			if (highmem)
				toi__free_page(29, free_pbe->address);
			else
				toi_free_page(29,
					(unsigned long)free_pbe->address);
			free_pbe = free_pbe->next;
		}
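
		/*
		 * After walking a full page of pbes, free_pbe points at the
		 * first entry of the next page in the chain, or is NULL at
		 * the end of the list.
		 */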
		if (highmem) {
			if (free_pbe)
				next_page = free_pbe;
			kunmap(page);
		} else {
			if (free_pbe)
				next_page = free_pbe;
		}

		toi__free_page(29, page);
		*list = (struct pbe *)next_page;
	}
}
/**
 * copyback_post - post atomic-restore actions
 *
 * After doing the atomic restore, we have a few more things to do:
 * 1) We want to retain some values across the restore, so we now copy
 *    these from the nosave variables to the normal ones.
 * 2) Set the status flags.
 * 3) Resume devices.
 * 4) Tell userui so it can redraw & restore settings.
 * 5) Reread the page cache.
 **/
void copyback_post(void)
{
	struct toi_boot_kernel_data *bkd =
		(struct toi_boot_kernel_data *)boot_kernel_data_buffer;
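
	/*
	 * boot_kernel_data_buffer carries the struct toi_boot_kernel_data
	 * handed over by the boot kernel; the modules' post-atomic-restore
	 * hooks below consume it.
	 */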
	if (toi_activate_storage(1))
		panic("Failed to reactivate our storage.");

	toi_post_atomic_restore_modules(bkd);

	toi_cond_pause(1, "About to reload secondary pagedir.");

	if (read_pageset2(0))
		panic("Unable to successfully reread the page cache.");

	/*
	 * If the user wants to sleep again after resuming from full-off,
	 * it's most likely to be in order to suspend to ram, so we'll
	 * do this check after loading pageset2, to give them the fastest
	 * wakeup when they are ready to use the computer again.
	 */
	toi_check_resleep();
}
/**
 * toi_copy_pageset1 - do the atomic copy of pageset1
 *
 * Make the atomic copy of pageset1. We can't use copy_page (as we once did)
 * because we can't be sure what side effects it has. On my old Duron, with
 * 3DNOW, kernel_fpu_begin increments preempt count, making our preempt
 * count at resume time 4 instead of 3.
 *
 * We don't want to call kmap_atomic unconditionally because it has the side
 * effect of incrementing the preempt count, which will leave it one too high
 * post resume (the page containing the preempt count will be copied after
 * it's incremented). This is essentially the same problem.
 **/
void toi_copy_pageset1(void)
{
	int i;
	unsigned long source_index, dest_index;

	memory_bm_position_reset(pageset1_map);
	memory_bm_position_reset(pageset1_copy_map);

	source_index = memory_bm_next_pfn(pageset1_map, 0);
	dest_index = memory_bm_next_pfn(pageset1_copy_map, 0);
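
	/*
	 * Walk the two bitmaps in lockstep: each iteration pairs a pfn from
	 * pageset1 with the pfn of the page allocated to hold its copy.
	 */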
	for (i = 0; i < pagedir1.size; i++) {
		unsigned long *origvirt, *copyvirt;
		struct page *origpage, *copypage;
		int loop = (PAGE_SIZE / sizeof(unsigned long)) - 1,
		    was_present1, was_present2;

#ifdef CONFIG_TOI_ENHANCE
		if (!pfn_valid(source_index) || !pfn_valid(dest_index)) {
			pr_emerg("[%s] (%d) dest_index:%lu, source_index:%lu\n",
				 __func__, i, dest_index, source_index);
			set_abort_result(TOI_ARCH_PREPARE_FAILED);
			return;
		}
#endif

		origpage = pfn_to_page(source_index);
		copypage = pfn_to_page(dest_index);

		origvirt = PageHighMem(origpage) ?
			kmap_atomic(origpage) : page_address(origpage);
		copyvirt = PageHighMem(copypage) ?
			kmap_atomic(copypage) : page_address(copypage);
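
		/*
		 * With CONFIG_DEBUG_PAGEALLOC, pages may be absent from the
		 * kernel's linear mapping; map any such page for the duration
		 * of the copy and restore its previous state afterwards.
		 */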
		was_present1 = kernel_page_present(origpage);
		if (!was_present1)
			kernel_map_pages(origpage, 1, 1);

		was_present2 = kernel_page_present(copypage);
		if (!was_present2)
			kernel_map_pages(copypage, 1, 1);

		while (loop >= 0) {
			*(copyvirt + loop) = *(origvirt + loop);
			loop--;
		}

		if (!was_present1)
			kernel_map_pages(origpage, 1, 0);
		if (!was_present2)
			kernel_map_pages(copypage, 1, 0);

		if (PageHighMem(origpage))
			kunmap_atomic(origvirt);
		if (PageHighMem(copypage))
			kunmap_atomic(copyvirt);

		source_index = memory_bm_next_pfn(pageset1_map, 0);
		dest_index = memory_bm_next_pfn(pageset1_copy_map, 0);
	}
}
/**
 * __toi_post_context_save - steps after saving the cpu context
 *
 * Steps taken after saving the CPU state to make the actual
 * atomic copy.
 *
 * Called from swsusp_save in snapshot.c via toi_post_context_save.
 **/
int __toi_post_context_save(void)
{
	unsigned long old_ps1_size = pagedir1.size;

	check_checksums();
	free_checksum_pages();

	toi_recalculate_image_contents(1);
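
	/*
	 * Pageset1 may have grown since the image contents were first
	 * calculated (e.g. pages allocated after freezing); measure the
	 * growth against our allowance of extra pages.
	 */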
	extra_pd1_pages_used = pagedir1.size > old_ps1_size ?
		pagedir1.size - old_ps1_size : 0;

	if (extra_pd1_pages_used > extra_pd1_pages_allowance) {
		pr_warn("Pageset1 has grown by %lu pages. extra_pages_allowance is currently only %lu.\n",
			pagedir1.size - old_ps1_size,
			extra_pd1_pages_allowance);

		/*
		 * Highlevel code will see this, clear the state and
		 * retry if we haven't already done so twice.
		 */
		if (any_to_free(1)) {
			set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL);
			return 1;
		}
		if (try_allocate_extra_memory()) {
			pr_warn("Failed to allocate the extra memory needed. Restarting the process.\n");
			set_abort_result(TOI_EXTRA_PAGES_ALLOW_TOO_SMALL);
			return 1;
		}
		pr_warn("Looks like there's enough free RAM and storage to handle this, so continuing anyway.\n");

		/*
		 * try_allocate_extra_memory() above may have called
		 * toi_allocate_extra_pagedir_memory(), which can allocate a
		 * new slab page via toi_kzalloc() that belongs in pageset1,
		 * so recalculate the image contents once more.
		 */
		toi_recalculate_image_contents(1);
	}
	if (!test_action_state(TOI_TEST_FILTER_SPEED) &&
	    !test_action_state(TOI_TEST_BIO))
		toi_copy_pageset1();

	return 0;
}
/**
 * toi_hibernate - high level code for doing the atomic copy
 *
 * High-level code which prepares to do the atomic copy. Loosely based
 * on the swsusp version, but with the following twists:
 * - We set toi_running so the swsusp code uses our code paths.
 * - We give better feedback regarding what goes wrong if there is a
 *   problem.
 * - We use an extra function to call the assembly, just in case this code
 *   is in a module (return address).
 **/
int toi_hibernate(void)
{
	int error;

	error = toi_lowlevel_builtin();

	if (!error) {
		struct toi_boot_kernel_data *bkd =
			(struct toi_boot_kernel_data *)boot_kernel_data_buffer;

		/*
		 * The boot kernel's data may be larger (newer version) or
		 * smaller (older version) than ours. Copy the minimum
		 * of the two sizes, so that we don't overwrite valid values
		 * from pre-atomic copy.
		 */
		memcpy(&toi_bkd, (char *)boot_kernel_data_buffer,
		       min_t(int, sizeof(struct toi_boot_kernel_data),
			     bkd->size));
	}

	return error;
}
/**
 * toi_atomic_restore - prepare to do the atomic restore
 *
 * Get ready to do the atomic restore. This part gets us into the same
 * state we were in prior to calling do_toi_lowlevel while hibernating:
 * secondary cpus hot-unplugged and processes frozen, before starting
 * the thread that will do the restore.
 **/
int toi_atomic_restore(void)
{
	int error;

	toi_prepare_status(DONT_CLEAR_BAR, "Atomic restore.");
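
	/*
	 * Stash the boot kernel's command line in the nosave area so it is
	 * still visible after the atomic restore overwrites normal memory.
	 */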
	memcpy(&toi_bkd.toi_nosave_commandline, saved_command_line,
	       strlen(saved_command_line));

	toi_pre_atomic_restore_modules(&toi_bkd);

	if (add_boot_kernel_data_pbe())
		goto Failed;

	toi_prepare_status(DONT_CLEAR_BAR, "Doing atomic copy/restore.");

	if (toi_go_atomic(PMSG_QUIESCE, 0))
		goto Failed;

	/* We'll ignore saved state, but this gets preempt count (etc) right */
	save_processor_state();

	error = swsusp_arch_resume();
	/*
	 * Code below is only ever reached in case of failure. Otherwise
	 * execution continues at the place where swsusp_arch_suspend was
	 * called.
	 *
	 * We don't know whether it's safe to continue (this shouldn't
	 * happen), so let's err on the side of caution.
	 */
	BUG();

Failed:
	free_pbe_list(&restore_pblist, 0);
#ifdef CONFIG_HIGHMEM
	pr_warn("[%s] 0x%p 0x%p 0x%p\n", __func__,
		restore_highmem_pblist->address,
		restore_highmem_pblist->orig_address,
		restore_highmem_pblist->next);
	if (restore_highmem_pblist->next != NULL)
		free_pbe_list(&restore_highmem_pblist, 1);
#endif
	return 1;
}
/**
 * toi_go_atomic - do the actual atomic copy/restore
 * @state: The state to use for dpm_suspend_start & power_down calls.
 * @suspend_time: Whether we're suspending or resuming.
 **/
int toi_go_atomic(pm_message_t state, int suspend_time)
{
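	/*
	 * The sequence below mirrors the mainline hibernation ordering:
	 * prepare the platform and devices, quiesce the console, suspend
	 * devices, take down secondary CPUs, disable interrupts and suspend
	 * syscore ops. Each failure path unwinds via toi_end_atomic(),
	 * starting from the step that matches how far we got.
	 */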
	if (suspend_time) {
		if (platform_begin(1)) {
			set_abort_result(TOI_PLATFORM_PREP_FAILED);
			toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 3);
			hib_log("FAILED @line:%d suspend(%d) pm_state(%d)\n",
				__LINE__, suspend_time, state.event);
			return 1;
		}

		if (dpm_prepare(PMSG_FREEZE)) {
			set_abort_result(TOI_DPM_PREPARE_FAILED);
			dpm_complete(PMSG_RECOVER);
			toi_end_atomic(ATOMIC_STEP_PLATFORM_END, suspend_time, 3);
			hib_log("FAILED @line:%d suspend(%d) pm_state(%d)\n",
				__LINE__, suspend_time, state.event);
			return 1;
		}
	}

	suspend_console();
	pm_restrict_gfp_mask();

	if (suspend_time) {
		if (dpm_suspend(state)) {
			set_abort_result(TOI_DPM_SUSPEND_FAILED);
			toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 3);
			hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
				__LINE__, suspend_time, state.event, toi_result);
			return 1;
		}
	} else {
		if (dpm_suspend_start(state)) {
			set_abort_result(TOI_DPM_SUSPEND_FAILED);
			toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 3);
			hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
				__LINE__, suspend_time, state.event, toi_result);
			return 1;
		}
	}

	/*
	 * At this point, dpm_suspend_start() has been called, but *not*
	 * dpm_suspend_noirq(). We *must* dpm_suspend_noirq() now.
	 * Otherwise, drivers for some devices (e.g. interrupt controllers)
	 * become desynchronized with the actual state of the hardware
	 * at resume time, and evil weirdness ensues.
	 */
	if (dpm_suspend_end(state)) {
		set_abort_result(TOI_DEVICE_REFUSED);
		toi_end_atomic(ATOMIC_STEP_DEVICE_RESUME, suspend_time, 1);
		hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
			__LINE__, suspend_time, state.event, toi_result);
		return 1;
	}

	if (suspend_time) {
		if (platform_pre_snapshot(1))
			set_abort_result(TOI_PRE_SNAPSHOT_FAILED);
	} else {
		if (platform_pre_restore(1))
			set_abort_result(TOI_PRE_RESTORE_FAILED);
	}

	if (test_result_state(TOI_ABORTED)) {
		toi_end_atomic(ATOMIC_STEP_PLATFORM_FINISH, suspend_time, 1);
		hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
			__LINE__, suspend_time, state.event, toi_result);
		return 1;
	}

	if (disable_nonboot_cpus()) {
		set_abort_result(TOI_CPU_HOTPLUG_FAILED);
		toi_end_atomic(ATOMIC_STEP_CPU_HOTPLUG, suspend_time, 1);
		hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
			__LINE__, suspend_time, state.event, toi_result);
		return 1;
	}

	local_irq_disable();

	if (syscore_suspend()) {
		set_abort_result(TOI_SYSCORE_REFUSED);
		toi_end_atomic(ATOMIC_STEP_IRQS, suspend_time, 1);
		hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
			__LINE__, suspend_time, state.event, toi_result);
		return 1;
	}

	if (suspend_time && pm_wakeup_pending()) {
		set_abort_result(TOI_WAKEUP_EVENT);
		toi_end_atomic(ATOMIC_STEP_SYSCORE_RESUME, suspend_time, 1);
		hib_log("FAILED @line:%d suspend(%d) pm_state(%d) toi_result(%#lx)\n",
			__LINE__, suspend_time, state.event, toi_result);
		return 1;
	}

	hib_log("SUCCEEDED @line:%d suspend(%d) pm_state(%d)\n", __LINE__,
		suspend_time, state.event);
	return 0;
}
/**
 * toi_end_atomic - post atomic copy/restore routines
 * @stage: What step to start at.
 * @suspend_time: Whether we're suspending or resuming.
 * @error: Whether we're recovering from an error.
 **/
void toi_end_atomic(int stage, int suspend_time, int error)
{
	pm_message_t msg = suspend_time ? (error ? PMSG_RECOVER : PMSG_THAW) :
		PMSG_RESTORE;

	switch (stage) {
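	/*
	 * The message chosen above is PMSG_RECOVER when unwinding a failed
	 * suspend, PMSG_THAW after a successful atomic copy, and
	 * PMSG_RESTORE on the resume path.
	 *
	 * The cases below deliberately fall through: entering at @stage runs
	 * that step and every later one, so callers name only the first step
	 * that still needs unwinding.
	 */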
	case ATOMIC_ALL_STEPS:
		if (!suspend_time)
			events_check_enabled = false;
		platform_leave(1);
		/* fall-through */
	case ATOMIC_STEP_SYSCORE_RESUME:
		syscore_resume();
		/* fall-through */
	case ATOMIC_STEP_IRQS:
		local_irq_enable();
		/* fall-through */
	case ATOMIC_STEP_CPU_HOTPLUG:
		enable_nonboot_cpus();
		/* fall-through */
	case ATOMIC_STEP_PLATFORM_FINISH:
		if (!suspend_time && (error & 2))
			platform_restore_cleanup(1);
		else
			platform_finish(1);
		dpm_resume_start(msg);
		/* fall-through */
	case ATOMIC_STEP_DEVICE_RESUME:
		if (suspend_time && (error & 2))
			platform_recover(1);
		dpm_resume(msg);
		if (!toi_in_suspend())
			dpm_resume_end(PMSG_RECOVER);
		if (error || !toi_in_suspend())
			pm_restore_gfp_mask();
		resume_console();
		/* fall-through */
	case ATOMIC_STEP_DPM_COMPLETE:
		dpm_complete(msg);
		/* fall-through */
	case ATOMIC_STEP_PLATFORM_END:
		platform_end(1);
		toi_prepare_status(DONT_CLEAR_BAR, "Post atomic.");
		/* fall-through */
	default:
		break;
	}
}