/*
 * kernel/power/tuxonice_pagedir.c
 *
 * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
 * Copyright (C) 1998,2001,2002 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2002-2003 Florent Chabaud <fchabaud@free.fr>
 * Copyright (C) 2006-2014 Nigel Cunningham (nigel at tuxonice net)
 *
 * This file is released under the GPLv2.
 *
 * Routines for handling pagesets.
 * Note that pbes aren't actually stored as such. They're stored as
 * bitmaps and extents.
 */
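
/*
 * For reference, the pbes built below follow the layout defined in
 * <linux/suspend.h> (reproduced here as a sketch; see that header for the
 * authoritative definition):
 *
 *	struct pbe {
 *		void *address;		- address of the copy
 *		void *orig_address;	- original address of a page
 *		struct pbe *next;
 *	};
 *
 * restore_pblist (lowmem) and restore_highmem_pblist (highmem) are the
 * list heads handed to the atomic restore code.
 */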
#include <linux/suspend.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <asm/tlbflush.h>

#include "tuxonice_pageflags.h"
#include "tuxonice_ui.h"
#include "tuxonice_pagedir.h"
#include "tuxonice_prepare_image.h"
#include "tuxonice.h"
#include "tuxonice_builtin.h"
#include "tuxonice_alloc.h"

static int ptoi_pfn;
static struct pbe *this_low_pbe;
static struct pbe **last_low_pbe_ptr;

void toi_reset_alt_image_pageset2_pfn(void)
{
	memory_bm_position_reset(pageset2_map);
}

static struct page *first_conflicting_page;

/*
 * free_conflicting_pages
 *
 * Walk and free the chain of conflicting pages captured by
 * ___toi_get_nonconflicting_page(); each page's first word points to the
 * next page in the chain.
 */
static void free_conflicting_pages(void)
{
	while (first_conflicting_page) {
		struct page *next =
			*((struct page **)kmap(first_conflicting_page));
		kunmap(first_conflicting_page);
		toi__free_page(29, first_conflicting_page);
		first_conflicting_page = next;
	}
}

/* ___toi_get_nonconflicting_page
 *
 * Description: Gets order zero pages that won't be overwritten
 *		while copying the original pages.
 */
struct page *___toi_get_nonconflicting_page(int can_be_highmem)
{
	struct page *page;
	gfp_t flags = TOI_ATOMIC_GFP;

	if (can_be_highmem)
		flags |= __GFP_HIGHMEM;

	if (test_toi_state(TOI_LOADING_ALT_IMAGE) && pageset2_map &&
	    (ptoi_pfn != BM_END_OF_MAP)) {
		do {
			ptoi_pfn = memory_bm_next_pfn(pageset2_map, 0);
			if (ptoi_pfn != BM_END_OF_MAP) {
				page = pfn_to_page(ptoi_pfn);
				if (!PagePageset1(page) &&
				    (can_be_highmem || !PageHighMem(page)))
					return page;
			}
		} while (ptoi_pfn != BM_END_OF_MAP);
	}

	do {
		page = toi_alloc_page(29, flags | __GFP_ZERO);
		if (!page) {
			pr_warn("Failed to get nonconflicting page.\n");
			return NULL;
		}
		if (PagePageset1(page)) {
			struct page **next = (struct page **)kmap(page);
			*next = first_conflicting_page;
			first_conflicting_page = page;
			kunmap(page);
		}
	} while (PagePageset1(page));

	return page;
}
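
/*
 * Why the chain above exists: a freshly allocated page may itself lie
 * within pageset1 (i.e. it would be overwritten during the atomic copy), so
 * it can't be used. Such conflicting pages are held rather than freed, so
 * the allocator can't hand them straight back; the first word of each one
 * links to the next, and free_conflicting_pages() releases the whole chain
 * once the load addresses are settled.
 */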

unsigned long __toi_get_nonconflicting_page(void)
{
	struct page *page = ___toi_get_nonconflicting_page(0);

	return page ? (unsigned long)page_address(page) : 0;
}

static struct pbe *get_next_pbe(struct page **page_ptr, struct pbe *this_pbe,
		int highmem)
{
	if (((((unsigned long)this_pbe) & (PAGE_SIZE - 1))
	     + 2 * sizeof(struct pbe)) > PAGE_SIZE) {
		struct page *new_page = ___toi_get_nonconflicting_page(highmem);

		if (!new_page)
			return ERR_PTR(-ENOMEM);

		this_pbe = (struct pbe *)kmap(new_page);
		memset(this_pbe, 0, PAGE_SIZE);
		*page_ptr = new_page;
	} else
		this_pbe++;

	return this_pbe;
}
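
/*
 * A note on get_next_pbe()'s arithmetic: this_pbe is only advanced when the
 * following entry, spanning [offset + sizeof(struct pbe),
 * offset + 2 * sizeof(struct pbe)), still fits entirely within the current
 * page. Otherwise a fresh nonconflicting page is mapped and zeroed, and the
 * caller's page pointer is updated so the old page can be unmapped later.
 */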

/**
 * toi_get_pageset1_load_addresses - generate pbes for conflicting pages
 *
 * We check here that the pagedir and the pages it points to won't collide
 * with the pages to which we're going to restore the loaded pages later.
 *
 * Returns:
 * Zero on success, or -ENOMEM if we couldn't find enough pages (which
 * shouldn't happen).
 **/
int toi_get_pageset1_load_addresses(void)
{
	int pfn, highallocd = 0, lowallocd = 0;
	int low_needed = pagedir1.size - get_highmem_size(pagedir1);
	int high_needed = get_highmem_size(pagedir1);
	int low_pages_for_highmem = 0;
	gfp_t flags = GFP_ATOMIC | __GFP_NOWARN | __GFP_HIGHMEM;
	struct page *page, *high_pbe_page = NULL, *last_high_pbe_page = NULL,
		    *low_pbe_page, *last_low_pbe_page = NULL;
	struct pbe **last_high_pbe_ptr = &restore_highmem_pblist,
		   *this_high_pbe = NULL;
	unsigned long orig_low_pfn, orig_high_pfn;
	int high_pbes_done = 0, low_pbes_done = 0;
	int low_direct = 0, high_direct = 0, result = 0, i;
	int high_page = 1, high_offset = 0, low_page = 1, low_offset = 0;
	int low_for_high_count = 0, high_direct_count = 0, low_direct_count = 0;

	toi_trace_index++;

	memory_bm_position_reset(pageset1_map);
	memory_bm_position_reset(pageset1_copy_map);

	last_low_pbe_ptr = &restore_pblist;

	/* First, allocate pages for the start of our pbe lists. */
	if (high_needed) {
		high_pbe_page = ___toi_get_nonconflicting_page(1);
		if (!high_pbe_page) {
			result = -ENOMEM;
			goto out;
		}
		this_high_pbe = (struct pbe *)kmap(high_pbe_page);
		memset(this_high_pbe, 0, PAGE_SIZE);
	}

	low_pbe_page = ___toi_get_nonconflicting_page(0);
	if (!low_pbe_page) {
		result = -ENOMEM;
		goto out;
	}
	this_low_pbe = (struct pbe *)page_address(low_pbe_page);

	pr_warn("%s: %d. %d, %d, %d, %d, %d, %d, %d\n", __func__, __LINE__,
		low_needed, lowallocd, low_direct,
		high_needed, highallocd, high_direct, low_pages_for_highmem);

	/*
	 * Next, allocate the number of pages we need.
	 */
	i = low_needed + high_needed;

	do {
		int is_high;

		/* Once only lowmem pages remain to be found, stop asking
		 * for highmem. */
		if (i == low_needed)
			flags &= ~__GFP_HIGHMEM;

		page = toi_alloc_page(30, flags);
		BUG_ON(!page);

		SetPagePageset1Copy(page);
		is_high = PageHighMem(page);

		if (PagePageset1(page)) {
			if (is_high)
				high_direct++;
			else
				low_direct++;
		} else {
			if (is_high)
				highallocd++;
			else
				lowallocd++;
		}
	} while (--i);

	pr_warn("%s: %d. %d, %d, %d, %d, %d, %d, %d\n", __func__, __LINE__,
		low_needed, lowallocd, low_direct,
		high_needed, highallocd, high_direct, low_pages_for_highmem);

	high_needed -= high_direct;
	low_needed -= low_direct;

	pr_warn("%s: %d. %d, %d, %d, %d, %d, %d, %d\n", __func__, __LINE__,
		low_needed, lowallocd, low_direct,
		high_needed, highallocd, high_direct, low_pages_for_highmem);

	/*
	 * Do we need to use some lowmem pages for the copies of highmem
	 * pages?
	 */
	if (high_needed > highallocd) {
		low_pages_for_highmem = high_needed - highallocd;
		high_needed -= low_pages_for_highmem;
		low_needed += low_pages_for_highmem;
	}

	pr_warn("%s: %d. %d, %d, %d, %d, %d, %d, %d\n", __func__, __LINE__,
		low_needed, lowallocd, low_direct,
		high_needed, highallocd, high_direct, low_pages_for_highmem);

	/*
	 * Now generate our pbes (which will be used for the atomic restore),
	 * and free unneeded pages.
	 */
	memory_bm_position_reset(pageset1_copy_map);
	for (pfn = memory_bm_next_pfn(pageset1_copy_map, 0);
	     pfn != BM_END_OF_MAP;
	     pfn = memory_bm_next_pfn(pageset1_copy_map, 0)) {
		int is_high;
#ifdef CONFIG_TOI_FIXUP
		int orig_is_high;
		unsigned long orig_pfn;
		struct page *orig_page;
#endif

		page = pfn_to_page(pfn);
		is_high = PageHighMem(page);

		if (PagePageset1(page)) {
			is_high ? high_direct_count++ : low_direct_count++;
			continue;
		}

#ifdef CONFIG_TOI_FIXUP
		do {
			orig_pfn = memory_bm_next_pfn(pageset1_map, 0);
			BUG_ON(orig_pfn == BM_END_OF_MAP);
			orig_page = pfn_to_page(orig_pfn);
		} while (PagePageset1Copy(orig_page));

		orig_is_high = PageHighMem(orig_page);
		if (orig_is_high && (is_high || low_pages_for_highmem)) {
#else
		/* Nope. We're going to use this page. Add a pbe. */
		if (is_high || low_pages_for_highmem) {
			struct page *orig_page;
#endif
			high_pbes_done++;
			if (!is_high)
				low_pages_for_highmem--;
			else
				low_for_high_count++;
#ifdef CONFIG_TOI_FIXUP
			orig_high_pfn = orig_pfn;
#else
			do {
				orig_high_pfn = memory_bm_next_pfn(pageset1_map, 0);
				BUG_ON(orig_high_pfn == BM_END_OF_MAP);
				orig_page = pfn_to_page(orig_high_pfn);
			} while (!PageHighMem(orig_page) ||
				 PagePageset1Copy(orig_page));
#endif
			this_high_pbe->orig_address = (void *)orig_high_pfn;
			this_high_pbe->address = page;
			this_high_pbe->next = NULL;
			toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0,
				    "High pbe %d/%d: %p(%lu)=>%p",
				    high_page, high_offset, page,
				    orig_high_pfn, orig_page);
			if (last_high_pbe_page != high_pbe_page) {
				*last_high_pbe_ptr = (struct pbe *)high_pbe_page;
				if (last_high_pbe_page) {
					kunmap(last_high_pbe_page);
					high_page++;
					high_offset = 0;
				} else
					high_offset++;
				last_high_pbe_page = high_pbe_page;
			} else {
				*last_high_pbe_ptr = this_high_pbe;
				high_offset++;
			}
			last_high_pbe_ptr = &this_high_pbe->next;
			this_high_pbe = get_next_pbe(&high_pbe_page,
						     this_high_pbe, 1);
			if (IS_ERR(this_high_pbe)) {
				pr_warn("This high pbe is an error.\n");
				return -ENOMEM;
			}
#ifdef CONFIG_TOI_FIXUP
		} else if (!orig_is_high) {
#else
		} else {
			struct page *orig_page;
#endif
			low_pbes_done++;
#ifdef CONFIG_TOI_FIXUP
			orig_low_pfn = orig_pfn;
#else
			do {
				orig_low_pfn = memory_bm_next_pfn(pageset1_map, 0);
				BUG_ON(orig_low_pfn == BM_END_OF_MAP);
				orig_page = pfn_to_page(orig_low_pfn);
			} while (PageHighMem(orig_page) ||
				 PagePageset1Copy(orig_page));
#endif
			this_low_pbe->orig_address = page_address(orig_page);
			this_low_pbe->address = page_address(page);
			this_low_pbe->next = NULL;
			toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0,
				    "Low pbe %d/%d: %p(%lu)=>%p",
				    low_page, low_offset,
				    this_low_pbe->orig_address,
				    orig_low_pfn, this_low_pbe->address);
			TOI_TRACE_DEBUG(orig_low_pfn,
					"LoadAddresses (%d/%d): %p=>%p",
					low_page, low_offset,
					this_low_pbe->orig_address,
					this_low_pbe->address);
			*last_low_pbe_ptr = this_low_pbe;
			last_low_pbe_ptr = &this_low_pbe->next;
			this_low_pbe = get_next_pbe(&low_pbe_page,
						    this_low_pbe, 0);
			if (low_pbe_page != last_low_pbe_page) {
				if (last_low_pbe_page) {
					low_page++;
					low_offset = 0;
				} else {
					low_offset++;
				}
				last_low_pbe_page = low_pbe_page;
			} else
				low_offset++;
			if (IS_ERR(this_low_pbe)) {
				pr_warn("this_low_pbe is an error.\n");
				return -ENOMEM;
			}
		}
	}

	pr_warn("%s: %d. %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d\n",
		__func__, __LINE__,
		low_needed, lowallocd, low_direct,
		high_needed, highallocd, high_direct, low_pages_for_highmem,
		low_pbes_done, low_direct_count,
		high_pbes_done, high_direct_count, low_for_high_count);

	if (high_pbe_page)
		kunmap(high_pbe_page);

	if (last_high_pbe_page != high_pbe_page) {
		if (last_high_pbe_page)
			kunmap(last_high_pbe_page);
		toi__free_page(29, high_pbe_page);
	}

	free_conflicting_pages();

out:
	return result;
}
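
/*
 * On success, restore_pblist and restore_highmem_pblist now describe, for
 * every copied page that couldn't be placed directly over its original,
 * where the copy currently sits and where the atomic restore must put it
 * back.
 */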

int add_boot_kernel_data_pbe(void)
{
	this_low_pbe->address = (char *)__toi_get_nonconflicting_page();
	if (!this_low_pbe->address) {
		pr_warn("Failed to get bkd atomic restore buffer.\n");
		return -ENOMEM;
	}

	toi_bkd.size = sizeof(toi_bkd);
	memcpy(this_low_pbe->address, &toi_bkd, sizeof(toi_bkd));

	*last_low_pbe_ptr = this_low_pbe;
	this_low_pbe->orig_address = (char *)boot_kernel_data_buffer;
	this_low_pbe->next = NULL;
	return 0;
}
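
/*
 * A note on the pbe built above: it is appended to the tail of
 * restore_pblist via last_low_pbe_ptr, so the snapshot of toi_bkd taken
 * here is copied over boot_kernel_data_buffer during the atomic restore,
 * making the boot kernel's data visible to the resumed kernel.
 */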