tuxonice_bio_chains.c

/*
 * kernel/power/tuxonice_bio_devinfo.c
 *
 * Copyright (C) 2009-2014 Nigel Cunningham (nigel at tuxonice net)
 *
 * Distributed under GPLv2.
 *
 */

#include <linux/mm_types.h>

#include "tuxonice_bio.h"
#include "tuxonice_bio_internal.h"
#include "tuxonice_alloc.h"
#include "tuxonice_ui.h"
#include "tuxonice.h"
#include "tuxonice_io.h"

static struct toi_bdev_info *prio_chain_head;
static int num_chains;

/* Pointer to current entry being loaded/saved. */
struct toi_extent_iterate_state toi_writer_posn;

#define metadata_size (sizeof(struct toi_bdev_info) - \
		offsetof(struct toi_bdev_info, uuid))

/*
 * After section 0 (header) comes 2 => next_section[0] = 2
 */
static int next_section[3] = { 2, 3, 1 };
/**
 * dump_block_chains - print the contents of the bdev info array.
 **/
void dump_block_chains(void)
{
	int i = 0;
	int j;
	struct toi_bdev_info *cur_chain = prio_chain_head;

	while (cur_chain) {
		struct hibernate_extent *this = cur_chain->blocks.first;

		pr_debug("Chain %d (prio %d):", i, cur_chain->prio);

		while (this) {
			pr_cont(" [%lu-%lu]%s", this->start,
				this->end, this->next ? "," : "");
			this = this->next;
		}

		pr_cont("\n");
		cur_chain = cur_chain->next;
		i++;
	}

	pr_debug("Saved states:\n");
	for (i = 0; i < 4; i++) {
		pr_debug("Slot %d: Chain %d.\n", i,
			 toi_writer_posn.saved_chain_number[i]);

		cur_chain = prio_chain_head;
		j = 0;
		while (cur_chain) {
			pr_debug(" Chain %d: Extent %d. Offset %lu.\n",
				 j, cur_chain->saved_state[i].extent_num,
				 cur_chain->saved_state[i].offset);
			cur_chain = cur_chain->next;
			j++;
		}
		pr_cont("\n");
	}
}
/**
 * toi_extent_chain_next - advance the current chain's position by one block
 *
 * Move the current chain's position forward one block, stepping to the next
 * extent when the current one is exhausted.
 **/
static void toi_extent_chain_next(void)
{
	struct toi_bdev_info *this = toi_writer_posn.current_chain;

	if (!this->blocks.current_extent)
		return;

	if (this->blocks.current_offset == this->blocks.current_extent->end) {
		if (this->blocks.current_extent->next) {
			this->blocks.current_extent = this->blocks.current_extent->next;
			this->blocks.current_offset = this->blocks.current_extent->start;
		} else {
			this->blocks.current_extent = NULL;
			this->blocks.current_offset = 0;
		}
	} else
		this->blocks.current_offset++;
}
/**
 * __find_next_chain_same_prio - find another usable chain of the same priority
 *
 * Starting from the current chain, look for the next chain of the same
 * priority that still has a current extent. If none is found, return the
 * chain we started on.
 **/
static struct toi_bdev_info *__find_next_chain_same_prio(void)
{
	struct toi_bdev_info *start_chain = toi_writer_posn.current_chain;
	struct toi_bdev_info *this = start_chain;
	int orig_prio = this->prio;

	do {
		this = this->next;

		if (!this)
			this = prio_chain_head;

		/* Back on original chain? Use it again. */
		if (this == start_chain)
			return start_chain;

	} while (!this->blocks.current_extent || this->prio != orig_prio);

	return this;
}
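/**
 * find_next_chain - set the current chain to the next one with storage left
 *
 * Prefer another chain of the same priority; if none of those has extents
 * left, walk down the remainder of the priority-ordered list until a chain
 * with a current extent is found (or the iterator runs off the end).
 **/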
static void find_next_chain(void)
{
	struct toi_bdev_info *this;

	this = __find_next_chain_same_prio();

	/*
	 * If we didn't get another chain of the same priority that we
	 * can use, look for the next priority.
	 */
	while (this && !this->blocks.current_extent)
		this = this->next;

	toi_writer_posn.current_chain = this;
}
/**
 * toi_extent_state_next - go to the next extent
 * @blocks: The number of blocks to progress.
 * @current_stream: The stream being read or written. The header (stream 0)
 *                  is not striped across chains.
 *
 * Given a state, progress to the next valid entry. We may begin in an
 * invalid state, as we do when invoked after extent_state_goto_start below.
 *
 * When using compression and expected_compression > 0, we let the image size
 * be larger than storage, so we can validly run out of data to return.
 **/
static unsigned long toi_extent_state_next(int blocks, int current_stream)
{
	int i;

	if (!toi_writer_posn.current_chain)
		return -ENOSPC;

	/* Assume chains always have lengths that are multiples of @blocks. */
	for (i = 0; i < blocks; i++)
		toi_extent_chain_next();

	/* The header stream is not striped. */
	if (current_stream ||
	    !toi_writer_posn.current_chain->blocks.current_extent)
		find_next_chain();

	return toi_writer_posn.current_chain ? 0 : -ENOSPC;
}
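/**
 * toi_insert_chain_in_prio_list - add a chain to the priority-ordered list
 * @this: Chain to insert.
 *
 * Chains are kept in descending priority order; a new chain is placed after
 * any existing chains of equal or higher priority.
 **/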
static void toi_insert_chain_in_prio_list(struct toi_bdev_info *this)
{
	struct toi_bdev_info **prev_ptr;
	struct toi_bdev_info *cur;

	/* Loop through the existing chains, finding where to insert this one. */
	prev_ptr = &prio_chain_head;
	cur = prio_chain_head;

	while (cur && cur->prio >= this->prio) {
		prev_ptr = &cur->next;
		cur = cur->next;
	}

	this->next = *prev_ptr;
	*prev_ptr = this;

	num_chains++;
}
/**
 * toi_extent_state_goto_start - reinitialize the extent chain iterator
 *
 * Point every chain back at its first extent and make the head of the
 * priority list the current chain.
 **/
void toi_extent_state_goto_start(void)
{
	struct toi_bdev_info *this = prio_chain_head;

	while (this) {
		toi_message(TOI_BIO, TOI_VERBOSE, 0,
			    "Setting current extent to %p.", this->blocks.first);
		this->blocks.current_extent = this->blocks.first;
		if (this->blocks.current_extent) {
			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    "Setting current offset to %lu.",
				    this->blocks.current_extent->start);
			this->blocks.current_offset = this->blocks.current_extent->start;
		}

		this = this->next;
	}

	toi_message(TOI_BIO, TOI_VERBOSE, 0,
		    "Setting current chain to %p.", prio_chain_head);
	toi_writer_posn.current_chain = prio_chain_head;
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Leaving extent state goto start.");
}
/**
 * toi_extent_state_save - save the current position of the iterator
 * @slot: Saved state slot (0-3) to populate.
 *
 * Save the current position in a format that can be used with relocated
 * chains (at resume time): for each chain, record the extent number and
 * offset, and remember which chain the iterator was on.
 **/
void toi_extent_state_save(int slot)
{
	struct toi_bdev_info *cur_chain = prio_chain_head;
	struct hibernate_extent *extent;
	struct hibernate_extent_saved_state *chain_state;
	int i = 0;

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "toi_extent_state_save, slot %d.", slot);

	if (!toi_writer_posn.current_chain) {
		toi_message(TOI_BIO, TOI_VERBOSE, 0,
			    "No current chain => chain_num = -1.");
		toi_writer_posn.saved_chain_number[slot] = -1;
		return;
	}

	while (cur_chain) {
		i++;
		toi_message(TOI_BIO, TOI_VERBOSE, 0,
			    "Saving chain %d (%p) state, slot %d.", i, cur_chain, slot);

		chain_state = &cur_chain->saved_state[slot];

		chain_state->offset = cur_chain->blocks.current_offset;

		if (toi_writer_posn.current_chain == cur_chain) {
			toi_writer_posn.saved_chain_number[slot] = i;
			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    "This is the chain we were on => chain_num is %d.", i);
		}

		if (!cur_chain->blocks.current_extent) {
			chain_state->extent_num = 0;
			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    "No current extent for chain => extent_num %d is 0.", i);
			cur_chain = cur_chain->next;
			continue;
		}

		extent = cur_chain->blocks.first;
		chain_state->extent_num = 1;

		while (extent != cur_chain->blocks.current_extent) {
			chain_state->extent_num++;
			extent = extent->next;
		}

		toi_message(TOI_BIO, TOI_VERBOSE, 0, "extent num %d is %d.", i,
			    chain_state->extent_num);

		cur_chain = cur_chain->next;
	}

	toi_message(TOI_BIO, TOI_VERBOSE, 0,
		    "Completed saving extent state slot %d.", slot);
}
/**
 * toi_extent_state_restore - restore a position saved by toi_extent_state_save
 * @slot: Saved state slot (0-3) to restore from.
 **/
void toi_extent_state_restore(int slot)
{
	int i = 0;
	struct toi_bdev_info *cur_chain = prio_chain_head;
	struct hibernate_extent_saved_state *chain_state;

	toi_message(TOI_BIO, TOI_VERBOSE, 0,
		    "toi_extent_state_restore - slot %d.", slot);

	if (toi_writer_posn.saved_chain_number[slot] == -1) {
		toi_writer_posn.current_chain = NULL;
		return;
	}

	while (cur_chain) {
		int posn;
		int j;
		i++;
		toi_message(TOI_BIO, TOI_VERBOSE, 0,
			    "Restoring chain %d (%p) state, slot %d.", i, cur_chain, slot);

		chain_state = &cur_chain->saved_state[slot];

		posn = chain_state->extent_num;

		cur_chain->blocks.current_extent = cur_chain->blocks.first;
		cur_chain->blocks.current_offset = chain_state->offset;

		if (i == toi_writer_posn.saved_chain_number[slot]) {
			toi_writer_posn.current_chain = cur_chain;
			toi_message(TOI_BIO, TOI_VERBOSE, 0, "Found current chain.");
		}

		for (j = 0; j < 4; j++)
			if (i == toi_writer_posn.saved_chain_number[j]) {
				toi_writer_posn.saved_chain_ptr[j] = cur_chain;
				toi_message(TOI_BIO, TOI_VERBOSE, 0,
					    "Found saved chain ptr %d (%p) (offset %lu).",
					    j, cur_chain,
					    cur_chain->saved_state[j].offset);
			}

		if (posn) {
			while (--posn)
				cur_chain->blocks.current_extent =
					cur_chain->blocks.current_extent->next;
		} else
			cur_chain->blocks.current_extent = NULL;

		cur_chain = cur_chain->next;
	}

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Done.");
	if (test_action_state(TOI_LOGALL))
		dump_block_chains();
}
/*
 * Storage needed
 *
 * Returns amount of space in the image header required
 * for the chain data. This ignores the links between
 * pages, which we factor in when allocating the space.
 */
int toi_bio_devinfo_storage_needed(void)
{
	int result = sizeof(num_chains);
	struct toi_bdev_info *chain = prio_chain_head;

	while (chain) {
		result += metadata_size;

		/* Chain size */
		result += sizeof(int);

		/* Extents */
		result += (2 * sizeof(unsigned long) * chain->blocks.num_extents);

		chain = chain->next;
	}

	result += 4 * sizeof(int);
	return result;
}
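/**
 * chain_pages_used - how much of a chain's storage has been used
 * @chain: Chain to examine.
 *
 * Count the storage used up to the position recorded in saved state slot 3.
 * If no position was recorded there, the whole chain is counted (or zero if
 * the chain has no extents).
 **/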
static unsigned long chain_pages_used(struct toi_bdev_info *chain)
{
	struct hibernate_extent *this = chain->blocks.first;
	struct hibernate_extent_saved_state *state = &chain->saved_state[3];
	unsigned long size = 0;
	int extent_idx = 1;

	if (!state->extent_num) {
		if (!this)
			return 0;
		else
			return chain->blocks.size;
	}

	while (extent_idx < state->extent_num) {
		size += (this->end - this->start + 1);
		this = this->next;
		extent_idx++;
	}

	/* We didn't use the one we're sitting on, so don't count it. */
	return size + state->offset - this->start;
}
/**
 * toi_serialise_extent_chain - write a chain in the image
 * @chain: Chain to write.
 **/
static int toi_serialise_extent_chain(struct toi_bdev_info *chain)
{
	struct hibernate_extent *this;
	int ret;
	int i = 1;

	chain->pages_used = chain_pages_used(chain);

	if (test_action_state(TOI_LOGALL))
		dump_block_chains();

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Serialising chain (dev_t %x).",
		    (unsigned int) chain->dev_t);

	/* Device info - dev_t, prio, bmap_shift, blocks per page, positions */
	ret = toiActiveAllocator->rw_header_chunk(WRITE, &toi_blockwriter_ops,
						  (char *) &chain->uuid, metadata_size);
	if (ret)
		return ret;

	/* Num extents */
	ret = toiActiveAllocator->rw_header_chunk(WRITE, &toi_blockwriter_ops,
						  (char *) &chain->blocks.num_extents,
						  sizeof(int));
	if (ret)
		return ret;

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "%d extents.", chain->blocks.num_extents);

	this = chain->blocks.first;
	while (this) {
		toi_message(TOI_BIO, TOI_VERBOSE, 0, "Extent %d.", i);
		ret = toiActiveAllocator->rw_header_chunk(WRITE,
							  &toi_blockwriter_ops,
							  (char *) this,
							  2 * sizeof(this->start));
		if (ret)
			return ret;
		this = this->next;
		i++;
	}

	return ret;
}
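/**
 * toi_serialise_extent_chains - write the chain metadata to the image header
 *
 * Write the number of chains, then each chain in turn, and finally the chain
 * numbers the iterator should be on at the start of each section.
 **/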
int toi_serialise_extent_chains(void)
{
	struct toi_bdev_info *this = prio_chain_head;
	int result;

	/* Write the number of chains */
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Write number of chains (%d)", num_chains);
	result = toiActiveAllocator->rw_header_chunk(WRITE,
						     &toi_blockwriter_ops,
						     (char *) &num_chains, sizeof(int));
	if (result)
		return result;

	/* Then the chains themselves */
	while (this) {
		result = toi_serialise_extent_chain(this);
		if (result)
			return result;
		this = this->next;
	}

	/*
	 * Finally, the chain we should be on at the start of each
	 * section.
	 */
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Saved chain numbers.");
	result = toiActiveAllocator->rw_header_chunk(WRITE,
						     &toi_blockwriter_ops,
						     (char *) &toi_writer_posn.saved_chain_number[0],
						     4 * sizeof(int));
	return result;
}
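/**
 * toi_register_storage_chain - add a block device chain to the priority list
 * @new: Chain describing the storage to register.
 **/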
int toi_register_storage_chain(struct toi_bdev_info *new)
{
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Inserting chain %p into list.", new);
	toi_insert_chain_in_prio_list(new);
	return 0;
}
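/**
 * free_bdev_info - free a single chain and the resources it holds
 * @chain: Chain to free.
 *
 * Release the chain's block extents, let the allocator free its storage,
 * close the block device if it is not one of the shared resume/header
 * devices, and free the structure itself.
 **/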
static void free_bdev_info(struct toi_bdev_info *chain)
{
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Free chain %p.", chain);

	toi_message(TOI_BIO, TOI_VERBOSE, 0, " - Block extents.");
	toi_put_extent_chain(&chain->blocks);

	/*
	 * The allocator may need to do more than just free the chains
	 * (swap_free, for example). Don't call from boot kernel.
	 */
	toi_message(TOI_BIO, TOI_VERBOSE, 0, " - Allocator extents.");
	if (chain->allocator)
		chain->allocator->bio_allocator_ops->free_storage(chain);

	/*
	 * Dropping out of reading atomic copy? Need to undo
	 * toi_open_by_devnum.
	 */
	toi_message(TOI_BIO, TOI_VERBOSE, 0, " - Bdev.");
	if (chain->bdev && !IS_ERR(chain->bdev) &&
	    chain->bdev != resume_block_device &&
	    chain->bdev != header_block_device &&
	    test_toi_state(TOI_TRYING_TO_RESUME))
		toi_close_bdev(chain->bdev);

	/* Poison */
	toi_message(TOI_BIO, TOI_VERBOSE, 0, " - Struct.");
	toi_kfree(39, chain, sizeof(*chain));

	if (prio_chain_head == chain)
		prio_chain_head = NULL;

	num_chains--;
}
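/**
 * free_all_bdev_info - free every registered chain and reset the iterator
 **/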
void free_all_bdev_info(void)
{
	struct toi_bdev_info *this = prio_chain_head;

	while (this) {
		struct toi_bdev_info *next = this->next;
		free_bdev_info(this);
		this = next;
	}

	memset((char *) &toi_writer_posn, 0, sizeof(toi_writer_posn));
	prio_chain_head = NULL;
}
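/**
 * set_up_start_position - initialise the iterator while loading chains
 *
 * Point the iterator at the head of the priority list and advance it by one
 * page.
 **/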
static void set_up_start_position(void)
{
	toi_writer_posn.current_chain = prio_chain_head;
	go_next_page(0, 0);
}
/**
 * toi_load_extent_chain - read back a chain saved in the image
 * @index: Index of the chain being loaded.
 * @num_loaded: Running count of extents loaded so far.
 *
 * The linked list of extents is reconstructed from the disk and the chain is
 * inserted into the priority list as soon as its first extent is known.
 **/
int toi_load_extent_chain(int index, int *num_loaded)
{
	struct toi_bdev_info *chain = toi_kzalloc(39,
						  sizeof(struct toi_bdev_info), GFP_ATOMIC);
	struct hibernate_extent *this, *last = NULL;
	int i, ret;

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Loading extent chain %d.", index);

	if (!chain) {
		pr_err("Failed to allocate memory for an extent chain.\n");
		return -ENOMEM;
	}

	/* Get dev_t, prio, bmap_shift, blocks per page, positions */
	ret = toiActiveAllocator->rw_header_chunk_noreadahead(READ, NULL,
							      (char *) &chain->uuid,
							      metadata_size);
	if (ret) {
		pr_err("Failed to read the device info for an extent chain.\n");
		toi_kfree(39, chain, sizeof(*chain));
		return 1;
	}

	toi_bkd.pages_used[index] = chain->pages_used;

	ret = toiActiveAllocator->rw_header_chunk_noreadahead(READ, NULL,
							      (char *) &chain->blocks.num_extents,
							      sizeof(int));
	if (ret) {
		pr_err("Failed to read the size of an extent chain.\n");
		toi_kfree(39, chain, sizeof(*chain));
		return 1;
	}

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "%d extents.", chain->blocks.num_extents);

	for (i = 0; i < chain->blocks.num_extents; i++) {
		toi_message(TOI_BIO, TOI_VERBOSE, 0, "Extent %d.", i + 1);

		this = toi_kzalloc(2, sizeof(struct hibernate_extent), TOI_ATOMIC_GFP);
		if (!this) {
			pr_warn("Failed to allocate a new extent.\n");
			free_bdev_info(chain);
			return -ENOMEM;
		}
		this->next = NULL;

		/* Get the next extent (start and end) */
		ret = toiActiveAllocator->rw_header_chunk_noreadahead(READ,
								      NULL, (char *) this,
								      2 * sizeof(this->start));
		if (ret) {
			pr_warn("Failed to read an extent.\n");
			toi_kfree(2, this, sizeof(struct hibernate_extent));
			free_bdev_info(chain);
			return 1;
		}

		if (last)
			last->next = this;
		else {
			char b1[32], b2[32], b3[32];
			/*
			 * Open the bdev
			 */
			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    "Chain dev_t is %s. Resume dev_t is %s. Header dev_t is %s.\n",
				    format_dev_t(b1, chain->dev_t),
				    format_dev_t(b2, resume_dev_t),
				    format_dev_t(b3, toi_sig_data->header_dev_t));
			if (chain->dev_t == resume_dev_t)
				chain->bdev = resume_block_device;
			else if (chain->dev_t == toi_sig_data->header_dev_t)
				chain->bdev = header_block_device;
			else {
				chain->bdev = toi_open_bdev(chain->uuid, chain->dev_t, 1);
				if (IS_ERR(chain->bdev)) {
					free_bdev_info(chain);
					return -ENODEV;
				}
			}

			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    "Chain bmap shift is %d and blocks per page is %d.",
				    chain->bmap_shift, chain->blocks_per_page);

			chain->blocks.first = this;

			/*
			 * Couldn't do this earlier, but can't do
			 * goto_start now - we may have already used blocks
			 * in the first chain.
			 */
			chain->blocks.current_extent = this;
			chain->blocks.current_offset = this->start;

			/*
			 * Can't wait until we've read the whole chain
			 * before we insert it in the list. We might need
			 * this chain to read the next page in the header.
			 */
			toi_insert_chain_in_prio_list(chain);
		}

		/*
		 * We have to wait until 2 extents are loaded before setting up
		 * properly because if the first extent has only one page, we
		 * will need to put the position on the second extent. Sounds
		 * obvious, but it wasn't!
		 */
		(*num_loaded)++;
		if ((*num_loaded) == 2)
			set_up_start_position();
		last = this;
	}

	/*
	 * Shouldn't get empty chains, but it's not impossible. Link them in so
	 * they get freed properly later.
	 */
	if (!chain->blocks.num_extents)
		toi_insert_chain_in_prio_list(chain);

	if (!chain->blocks.current_extent) {
		chain->blocks.current_extent = chain->blocks.first;
		if (chain->blocks.current_extent)
			chain->blocks.current_offset = chain->blocks.current_extent->start;
	}

	return 0;
}
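/**
 * toi_load_extent_chains - read back all extent chains from the image header
 *
 * Read the number of chains, load each chain in turn, then read the saved
 * chain numbers for each section.
 **/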
int toi_load_extent_chains(void)
{
	int result;
	int to_load;
	int i;
	int extents_loaded = 0;

	result = toiActiveAllocator->rw_header_chunk_noreadahead(READ, NULL,
								 (char *) &to_load,
								 sizeof(int));
	if (result)
		return result;
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "%d chains to read.", to_load);

	for (i = 0; i < to_load; i++) {
		toi_message(TOI_BIO, TOI_VERBOSE, 0, " >> Loading chain %d/%d.", i, to_load);
		result = toi_load_extent_chain(i, &extents_loaded);
		if (result)
			return result;
	}

	/* If we never got to a second extent, we still need to do this. */
	if (extents_loaded == 1)
		set_up_start_position();

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Reading saved chain numbers.");
	result = toiActiveAllocator->rw_header_chunk_noreadahead(READ,
								 &toi_blockwriter_ops,
								 (char *) &toi_writer_posn.saved_chain_number[0],
								 4 * sizeof(int));
	return result;
}
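/**
 * toi_end_of_stream - have we reached the end of the current stream?
 * @writing: Whether we're reading or writing the image.
 * @section_barrier: Whether stream boundaries should be enforced.
 *
 * Compare the current position with the saved position at which the next
 * section starts. Hitting that boundary while writing the header stream is
 * treated as an error (debug_broken_header()); when reading, it simply means
 * the stream is finished.
 **/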
static int toi_end_of_stream(int writing, int section_barrier)
{
	struct toi_bdev_info *cur_chain = toi_writer_posn.current_chain;
	int compare_to = next_section[current_stream];
	struct toi_bdev_info *compare_chain =
		toi_writer_posn.saved_chain_ptr[compare_to];
	int compare_offset = compare_chain ?
		compare_chain->saved_state[compare_to].offset : 0;

	if (!section_barrier)
		return 0;

	if (!cur_chain)
		return 1;

	if (cur_chain == compare_chain &&
	    cur_chain->blocks.current_offset == compare_offset) {
		if (writing) {
			if (!current_stream) {
				debug_broken_header();
				return 1;
			}
		} else {
			more_readahead = 0;
			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    "Reached the end of stream %d (not an error).",
				    current_stream);
			return 1;
		}
	}

	return 0;
}
/**
 * go_next_page - skip blocks to the start of the next page
 * @writing: Whether we're reading or writing the image.
 * @section_barrier: Whether the caller is observing section boundaries
 *                   (here this only affects logging).
 *
 * Go forward one page.
 **/
int go_next_page(int writing, int section_barrier)
{
	struct toi_bdev_info *cur_chain = toi_writer_posn.current_chain;
	int max = cur_chain ? cur_chain->blocks_per_page : 1;

	/*
	 * Go forward a page - or maybe two. Don't stripe the header,
	 * so that bad fragmentation doesn't put the extent data containing
	 * the location of the second page out of the first header page.
	 */
	if (toi_extent_state_next(max, current_stream)) {
		/* Don't complain if readahead falls off the end */
		if (writing && section_barrier) {
			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    "Extent eof. Expected compression ratio too optimistic?");
			if (test_action_state(TOI_LOGALL))
				dump_block_chains();
		}
		toi_message(TOI_BIO, TOI_VERBOSE, 0,
			    "Ran out of extents to read/write. (Not necessarily fatal.)");
		return -ENOSPC;
	}

	return 0;
}
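/**
 * devices_of_same_priority - count chains that share a chain's priority
 * @this: Chain whose priority we're interested in.
 **/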
int devices_of_same_priority(struct toi_bdev_info *this)
{
	struct toi_bdev_info *check = prio_chain_head;
	int i = 0;

	while (check) {
		if (check->prio == this->prio)
			i++;
		check = check->next;
	}

	return i;
}
/**
 * toi_bio_rw_page - do i/o on the next disk page in the image
 * @writing: Whether reading or writing.
 * @page: Page to do i/o on.
 * @is_readahead: Whether we're doing readahead.
 * @free_group: The group used in allocating the page.
 *
 * Submit a page for reading or writing, possibly readahead.
 * Pass the group used in allocating the page as well, as it should
 * be freed on completion of the bio if we're writing the page.
 **/
int toi_bio_rw_page(int writing, struct page *page, int is_readahead, int free_group)
{
	int result = toi_end_of_stream(writing, 1);
	struct toi_bdev_info *dev_info = toi_writer_posn.current_chain;

	if (result) {
		if (writing)
			abort_hibernate(TOI_INSUFFICIENT_STORAGE,
					"Insufficient storage for your image.");
		else
			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    "Seeking another page when stream has ended.");
		return -ENOSPC;
	}

	toi_message(TOI_BIO, TOI_VERBOSE, 0,
		    "%s %x:%lu",
		    writing ? "Write" : "Read",
		    (unsigned int) dev_info->dev_t, dev_info->blocks.current_offset);

	result = toi_do_io(writing, dev_info->bdev,
			   dev_info->blocks.current_offset << dev_info->bmap_shift,
			   page, is_readahead, 0, free_group);

	/* Ignore the result here - we will check end of stream if we come in again. */
	go_next_page(writing, 1);

	if (result)
		pr_err("toi_do_io returned %d.\n", result);

	return result;
}
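/* Accessors for the highest-priority chain, where the image header starts. */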
dev_t get_header_dev_t(void)
{
	return prio_chain_head->dev_t;
}

struct block_device *get_header_bdev(void)
{
	return prio_chain_head->bdev;
}

unsigned long get_headerblock(void)
{
	return prio_chain_head->blocks.first->start << prio_chain_head->bmap_shift;
}
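/**
 * get_main_pool_phys_params - have each allocator bmap its storage
 *
 * Ask every chain's allocator to map the storage it allocated to block
 * numbers, so that we have physical locations to do i/o against.
 **/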
int get_main_pool_phys_params(void)
{
	struct toi_bdev_info *this = prio_chain_head;
	int result;

	while (this) {
		result = this->allocator->bio_allocator_ops->bmap(this);
		if (result)
			return result;
		this = this->next;
	}

	return 0;
}
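/**
 * apply_header_reservation - skip over the space reserved for the header
 *
 * Reset the iterator to the start of storage, step over the reserved header
 * pages and record the resulting position in saved state slot 2; the end of
 * the header pages is the start of pageset 2.
 **/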
static int apply_header_reservation(void)
{
	int i;

	if (!header_pages_reserved) {
		toi_message(TOI_BIO, TOI_VERBOSE, 0,
			    "No header pages reserved at the moment.");
		return 0;
	}

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Applying header reservation.");

	/* Apply header space reservation */
	toi_extent_state_goto_start();

	for (i = 0; i < header_pages_reserved; i++)
		if (go_next_page(1, 0))
			return -ENOSPC;

	/* The end of header pages will be the start of pageset 2 */
	toi_extent_state_save(2);

	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Finished applying header reservation.");
	return 0;
}
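/**
 * toi_bio_register_storage - ask each enabled bio allocator to register storage
 *
 * Walk the list of TuxOnIce modules and invoke register_storage on every
 * enabled bio allocator, stopping at the first error.
 **/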
static int toi_bio_register_storage(void)
{
	int result = 0;
	struct toi_module_ops *this_module;

	list_for_each_entry(this_module, &toi_modules, module_list) {
		if (!this_module->enabled ||
		    this_module->type != BIO_ALLOCATOR_MODULE)
			continue;
		toi_message(TOI_BIO, TOI_VERBOSE, 0,
			    "Registering storage from %s.", this_module->name);
		result = this_module->bio_allocator_ops->register_storage();
		if (result)
			break;
	}

	return result;
}
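/**
 * toi_bio_allocate_storage - try to allocate enough storage for the image
 * @request: Number of pages of image data we want to store.
 *
 * On top of the request itself, add a margin for metadata (one unsigned long
 * plus one int per page) and for the reserved header pages. Allocation is
 * spread as evenly as possible across the chains of each priority level,
 * working down the priority list until either enough storage is obtained or
 * every allocator is exhausted, then the header reservation is applied.
 **/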
int toi_bio_allocate_storage(unsigned long request)
{
	struct toi_bdev_info *chain = prio_chain_head;
	unsigned long to_get = request;
	unsigned long extra_pages, needed;
	int no_free = 0;

	if (!chain) {
		int result = toi_bio_register_storage();
		toi_message(TOI_BIO, TOI_VERBOSE, 0,
			    "toi_bio_allocate_storage: Registering storage.");
		if (result)
			return 0;
		chain = prio_chain_head;
		if (!chain) {
			pr_warn("TuxOnIce: No storage was registered.\n");
			return 0;
		}
	}

	toi_message(TOI_BIO, TOI_VERBOSE, 0,
		    "toi_bio_allocate_storage: Request is %lu pages.", request);
	extra_pages = DIV_ROUND_UP(request * (sizeof(unsigned long)
					      + sizeof(int)), PAGE_SIZE);
	needed = request + extra_pages + header_pages_reserved;
	toi_message(TOI_BIO, TOI_VERBOSE, 0,
		    "Adding %lu extra pages and %lu for header => %lu.",
		    extra_pages, header_pages_reserved, needed);
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Already allocated %lu pages.",
		    raw_pages_allocd);

	to_get = needed > raw_pages_allocd ? needed - raw_pages_allocd : 0;
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Need to get %lu pages.", to_get);

	if (!to_get)
		return apply_header_reservation();

	while (to_get && chain) {
		int num_group = devices_of_same_priority(chain);
		int divisor = num_group - no_free;
		int i;
		unsigned long portion = DIV_ROUND_UP(to_get, divisor);
		unsigned long got = 0;
		unsigned long got_this_round = 0;
		struct toi_bdev_info *top = chain;

		toi_message(TOI_BIO, TOI_VERBOSE, 0,
			    " Start of loop. To get is %lu. Divisor is %d.",
			    to_get, divisor);
		no_free = 0;

		/*
		 * We're aiming to spread the allocated storage as evenly
		 * as possible, but we also want to get all the storage we
		 * can off this priority.
		 */
		for (i = 0; i < num_group; i++) {
			struct toi_bio_allocator_ops *ops =
				chain->allocator->bio_allocator_ops;
			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    " Asking for %lu pages from chain %p.", portion, chain);
			got = ops->allocate_storage(chain, portion);
			toi_message(TOI_BIO, TOI_VERBOSE, 0,
				    " Got %lu pages from allocator %p.", got, chain);
			if (!got)
				no_free++;
			got_this_round += got;
			chain = chain->next;
		}

		toi_message(TOI_BIO, TOI_VERBOSE, 0,
			    " Loop finished. Got a total of %lu pages from %d allocators.",
			    got_this_round, divisor - no_free);

		raw_pages_allocd += got_this_round;
		to_get = needed > raw_pages_allocd ? needed - raw_pages_allocd : 0;

		/*
		 * If we got anything from chains of this priority and we
		 * still have storage to allocate, go over this priority
		 * again.
		 */
		if (got_this_round && to_get)
			chain = top;
		else
			no_free = 0;
	}

	toi_message(TOI_BIO, TOI_VERBOSE, 0,
		    "Finished allocating. Calling get_main_pool_phys_params");
	/* Now let swap allocator bmap the pages */
	get_main_pool_phys_params();
	toi_message(TOI_BIO, TOI_VERBOSE, 0, "Done. Reserving header.");
	return apply_header_reservation();
}
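/**
 * toi_bio_chains_post_atomic - copy per-chain pages_used counts back
 * @bkd: Boot kernel data containing the per-chain page counts.
 **/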
void toi_bio_chains_post_atomic(struct toi_boot_kernel_data *bkd)
{
	int i = 0;
	struct toi_bdev_info *cur_chain = prio_chain_head;

	while (cur_chain) {
		cur_chain->pages_used = bkd->pages_used[i];
		cur_chain = cur_chain->next;
		i++;
	}
}
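/**
 * toi_bio_chains_debug_info - report how many pages each chain actually used
 * @buffer: Buffer to write the report into.
 * @size: Size of the buffer.
 *
 * Returns the number of characters written.
 **/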
int toi_bio_chains_debug_info(char *buffer, int size)
{
	/* Show what we actually used */
	struct toi_bdev_info *cur_chain = prio_chain_head;
	int len = 0;

	while (cur_chain) {
		len += scnprintf(buffer + len, size - len, " Used %lu pages from %s.\n",
				 cur_chain->pages_used, cur_chain->name);
		cur_chain = cur_chain->next;
	}

	return len;
}