/*
   drbd_req.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING. If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/drbd.h>
#include "drbd_int.h"
#include "drbd_req.h"

static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);

/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	const int rw = bio_data_dir(req->master_bio);
	int cpu;

	cpu = part_stat_lock();
	part_round_stats(cpu, &device->vdisk->part0);
	part_stat_inc(cpu, &device->vdisk->part0, ios[rw]);
	part_stat_add(cpu, &device->vdisk->part0, sectors[rw], req->i.size >> 9);
	(void) cpu; /* The macro invocations above want the cpu argument, I do not like
		       the compiler warning about cpu only assigned but never used... */
	part_inc_in_flight(&device->vdisk->part0, rw);
	part_stat_unlock();
}

/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
	int rw = bio_data_dir(req->master_bio);
	unsigned long duration = jiffies - req->start_jif;
	int cpu;

	cpu = part_stat_lock();
	part_stat_add(cpu, &device->vdisk->part0, ticks[rw], duration);
	part_round_stats(cpu, &device->vdisk->part0);
	part_dec_in_flight(&device->vdisk->part0, rw);
	part_stat_unlock();
}

static struct drbd_request *drbd_req_new(struct drbd_device *device,
					 struct bio *bio_src)
{
	struct drbd_request *req;

	req = mempool_alloc(drbd_request_mempool, GFP_NOIO | __GFP_ZERO);
	if (!req)
		return NULL;

	drbd_req_make_private_bio(req, bio_src);
	req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
	req->device = device;
	req->master_bio = bio_src;
	req->epoch = 0;

	drbd_clear_interval(&req->i);
	req->i.sector = bio_src->bi_iter.bi_sector;
	req->i.size = bio_src->bi_iter.bi_size;
	req->i.local = true;
	req->i.waiting = false;

	INIT_LIST_HEAD(&req->tl_requests);
	INIT_LIST_HEAD(&req->w.list);
	INIT_LIST_HEAD(&req->req_pending_master_completion);
	INIT_LIST_HEAD(&req->req_pending_local);

	/* one reference to be put by __drbd_make_request */
	atomic_set(&req->completion_ref, 1);
	/* one kref as long as completion_ref > 0 */
	kref_init(&req->kref);
	return req;
}

static void drbd_remove_request_interval(struct rb_root *root,
					 struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct drbd_interval *i = &req->i;

	drbd_remove_interval(root, i);

	/* Wake up any processes waiting for this request to complete. */
	if (i->waiting)
		wake_up(&device->misc_wait);
}

void drbd_req_destroy(struct kref *kref)
{
	struct drbd_request *req = container_of(kref, struct drbd_request, kref);
	struct drbd_device *device = req->device;
	const unsigned s = req->rq_state;

	if ((req->master_bio && !(s & RQ_POSTPONED)) ||
	    atomic_read(&req->completion_ref) ||
	    (s & RQ_LOCAL_PENDING) ||
	    ((s & RQ_NET_MASK) && !(s & RQ_NET_DONE))) {
		drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n",
			s, atomic_read(&req->completion_ref));
		return;
	}

	/* If called from mod_rq_state (expected normal case) or
	 * drbd_send_and_submit (the less likely normal path), this holds the
	 * req_lock, and req->tl_requests will typically be on ->transfer_log,
	 * though it may be still empty (never added to the transfer log).
	 *
	 * If called from do_retry(), we do NOT hold the req_lock, but we are
	 * still allowed to unconditionally list_del(&req->tl_requests),
	 * because it will be on a local on-stack list only. */
	list_del_init(&req->tl_requests);

	/* finally remove the request from the conflict detection
	 * respective block_id verification interval tree. */
	if (!drbd_interval_empty(&req->i)) {
		struct rb_root *root;

		if (s & RQ_WRITE)
			root = &device->write_requests;
		else
			root = &device->read_requests;
		drbd_remove_request_interval(root, req);
	} else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0)
		drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n",
			s, (unsigned long long)req->i.sector, req->i.size);

	/* if it was a write, we may have to set the corresponding
	 * bit(s) out-of-sync first. If it had a local part, we need to
	 * release the reference to the activity log. */
	if (s & RQ_WRITE) {
		/* Set out-of-sync unless both OK flags are set
		 * (local only or remote failed).
		 * Other places where we set out-of-sync:
		 * READ with local io-error */

		/* There is a special case:
		 * we may notice late that IO was suspended,
		 * and postpone, or schedule for retry, a write,
		 * before it even was submitted or sent.
		 * In that case we do not want to touch the bitmap at all.
		 */
		if ((s & (RQ_POSTPONED|RQ_LOCAL_MASK|RQ_NET_MASK)) != RQ_POSTPONED) {
			if (!(s & RQ_NET_OK) || !(s & RQ_LOCAL_OK))
				drbd_set_out_of_sync(device, req->i.sector, req->i.size);

			if ((s & RQ_NET_OK) && (s & RQ_LOCAL_OK) && (s & RQ_NET_SIS))
				drbd_set_in_sync(device, req->i.sector, req->i.size);
		}

		/* one might be tempted to move the drbd_al_complete_io
		 * to the local io completion callback drbd_request_endio.
		 * but, if this was a mirror write, we may only
		 * drbd_al_complete_io after this is RQ_NET_DONE,
		 * otherwise the extent could be dropped from the al
		 * before it has actually been written on the peer.
		 * if we crash before our peer knows about the request,
		 * but after the extent has been dropped from the al,
		 * we would forget to resync the corresponding extent.
		 */
		if (s & RQ_IN_ACT_LOG) {
			if (get_ldev_if_state(device, D_FAILED)) {
				drbd_al_complete_io(device, &req->i);
				put_ldev(device);
			} else if (__ratelimit(&drbd_ratelimit_state)) {
				drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), "
					"but my Disk seems to have failed :(\n",
					(unsigned long long) req->i.sector, req->i.size);
			}
		}
	}

	mempool_free(req, drbd_request_mempool);
}
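
/* Transfer log epochs ("barriers"): connection->current_tle_nr names the
 * currently open epoch, connection->current_tle_writes counts the writes
 * added to it so far. start_new_tl_epoch() closes the open epoch by
 * resetting that counter, bumping current_tle_nr and waking the senders,
 * so the epoch boundary can be communicated to the peer. */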
static void wake_all_senders(struct drbd_connection *connection)
{
	wake_up(&connection->sender_work.q_wait);
}

/* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_connection *connection)
{
	/* no point closing an epoch, if it is empty, anyways. */
	if (connection->current_tle_writes == 0)
		return;

	connection->current_tle_writes = 0;
	atomic_inc(&connection->current_tle_nr);
	wake_all_senders(connection);
}

void complete_master_bio(struct drbd_device *device,
		struct bio_and_error *m)
{
	bio_endio(m->bio, m->error);
	dec_ap_bio(device);
}

/* Helper for __req_mod().
 * Set m->bio to the master bio, if it is fit to be completed,
 * or leave it alone (it is initialized to NULL in __req_mod),
 * if it has already been completed, or cannot be completed yet.
 * If m->bio is set, the error status to be returned is placed in m->error.
 */
static
void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
{
	const unsigned s = req->rq_state;
	struct drbd_device *device = req->device;
	int rw;
	int error, ok;

	/* we must not complete the master bio, while it is
	 * still being processed by _drbd_send_zc_bio (drbd_send_dblock)
	 * not yet acknowledged by the peer
	 * not yet completed by the local io subsystem
	 * these flags may get cleared in any order by
	 * the worker,
	 * the receiver,
	 * the bio_endio completion callbacks.
	 */
	if ((s & RQ_LOCAL_PENDING && !(s & RQ_LOCAL_ABORTED)) ||
	    (s & RQ_NET_QUEUED) || (s & RQ_NET_PENDING) ||
	    (s & RQ_COMPLETION_SUSP)) {
		drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s);
		return;
	}

	if (!req->master_bio) {
		drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n");
		return;
	}

	rw = bio_rw(req->master_bio);

	/*
	 * figure out whether to report success or failure.
	 *
	 * report success when at least one of the operations succeeded.
	 * or, to put it the other way,
	 * only report failure, when both operations failed.
	 *
	 * what to do about the failures is handled elsewhere.
	 * what we need to do here is just: complete the master_bio.
	 *
	 * local completion error, if any, has been stored as ERR_PTR
	 * in private_bio within drbd_request_endio.
	 */
	ok = (s & RQ_LOCAL_OK) || (s & RQ_NET_OK);
	error = PTR_ERR(req->private_bio);

	/* Before we can signal completion to the upper layers,
	 * we may need to close the current transfer log epoch.
	 * We are within the request lock, so we can simply compare
	 * the request epoch number with the current transfer log
	 * epoch number. If they match, increase the current_tle_nr,
	 * and reset the transfer log epoch write_cnt.
	 */
	if (rw == WRITE &&
	    req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr))
		start_new_tl_epoch(first_peer_device(device)->connection);

	/* Update disk stats */
	_drbd_end_io_acct(device, req);

	/* If READ failed,
	 * have it be pushed back to the retry work queue,
	 * so it will re-enter __drbd_make_request(),
	 * and be re-assigned to a suitable local or remote path,
	 * or failed if we do not have access to good data anymore.
	 *
	 * Unless it was failed early by __drbd_make_request(),
	 * because no path was available, in which case
	 * it was not even added to the transfer_log.
	 *
	 * READA may fail, and will not be retried.
	 *
	 * WRITE should have used all available paths already.
	 */
	if (!ok && rw == READ && !list_empty(&req->tl_requests))
		req->rq_state |= RQ_POSTPONED;

	if (!(req->rq_state & RQ_POSTPONED)) {
		m->error = ok ? 0 : (error ?: -EIO);
		m->bio = req->master_bio;
		req->master_bio = NULL;
		/* We leave it in the tree, to be able to verify later
		 * write-acks in protocol != C during resync.
		 * But we mark it as "complete", so it won't be counted as
		 * conflict in a multi-primary setup. */
		req->i.completed = true;
	}

	if (req->i.waiting)
		wake_up(&device->misc_wait);

	/* Either we are about to complete to upper layers,
	 * or we will restart this request.
	 * In either case, the request object will be destroyed soon,
	 * so better remove it from all lists. */
	list_del_init(&req->req_pending_master_completion);
}

/* still holds resource->req_lock */
static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put)
{
	struct drbd_device *device = req->device;
	D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED));

	if (!atomic_sub_and_test(put, &req->completion_ref))
		return 0;

	drbd_req_complete(req, m);

	if (req->rq_state & RQ_POSTPONED) {
		/* don't destroy the req object just yet,
		 * but queue it for retry */
		drbd_restart_request(req);
		return 0;
	}

	return 1;
}
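
/* The following helpers maintain per-connection cached pointers into the
 * transfer log: req_next (oldest request still queued for the sender),
 * req_ack_pending (oldest request sent but still waiting for an ack) and
 * req_not_net_done (oldest request not yet done on the network). A pointer
 * is set when a request enters the corresponding state while the pointer is
 * unset, and is advanced along ->transfer_log when that request leaves the
 * state, so the transfer log never has to be rescanned from the beginning. */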
static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next == NULL)
		connection->req_next = req;
}

static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_next != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if (s & RQ_NET_QUEUED)
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_next = req;
}

static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending == NULL)
		connection->req_ack_pending = req;
}

static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_ack_pending != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && (s & RQ_NET_PENDING))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_ack_pending = req;
}

static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done == NULL)
		connection->req_not_net_done = req;
}

static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	if (!connection)
		return;
	if (connection->req_not_net_done != req)
		return;
	list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
		const unsigned s = req->rq_state;
		if ((s & RQ_NET_SENT) && !(s & RQ_NET_DONE))
			break;
	}
	if (&req->tl_requests == &connection->transfer_log)
		req = NULL;
	connection->req_not_net_done = req;
}

/* I'd like this to be the only place that manipulates
 * req->completion_ref and req->kref. */
static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m,
		int clear, int set)
{
	struct drbd_device *device = req->device;
	struct drbd_peer_device *peer_device = first_peer_device(device);
	unsigned s = req->rq_state;
	int c_put = 0;
	int k_put = 0;
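
	/* c_put counts how many completion_ref references this transition
	 * drops, k_put how many kref references. They are only applied at
	 * the very end, so the request cannot be completed or destroyed
	 * while we are still looking at it here. */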
	if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP))
		set |= RQ_COMPLETION_SUSP;

	/* apply */

	req->rq_state &= ~clear;
	req->rq_state |= set;

	/* no change? */
	if (req->rq_state == s)
		return;

	/* intent: get references */

	if (!(s & RQ_LOCAL_PENDING) && (set & RQ_LOCAL_PENDING))
		atomic_inc(&req->completion_ref);

	if (!(s & RQ_NET_PENDING) && (set & RQ_NET_PENDING)) {
		inc_ap_pending(device);
		atomic_inc(&req->completion_ref);
	}

	if (!(s & RQ_NET_QUEUED) && (set & RQ_NET_QUEUED)) {
		atomic_inc(&req->completion_ref);
		set_if_null_req_next(peer_device, req);
	}

	if (!(s & RQ_EXP_BARR_ACK) && (set & RQ_EXP_BARR_ACK))
		kref_get(&req->kref); /* wait for the DONE */

	if (!(s & RQ_NET_SENT) && (set & RQ_NET_SENT)) {
		/* potentially already completed in the asender thread */
		if (!(s & RQ_NET_DONE)) {
			atomic_add(req->i.size >> 9, &device->ap_in_flight);
			set_if_null_req_not_net_done(peer_device, req);
		}
		if (s & RQ_NET_PENDING)
			set_if_null_req_ack_pending(peer_device, req);
	}

	if (!(s & RQ_COMPLETION_SUSP) && (set & RQ_COMPLETION_SUSP))
		atomic_inc(&req->completion_ref);

	/* progress: put references */

	if ((s & RQ_COMPLETION_SUSP) && (clear & RQ_COMPLETION_SUSP))
		++c_put;

	if (!(s & RQ_LOCAL_ABORTED) && (set & RQ_LOCAL_ABORTED)) {
		D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING);
		/* local completion may still come in later,
		 * we need to keep the req object around. */
		kref_get(&req->kref);
		++c_put;
	}

	if ((s & RQ_LOCAL_PENDING) && (clear & RQ_LOCAL_PENDING)) {
		if (req->rq_state & RQ_LOCAL_ABORTED)
			++k_put;
		else
			++c_put;
		list_del_init(&req->req_pending_local);
	}

	if ((s & RQ_NET_PENDING) && (clear & RQ_NET_PENDING)) {
		dec_ap_pending(device);
		++c_put;
		req->acked_jif = jiffies;
		advance_conn_req_ack_pending(peer_device, req);
	}

	if ((s & RQ_NET_QUEUED) && (clear & RQ_NET_QUEUED)) {
		++c_put;
		advance_conn_req_next(peer_device, req);
	}

	if (!(s & RQ_NET_DONE) && (set & RQ_NET_DONE)) {
		if (s & RQ_NET_SENT)
			atomic_sub(req->i.size >> 9, &device->ap_in_flight);
		if (s & RQ_EXP_BARR_ACK)
			++k_put;
		req->net_done_jif = jiffies;

		/* in ahead/behind mode, or just in case,
		 * before we finally destroy this request,
		 * the caching pointers must not reference it anymore */
		advance_conn_req_next(peer_device, req);
		advance_conn_req_ack_pending(peer_device, req);
		advance_conn_req_not_net_done(peer_device, req);
	}

	/* potentially complete and destroy */

	if (k_put || c_put) {
		/* Completion does its own kref_put. If we are going to
		 * kref_sub below, we need req to be still around then. */
		int at_least = k_put + !!c_put;
		int refcount = atomic_read(&req->kref.refcount);
		if (refcount < at_least)
			drbd_err(device,
				"mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n",
				s, req->rq_state, refcount, at_least);
	}

	/* If we made progress, retry conflicting peer requests, if any. */
	if (req->i.waiting)
		wake_up(&device->misc_wait);

	if (c_put)
		k_put += drbd_req_put_completion_ref(req, m, c_put);
	if (k_put)
		kref_sub(&req->kref, k_put, drbd_req_destroy);
}

static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req)
{
	char b[BDEVNAME_SIZE];

	if (!__ratelimit(&drbd_ratelimit_state))
		return;

	drbd_warn(device, "local %s IO error sector %llu+%u on %s\n",
			(req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
			(unsigned long long)req->i.sector,
			req->i.size >> 9,
			bdevname(device->ldev->backing_bdev, b));
}

/* Helper for HANDED_OVER_TO_NETWORK.
 * Is this a protocol A write (neither WRITE_ACK nor RECEIVE_ACK expected)?
 * Is it also still "PENDING"?
 * --> If so, clear PENDING and set NET_OK below.
 * If it is a protocol A write, but not RQ_PENDING anymore, neg-ack was faster
 * (and we must not set RQ_NET_OK) */
static inline bool is_pending_write_protocol_A(struct drbd_request *req)
{
	return (req->rq_state &
		(RQ_WRITE|RQ_NET_PENDING|RQ_EXP_WRITE_ACK|RQ_EXP_RECEIVE_ACK))
		== (RQ_WRITE|RQ_NET_PENDING);
}

/* obviously this could be coded as many single functions
 * instead of one huge switch,
 * or by putting the code directly in the respective locations
 * (as it has been before).
 *
 * but having it this way
 * enforces that it is all in this one place, where it is easier to audit,
 * it makes it obvious that whatever "event" "happens" to a request should
 * happen "atomically" within the req_lock,
 * and it enforces that we have to think in a very structured manner
 * about the "events" that may happen to a request during its life time ...
 */
int __req_mod(struct drbd_request *req, enum drbd_req_event what,
		struct bio_and_error *m)
{
	struct drbd_device *const device = req->device;
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	struct net_conf *nc;
	int p, rv = 0;

	if (m)
		m->bio = NULL;

	switch (what) {
	default:
		drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__);
		break;

	/* does not happen...
	 * initialization done in drbd_req_new
	case CREATED:
		break;
		*/

	case TO_BE_SENT: /* via network */
		/* reached via __drbd_make_request
		 * and from w_read_retry_remote */
		D_ASSERT(device, !(req->rq_state & RQ_NET_MASK));
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->wire_protocol;
		rcu_read_unlock();
		req->rq_state |=
			p == DRBD_PROT_C ? RQ_EXP_WRITE_ACK :
			p == DRBD_PROT_B ? RQ_EXP_RECEIVE_ACK : 0;
		mod_rq_state(req, m, 0, RQ_NET_PENDING);
		break;

	case TO_BE_SUBMITTED: /* locally */
		/* reached via __drbd_make_request */
		D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK));
		mod_rq_state(req, m, 0, RQ_LOCAL_PENDING);
		break;

	case COMPLETED_OK:
		if (req->rq_state & RQ_WRITE)
			device->writ_cnt += req->i.size >> 9;
		else
			device->read_cnt += req->i.size >> 9;

		mod_rq_state(req, m, RQ_LOCAL_PENDING,
				RQ_LOCAL_COMPLETED|RQ_LOCAL_OK);
		break;

	case ABORT_DISK_IO:
		mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED);
		break;

	case WRITE_COMPLETED_WITH_ERROR:
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_WRITE_ERROR);
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case READ_COMPLETED_WITH_ERROR:
		drbd_set_out_of_sync(device, req->i.sector, req->i.size);
		drbd_report_io_error(device, req);
		__drbd_chk_io_error(device, DRBD_READ_ERROR);
		/* fall through. */
	case READ_AHEAD_COMPLETED_WITH_ERROR:
		/* it is legal to fail READA, no __drbd_chk_io_error in that case. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case DISCARD_COMPLETED_NOTSUPP:
	case DISCARD_COMPLETED_WITH_ERROR:
		/* I'd rather not detach from local disk just because it
		 * failed a REQ_DISCARD. */
		mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED);
		break;

	case QUEUE_FOR_NET_READ:
		/* READ or READA, and
		 * no local disk,
		 * or target area marked as invalid,
		 * or just got an io-error. */
		/* from __drbd_make_request
		 * or from bio_endio during read io-error recovery */

		/* So we can verify the handle in the answer packet.
		 * Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->read_requests, &req->i);

		set_bit(UNPLUG_REMOTE, &device->flags);

		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_read_req;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case QUEUE_FOR_NET_WRITE:
		/* assert something? */
		/* from __drbd_make_request only */

		/* Corresponding drbd_remove_request_interval is in
		 * drbd_req_complete() */
		D_ASSERT(device, drbd_interval_empty(&req->i));
		drbd_insert_interval(&device->write_requests, &req->i);

		/* NOTE
		 * In case the req ended up on the transfer log before being
		 * queued on the worker, it could lead to this request being
		 * missed during cleanup after connection loss.
		 * So we have to do both operations here,
		 * within the same lock that protects the transfer log.
		 *
		 * _req_add_to_epoch(req); this has to be after the
		 * _maybe_start_new_epoch(req); which happened in
		 * __drbd_make_request, because we now may set the bit
		 * again ourselves to close the current epoch.
		 *
		 * Add req to the (now) current epoch (barrier). */

		/* otherwise we may lose an unplug, which may cause some remote
		 * io-scheduler timeout to expire, increasing maximum latency,
		 * hurting performance. */
		set_bit(UNPLUG_REMOTE, &device->flags);

		/* queue work item to send data */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
		req->w.cb = w_send_dblock;
		drbd_queue_work(&connection->sender_work,
				&req->w);

		/* close the epoch, in case it outgrew the limit */
		rcu_read_lock();
		nc = rcu_dereference(connection->net_conf);
		p = nc->max_epoch_size;
		rcu_read_unlock();
		if (connection->current_tle_writes >= p)
			start_new_tl_epoch(connection);
		break;

	case QUEUE_FOR_SEND_OOS:
		mod_rq_state(req, m, 0, RQ_NET_QUEUED);
		req->w.cb = w_send_out_of_sync;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case READ_RETRY_REMOTE_CANCELED:
	case SEND_CANCELED:
	case SEND_FAILED:
		/* real cleanup will be done from tl_clear. just update flags
		 * so it is no longer marked as on the worker queue */
		mod_rq_state(req, m, RQ_NET_QUEUED, 0);
		break;

	case HANDED_OVER_TO_NETWORK:
		/* assert something? */
		if (is_pending_write_protocol_A(req))
			/* this is what is dangerous about protocol A:
			 * pretend it was successfully written on the peer. */
			mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING,
					RQ_NET_SENT|RQ_NET_OK);
		else
			mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT);
		/* It is still not yet RQ_NET_DONE until the
		 * corresponding epoch barrier got acked as well,
		 * so we know what to dirty on connection loss. */
		break;

	case OOS_HANDED_TO_NETWORK:
		/* Was not set PENDING, no longer QUEUED, so is now DONE
		 * as far as this connection is concerned. */
		mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE);
		break;

	case CONNECTION_LOST_WHILE_PENDING:
		/* transfer log cleanup after connection loss */
		mod_rq_state(req, m,
				RQ_NET_OK|RQ_NET_PENDING|RQ_COMPLETION_SUSP,
				RQ_NET_DONE);
		break;

	case CONFLICT_RESOLVED:
		/* for superseded conflicting writes of multiple primaries,
		 * there is no need to keep anything in the tl, potential
		 * node crashes are covered by the activity log.
		 *
		 * If this request had been marked as RQ_POSTPONED before,
		 * it will actually not be completed, but "restarted",
		 * resubmitted from the retry worker context. */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK);
		break;

	case WRITE_ACKED_BY_PEER_AND_SIS:
		req->rq_state |= RQ_NET_SIS;
	case WRITE_ACKED_BY_PEER:
		/* Normal operation protocol C: successfully written on peer.
		 * During resync, even in protocol != C,
		 * we requested an explicit write ack anyways.
		 * Which means we cannot even assert anything here.
		 * Nothing more to do here.
		 * We want to keep the tl in place for all protocols, to cater
		 * for volatile write-back caches on lower level devices. */
		goto ack_common;
	case RECV_ACKED_BY_PEER:
		D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK);
		/* protocol B; pretends to be successfully written on peer.
		 * see also notes above in HANDED_OVER_TO_NETWORK about
		 * protocol != C */
	ack_common:
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK);
		break;

	case POSTPONE_WRITE:
		D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK);
		/* If this node has already detected the write conflict, the
		 * worker will be waiting on misc_wait. Wake it up once this
		 * request has completed locally.
		 */
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		req->rq_state |= RQ_POSTPONED;
		if (req->i.waiting)
			wake_up(&device->misc_wait);
		/* Do not clear RQ_NET_PENDING. This request will make further
		 * progress via restart_conflicting_writes() or
		 * fail_postponed_requests(). Hopefully. */
		break;

	case NEG_ACKED:
		mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0);
		break;

	case FAIL_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;
		mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
		break;

	case RESTART_FROZEN_DISK_IO:
		if (!(req->rq_state & RQ_LOCAL_COMPLETED))
			break;

		mod_rq_state(req, m,
				RQ_COMPLETION_SUSP|RQ_LOCAL_COMPLETED,
				RQ_LOCAL_PENDING);

		rv = MR_READ;
		if (bio_data_dir(req->master_bio) == WRITE)
			rv = MR_WRITE;

		get_ldev(device); /* always succeeds in this call path */
		req->w.cb = w_restart_disk_io;
		drbd_queue_work(&connection->sender_work,
				&req->w);
		break;

	case RESEND:
		/* Simply complete (local only) READs. */
		if (!(req->rq_state & RQ_WRITE) && !req->w.cb) {
			mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0);
			break;
		}

		/* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK
		   before the connection loss (B&C only); only P_BARRIER_ACK
		   (or the local completion?) was missing when we suspended.
		   Throwing them out of the TL here by pretending we got a BARRIER_ACK.
		   During connection handshake, we ensure that the peer was not rebooted. */
		if (!(req->rq_state & RQ_NET_OK)) {
			/* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync?
			 * in that case we must not set RQ_NET_PENDING. */
			mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
			if (req->w.cb) {
				/* w.cb expected to be w_send_dblock, or w_send_read_req */
				drbd_queue_work(&connection->sender_work,
						&req->w);
				rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
			} /* else: FIXME can this happen? */
			break;
		}
		/* else, fall through to BARRIER_ACKED */

	case BARRIER_ACKED:
		/* barrier ack for READ requests does not make sense */
		if (!(req->rq_state & RQ_WRITE))
			break;

		if (req->rq_state & RQ_NET_PENDING) {
			/* barrier came in before all requests were acked.
			 * this is bad, because if the connection is lost now,
			 * we won't be able to clean them up... */
			drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n");
		}
		/* Allowed to complete requests, even while suspended.
		 * As this is called for all requests within a matching epoch,
		 * we need to filter, and only set RQ_NET_DONE for those that
		 * have actually been on the wire. */
		mod_rq_state(req, m, RQ_COMPLETION_SUSP,
				(req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0);
		break;

	case DATA_RECEIVED:
		D_ASSERT(device, req->rq_state & RQ_NET_PENDING);
		mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE);
		break;

	case QUEUE_AS_DRBD_BARRIER:
		start_new_tl_epoch(connection);
		mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
		break;
	};

	return rv;
}

/* we may do a local read if:
 * - we are consistent (of course),
 * - or we are generally inconsistent,
 *   BUT we are still/already IN SYNC for this area.
 *   since size may be bigger than BM_BLOCK_SIZE,
 *   we may need to check several bits.
 */
static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size)
{
	unsigned long sbnr, ebnr;
	sector_t esector, nr_sectors;

	if (device->state.disk == D_UP_TO_DATE)
		return true;
	if (device->state.disk != D_INCONSISTENT)
		return false;
	esector = sector + (size >> 9) - 1;
	nr_sectors = drbd_get_capacity(device->this_bdev);
	D_ASSERT(device, sector < nr_sectors);
	D_ASSERT(device, esector < nr_sectors);

	sbnr = BM_SECT_TO_BIT(sector);
	ebnr = BM_SECT_TO_BIT(esector);

	return drbd_bm_count_bits(device, sbnr, ebnr) == 0;
}

static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector,
		enum drbd_read_balancing rbm)
{
	struct backing_dev_info *bdi;
	int stripe_shift;

	switch (rbm) {
	case RB_CONGESTED_REMOTE:
		bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info;
		return bdi_read_congested(bdi);
	case RB_LEAST_PENDING:
		return atomic_read(&device->local_cnt) >
			atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt);
	case RB_32K_STRIPING: /* stripe_shift = 15 */
	case RB_64K_STRIPING:
	case RB_128K_STRIPING:
	case RB_256K_STRIPING:
	case RB_512K_STRIPING:
	case RB_1M_STRIPING: /* stripe_shift = 20 */
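		/* sector is in 512-byte units; looking at bit (stripe_shift - 9)
		 * of the sector number alternates between local and remote
		 * reads in stripes of 2^stripe_shift bytes. */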
		stripe_shift = (rbm - RB_32K_STRIPING + 15);
		return (sector >> (stripe_shift - 9)) & 1;
	case RB_ROUND_ROBIN:
		return test_and_change_bit(READ_BALANCE_RR, &device->flags);
	case RB_PREFER_REMOTE:
		return true;
	case RB_PREFER_LOCAL:
	default:
		return false;
	}
}

/*
 * complete_conflicting_writes - wait for any conflicting write requests
 *
 * The write_requests tree contains all active write requests which we
 * currently know about. Wait for any requests to complete which conflict with
 * the new one.
 *
 * Only way out: remove the conflicting intervals from the tree.
 */
static void complete_conflicting_writes(struct drbd_request *req)
{
	DEFINE_WAIT(wait);
	struct drbd_device *device = req->device;
	struct drbd_interval *i;
	sector_t sector = req->i.sector;
	int size = req->i.size;

	i = drbd_find_overlap(&device->write_requests, sector, size);
	if (!i)
		return;

	for (;;) {
		prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		i = drbd_find_overlap(&device->write_requests, sector, size);
		if (!i)
			break;
		/* Indicate to wake up device->misc_wait on progress. */
		i->waiting = true;
		spin_unlock_irq(&device->resource->req_lock);
		schedule();
		spin_lock_irq(&device->resource->req_lock);
	}
	finish_wait(&device->misc_wait, &wait);
}

/* called within req_lock and rcu_read_lock() */
static void maybe_pull_ahead(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct net_conf *nc;
	bool congested = false;
	enum drbd_on_congestion on_congestion;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	on_congestion = nc ? nc->on_congestion : OC_BLOCK;
	rcu_read_unlock();
	if (on_congestion == OC_BLOCK ||
	    connection->agreed_pro_version < 96)
		return;

	if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD)
		return; /* nothing to do ... */

	/* If I don't even have good local storage, we can not reasonably try
	 * to pull ahead of the peer. We also need the local reference to make
	 * sure device->act_log is there.
	 */
	if (!get_ldev_if_state(device, D_UP_TO_DATE))
		return;

	if (nc->cong_fill &&
	    atomic_read(&device->ap_in_flight) >= nc->cong_fill) {
		drbd_info(device, "Congestion-fill threshold reached\n");
		congested = true;
	}

	if (device->act_log->used >= nc->cong_extents) {
		drbd_info(device, "Congestion-extents threshold reached\n");
		congested = true;
	}

	if (congested) {
		/* start a new epoch for non-mirrored writes */
		start_new_tl_epoch(first_peer_device(device)->connection);

		if (on_congestion == OC_PULL_AHEAD)
			_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
		else /* nc->on_congestion == OC_DISCONNECT */
			_drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL);
	}
	put_ldev(device);
}

/* If this returns false, and req->private_bio is still set,
 * this should be submitted locally.
 *
 * If it returns false, but req->private_bio is not set,
 * we do not have access to good data :(
 *
 * Otherwise, this destroys req->private_bio, if any,
 * and returns true.
 */
static bool do_remote_read(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	enum drbd_read_balancing rbm;

	if (req->private_bio) {
		if (!drbd_may_do_local_read(device,
					req->i.sector, req->i.size)) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
	}

	if (device->state.pdsk != D_UP_TO_DATE)
		return false;

	if (req->private_bio == NULL)
		return true;

	/* TODO: improve read balancing decisions, take into account drbd
	 * protocol, pending requests etc. */

	rcu_read_lock();
	rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing;
	rcu_read_unlock();

	if (rbm == RB_PREFER_LOCAL && req->private_bio)
		return false; /* submit locally */

	if (remote_due_to_read_balancing(device, req->i.sector, rbm)) {
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		return true;
	}

	return false;
}

/* returns number of connections (== 1, for drbd 8.4)
 * expected to actually write this data,
 * which does NOT include those that we are L_AHEAD for. */
static int drbd_process_write_request(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	int remote, send_oos;

	remote = drbd_should_do_remote(device->state);
	send_oos = drbd_should_send_out_of_sync(device->state);

	/* Need to replicate writes. Unless it is an empty flush,
	 * which is better mapped to a DRBD P_BARRIER packet,
	 * also for drbd wire protocol compatibility reasons.
	 * If this was a flush, just start a new epoch.
	 * Unless the current epoch was empty anyways, or we are not currently
	 * replicating, in which case there is no point. */
	if (unlikely(req->i.size == 0)) {
		/* The only size==0 bios we expect are empty flushes. */
		D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
		if (remote)
			_req_mod(req, QUEUE_AS_DRBD_BARRIER);
		return remote;
	}

	if (!remote && !send_oos)
		return 0;

	D_ASSERT(device, !(remote && send_oos));

	if (remote) {
		_req_mod(req, TO_BE_SENT);
		_req_mod(req, QUEUE_FOR_NET_WRITE);
	} else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
		_req_mod(req, QUEUE_FOR_SEND_OOS);

	return remote;
}

static void
drbd_submit_req_private_bio(struct drbd_request *req)
{
	struct drbd_device *device = req->device;
	struct bio *bio = req->private_bio;
	const int rw = bio_rw(bio);

	bio->bi_bdev = device->ldev->backing_bdev;

	/* State may have changed since we grabbed our reference on the
	 * ->ldev member. Double check, and short-circuit to endio.
	 * In case the last activity log transaction failed to get on
	 * stable storage, and this is a WRITE, we may not even submit
	 * this bio. */
	if (get_ldev(device)) {
		req->pre_submit_jif = jiffies;
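		/* Fault injection hook: with fault insertion built in
		 * (CONFIG_DRBD_FAULT_INJECTION) and a matching data fault
		 * type armed, complete the bio with -EIO instead of
		 * submitting it to the backing device. */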
		if (drbd_insert_fault(device,
				rw == WRITE ? DRBD_FAULT_DT_WR
				: rw == READ ? DRBD_FAULT_DT_RD
				: DRBD_FAULT_DT_RA))
			bio_endio(bio, -EIO);
		else
			generic_make_request(bio);
		put_ldev(device);
	} else
		bio_endio(bio, -EIO);
}
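
/* Writes that cannot take the activity log fast path are handed to the
 * per-device submitter: they are queued on device->submit.writes and picked
 * up by do_submit(), which batches the required activity log transaction
 * commits before sending and submitting them. */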
static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req)
{
	spin_lock_irq(&device->resource->req_lock);
	list_add_tail(&req->tl_requests, &device->submit.writes);
	list_add_tail(&req->req_pending_master_completion,
			&device->pending_master_completion[1 /* WRITE */]);
	spin_unlock_irq(&device->resource->req_lock);
	queue_work(device->submit.wq, &device->submit.worker);
	/* do_submit() may sleep internally on al_wait, too */
	wake_up(&device->al_wait);
}

/* returns the new drbd_request pointer, if the caller is expected to
 * drbd_send_and_submit() it (to save latency), or NULL if we queued the
 * request on the submitter thread.
 * Returns ERR_PTR(-ENOMEM) if we cannot allocate a drbd_request.
 */
static struct drbd_request *
drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	const int rw = bio_data_dir(bio);
	struct drbd_request *req;

	/* allocate outside of all locks; */
	req = drbd_req_new(device, bio);
	if (!req) {
		dec_ap_bio(device);
		/* only pass the error to the upper layers.
		 * if user cannot handle io errors, that's not our business. */
		drbd_err(device, "could not kmalloc() req\n");
		bio_endio(bio, -ENOMEM);
		return ERR_PTR(-ENOMEM);
	}
	req->start_jif = start_jif;

	if (!get_ldev(device)) {
		bio_put(req->private_bio);
		req->private_bio = NULL;
	}

	/* Update disk stats */
	_drbd_start_io_acct(device, req);

	if (rw == WRITE && req->private_bio && req->i.size
	    && !test_bit(AL_SUSPENDED, &device->flags)) {
		if (!drbd_al_begin_io_fastpath(device, &req->i)) {
			atomic_inc(&device->ap_actlog_cnt);
			drbd_queue_write(device, req);
			return NULL;
		}
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
	}

	return req;
}

static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
	struct drbd_resource *resource = device->resource;
	const int rw = bio_rw(req->master_bio);
	struct bio_and_error m = { NULL, };
	bool no_remote = false;
	bool submit_private_bio = false;

	spin_lock_irq(&resource->req_lock);
	if (rw == WRITE) {
		/* This may temporarily give up the req_lock,
		 * but will re-acquire it before it returns here.
		 * Needs to be before the check on drbd_suspended() */
		complete_conflicting_writes(req);
		/* no more giving up req_lock from now on! */

		/* check for congestion, and potentially stop sending
		 * full data updates, but start sending "dirty bits" only. */
		maybe_pull_ahead(device);
	}

	if (drbd_suspended(device)) {
		/* push back and retry: */
		req->rq_state |= RQ_POSTPONED;
		if (req->private_bio) {
			bio_put(req->private_bio);
			req->private_bio = NULL;
			put_ldev(device);
		}
		goto out;
	}

	/* We fail READ/READA early, if we can not serve it.
	 * We must do this before req is registered on any lists.
	 * Otherwise, drbd_req_complete() will queue failed READ for retry. */
	if (rw != WRITE) {
		if (!do_remote_read(req) && !req->private_bio)
			goto nodata;
	}

	/* which transfer log epoch does this belong to? */
	req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr);

	/* no point in adding empty flushes to the transfer log,
	 * they are mapped to drbd barriers already. */
	if (likely(req->i.size!=0)) {
		if (rw == WRITE)
			first_peer_device(device)->connection->current_tle_writes++;

		list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log);
	}

	if (rw == WRITE) {
		if (!drbd_process_write_request(req))
			no_remote = true;
	} else {
		/* We either have a private_bio, or we can read from remote.
		 * Otherwise we had done the goto nodata above. */
		if (req->private_bio == NULL) {
			_req_mod(req, TO_BE_SENT);
			_req_mod(req, QUEUE_FOR_NET_READ);
		} else
			no_remote = true;
	}

	/* If it took the fast path in drbd_request_prepare, add it here.
	 * The slow path has added it already. */
	if (list_empty(&req->req_pending_master_completion))
		list_add_tail(&req->req_pending_master_completion,
				&device->pending_master_completion[rw == WRITE]);
	if (req->private_bio) {
		/* needs to be marked within the same spinlock */
		list_add_tail(&req->req_pending_local,
				&device->pending_completion[rw == WRITE]);
		_req_mod(req, TO_BE_SUBMITTED);
		/* but we need to give up the spinlock to submit */
		submit_private_bio = true;
	} else if (no_remote) {
nodata:
		if (__ratelimit(&drbd_ratelimit_state))
			drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
					(unsigned long long)req->i.sector, req->i.size >> 9);
		/* A write may have been queued for send_oos, however.
		 * So we can not simply free it, we must go through drbd_req_put_completion_ref() */
	}

out:
	if (drbd_req_put_completion_ref(req, &m, 1))
		kref_put(&req->kref, drbd_req_destroy);
	spin_unlock_irq(&resource->req_lock);

	/* Even though above is a kref_put(), this is safe.
	 * As long as we still need to submit our private bio,
	 * we hold a completion ref, and the request cannot disappear.
	 * If however this request did not even have a private bio to submit
	 * (e.g. remote read), req may already be invalid now.
	 * That's why we cannot check on req->private_bio. */
	if (submit_private_bio)
		drbd_submit_req_private_bio(req);
	if (m.bio)
		complete_master_bio(device, &m);
}

void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif)
{
	struct drbd_request *req = drbd_request_prepare(device, bio, start_jif);
	if (IS_ERR_OR_NULL(req))
		return;
	drbd_send_and_submit(device, req);
}

static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
	struct drbd_request *req, *tmp;
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		const int rw = bio_data_dir(req->master_bio);

		if (rw == WRITE /* rw != WRITE should not even end up here! */
		    && req->private_bio && req->i.size
		    && !test_bit(AL_SUSPENDED, &device->flags)) {
			if (!drbd_al_begin_io_fastpath(device, &req->i))
				continue;

			req->rq_state |= RQ_IN_ACT_LOG;
			req->in_actlog_jif = jiffies;
			atomic_dec(&device->ap_actlog_cnt);
		}

		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
}
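
/* Sort the requests on "incoming" into those that could reserve their
 * activity log slots without blocking ("pending", to be submitted after the
 * next activity log transaction commit) and those that have to wait
 * ("later", e.g. because the extent is currently locked by resync).
 * Stops early if the activity log has no free update slots left;
 * returns true if at least one request became pending. */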
static bool prepare_al_transaction_nonblock(struct drbd_device *device,
					    struct list_head *incoming,
					    struct list_head *pending,
					    struct list_head *later)
{
	struct drbd_request *req, *tmp;
	int wake = 0;
	int err;

	spin_lock_irq(&device->al_lock);
	list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
		err = drbd_al_begin_io_nonblock(device, &req->i);
		if (err == -ENOBUFS)
			break;
		if (err == -EBUSY)
			wake = 1;
		if (err)
			list_move_tail(&req->tl_requests, later);
		else
			list_move_tail(&req->tl_requests, pending);
	}
	spin_unlock_irq(&device->al_lock);
	if (wake)
		wake_up(&device->al_wait);
	return !list_empty(pending);
}

void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{
	struct drbd_request *req, *tmp;

	list_for_each_entry_safe(req, tmp, pending, tl_requests) {
		req->rq_state |= RQ_IN_ACT_LOG;
		req->in_actlog_jif = jiffies;
		atomic_dec(&device->ap_actlog_cnt);
		list_del_init(&req->tl_requests);
		drbd_send_and_submit(device, req);
	}
}

void do_submit(struct work_struct *ws)
{
        struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker);
        LIST_HEAD(incoming);    /* from drbd_make_request() */
        LIST_HEAD(pending);     /* to be submitted after next AL-transaction commit */
        LIST_HEAD(busy);        /* blocked by resync requests */

        /* grab new incoming requests */
        spin_lock_irq(&device->resource->req_lock);
        list_splice_tail_init(&device->submit.writes, &incoming);
        spin_unlock_irq(&device->resource->req_lock);

        for (;;) {
                DEFINE_WAIT(wait);

                /* move used-to-be-busy back to front of incoming */
                list_splice_init(&busy, &incoming);
                submit_fast_path(device, &incoming);
                if (list_empty(&incoming))
                        break;

                for (;;) {
                        prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE);

                        list_splice_init(&busy, &incoming);
                        prepare_al_transaction_nonblock(device, &incoming, &pending, &busy);
                        if (!list_empty(&pending))
                                break;

                        schedule();

                        /* If all currently "hot" activity log extents are kept busy by
                         * incoming requests, we still must not totally starve new
                         * requests to "cold" extents.
                         * Something left on &incoming means there had not been
                         * enough update slots available, and the activity log
                         * has been marked as "starving".
                         *
                         * Try again now, without looking for new requests,
                         * effectively blocking all new requests until we made
                         * at least _some_ progress with what we currently have.
                         */
                        if (!list_empty(&incoming))
                                continue;

                        /* Nothing moved to pending, but nothing left
                         * on incoming: all moved to busy!
                         * Grab new and iterate. */
                        spin_lock_irq(&device->resource->req_lock);
                        list_splice_tail_init(&device->submit.writes, &incoming);
                        spin_unlock_irq(&device->resource->req_lock);
                }
                finish_wait(&device->al_wait, &wait);

                /* If the transaction was full before all incoming requests
                 * had been processed, skip ahead to commit, and iterate
                 * without splicing in more incoming requests from upper layers.
                 *
                 * Else, if all incoming have been processed,
                 * they have become either "pending" (to be submitted after
                 * next transaction commit) or "busy" (blocked by resync).
                 *
                 * Maybe more was queued, while we prepared the transaction?
                 * Try to stuff those into this transaction as well.
                 * Be strictly non-blocking here,
                 * we already have something to commit.
                 *
                 * Commit if we don't make any more progress.
                 */
                while (list_empty(&incoming)) {
                        LIST_HEAD(more_pending);
                        LIST_HEAD(more_incoming);
                        bool made_progress;

                        /* It is ok to look outside the lock,
                         * it's only an optimization anyway */
                        if (list_empty(&device->submit.writes))
                                break;

                        spin_lock_irq(&device->resource->req_lock);
                        list_splice_tail_init(&device->submit.writes, &more_incoming);
                        spin_unlock_irq(&device->resource->req_lock);

                        if (list_empty(&more_incoming))
                                break;

                        made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy);

                        list_splice_tail_init(&more_pending, &pending);
                        list_splice_tail_init(&more_incoming, &incoming);
                        if (!made_progress)
                                break;
                }

                drbd_al_begin_io_commit(device);
                send_and_submit_pending(device, &pending);
        }
}
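
/* do_submit() amortizes the expensive activity-log commit: it keeps preparing
 * requests non-blockingly (sleeping on al_wait only when nothing at all could
 * be prepared) and then pays a single drbd_al_begin_io_commit() for the whole
 * batch before submitting it.  Below is a standalone sketch of that
 * batch-then-commit-once shape; try_prepare(), commit(), submit() and the int
 * arrays are invented stand-ins, and the sleep/wake machinery is omitted.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define NITEMS 8

static int commits;

static bool try_prepare(int item)
{
	/* pretend only even items fit into the current "transaction" */
	return (item % 2) == 0;
}

static void commit(void)
{
	commits++;			/* the expensive part, paid once per batch */
}

static void submit(int item)
{
	printf("submitting %d (after commit %d)\n", item, commits);
}

int main(void)
{
	int incoming[NITEMS], n = 0;

	for (int i = 0; i < NITEMS; i++)
		incoming[n++] = i;

	while (n) {
		int pending[NITEMS], np = 0, left = 0;

		/* non-blocking prepare pass: batch what fits, keep the rest */
		for (int i = 0; i < n; i++) {
			if (try_prepare(incoming[i]))
				pending[np++] = incoming[i];
			else
				incoming[left++] = incoming[i];
		}
		n = left;

		if (!np)
			break;		/* the real code would sleep on al_wait here */

		commit();		/* one commit covers the whole batch */
		for (int i = 0; i < np; i++)
			submit(pending[i]);
	}
	return 0;
}
#endif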

void drbd_make_request(struct request_queue *q, struct bio *bio)
{
        struct drbd_device *device = (struct drbd_device *) q->queuedata;
        unsigned long start_jif;

        start_jif = jiffies;

        /*
         * what we "blindly" assume:
         */
        D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512));

        inc_ap_bio(device);
        __drbd_make_request(device, bio, start_jif);
}

/* This is called by bio_add_page().
 *
 * q->max_hw_sectors and other global limits are already enforced there.
 *
 * We need to call down to our lower level device,
 * in case it has special restrictions.
 *
 * We also may need to enforce configured max-bio-bvecs limits.
 *
 * As long as the BIO is empty we have to allow at least one bvec,
 * regardless of size and offset, so no need to ask lower levels.
 */
int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec)
{
        struct drbd_device *device = (struct drbd_device *) q->queuedata;
        unsigned int bio_size = bvm->bi_size;
        int limit = DRBD_MAX_BIO_SIZE;
        int backing_limit;

        if (bio_size && get_ldev(device)) {
                unsigned int max_hw_sectors = queue_max_hw_sectors(q);
                struct request_queue * const b =
                        device->ldev->backing_bdev->bd_disk->queue;
                if (b->merge_bvec_fn) {
                        bvm->bi_bdev = device->ldev->backing_bdev;
                        backing_limit = b->merge_bvec_fn(b, bvm, bvec);
                        limit = min(limit, backing_limit);
                }
                put_ldev(device);
                if ((limit >> 9) > max_hw_sectors)
                        limit = max_hw_sectors << 9;
        }
        return limit;
}
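
/* A standalone sketch of the clamping done above: start from the DRBD
 * maximum, take the backing device's answer if it has one, then cap by the
 * hardware limit, which is expressed in 512-byte sectors (hence the >> 9 and
 * << 9 conversions between bytes and sectors).  The 1 MiB maximum, the sample
 * numbers and clamp_bio_limit() are assumptions made up for the example.
 */
#if 0
#include <stdio.h>

#define DRBD_MAX_BIO_SIZE_EXAMPLE	(1U << 20)	/* assume 1 MiB for the example */

static int min_int(int a, int b) { return a < b ? a : b; }

static int clamp_bio_limit(int backing_limit, unsigned int max_hw_sectors)
{
	int limit = DRBD_MAX_BIO_SIZE_EXAMPLE;

	if (backing_limit >= 0)			/* backing device had an opinion */
		limit = min_int(limit, backing_limit);
	if ((unsigned int)(limit >> 9) > max_hw_sectors)	/* bytes -> sectors */
		limit = max_hw_sectors << 9;			/* sectors -> bytes */
	return limit;
}

int main(void)
{
	/* backing device allows 256 KiB, hardware allows 256 sectors (128 KiB) */
	printf("%d bytes\n", clamp_bio_limit(256 * 1024, 256));	/* -> 131072 */
	/* no backing opinion, hardware allows 8192 sectors (4 MiB) */
	printf("%d bytes\n", clamp_bio_limit(-1, 8192));	/* -> 1048576 */
	return 0;
}
#endif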

void request_timer_fn(unsigned long data)
{
        struct drbd_device *device = (struct drbd_device *) data;
        struct drbd_connection *connection = first_peer_device(device)->connection;
        struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */
        struct net_conf *nc;
        unsigned long oldest_submit_jif;
        unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
        unsigned long now;

        rcu_read_lock();
        nc = rcu_dereference(connection->net_conf);
        if (nc && device->state.conn >= C_WF_REPORT_PARAMS)
                ent = nc->timeout * HZ/10 * nc->ko_count;

        if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */
                dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10;
                put_ldev(device);
        }
        rcu_read_unlock();

        et = min_not_zero(dt, ent);

        if (!et)
                return; /* Recurring timer stopped */

        now = jiffies;
        nt = now + et;

        spin_lock_irq(&device->resource->req_lock);
        req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local);
        req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local);
        req_peer = connection->req_not_net_done;
        /* maybe the oldest request waiting for the peer is in fact still
         * blocking in tcp sendmsg */
        if (!req_peer && connection->req_next && connection->req_next->pre_send_jif)
                req_peer = connection->req_next;

        /* evaluate the oldest peer request only in one timer! */
        if (req_peer && req_peer->device != device)
                req_peer = NULL;

        /* do we have something to evaluate? */
        if (req_peer == NULL && req_write == NULL && req_read == NULL)
                goto out;

        oldest_submit_jif =
                (req_write && req_read)
                ? ( time_before(req_write->pre_submit_jif, req_read->pre_submit_jif)
                  ? req_write->pre_submit_jif : req_read->pre_submit_jif )
                : req_write ? req_write->pre_submit_jif
                : req_read ? req_read->pre_submit_jif : now;
        /* The request is considered timed out if
         * - we have some effective timeout from the configuration,
         *   with the above state restrictions applied,
         * - the oldest request is waiting for a response from the network
         *   or the local disk, respectively,
         * - the oldest request is in fact older than the effective timeout,
         * - the connection was established (or the disk was attached)
         *   for longer than the timeout already.
         * Note that for 32-bit jiffies and very stable connections/disks,
         * we may have a wrap-around, which is caught by
         * !time_in_range(now, last_..._jif, last_..._jif + timeout).
         *
         * Side effect: once per 32-bit wrap-around interval, which means every
         * ~198 days with 250 HZ, we have a window where the timeout would need
         * to expire twice (worst case) to become effective. Good enough.
         */
        if (ent && req_peer &&
            time_after(now, req_peer->pre_send_jif + ent) &&
            !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
                drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n");
                _conn_request_state(connection, NS(conn, C_TIMEOUT), CS_VERBOSE | CS_HARD);
        }
        if (dt && oldest_submit_jif != now &&
            time_after(now, oldest_submit_jif + dt) &&
            !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) {
                drbd_warn(device, "Local backing device failed to meet the disk-timeout\n");
                __drbd_chk_io_error(device, DRBD_FORCE_DETACH);
        }

        /* Reschedule timer for the nearest not already expired timeout.
         * Fall back to now + min(effective network timeout, disk timeout). */
        ent = (ent && req_peer && time_before(now, req_peer->pre_send_jif + ent))
                ? req_peer->pre_send_jif + ent : now + et;
        dt = (dt && oldest_submit_jif != now && time_before(now, oldest_submit_jif + dt))
                ? oldest_submit_jif + dt : now + et;
        nt = time_before(ent, dt) ? ent : dt;
out:
        spin_unlock_irq(&device->resource->req_lock);
        mod_timer(&device->request_timer, nt);
}
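
/* A standalone sketch of the timeout arithmetic used above: the configured
 * timeouts are given in tenths of a second (hence the * HZ / 10), the network
 * deadline is additionally multiplied by ko-count, the timer is re-armed with
 * the smaller non-zero of the two intervals, and the jiffies comparisons stay
 * correct across a wrap-around because they are done via signed subtraction.
 * HZ = 250, the sample configuration values and the sketch_* helpers are
 * assumptions made for the example, not DRBD code.
 */
#if 0
#include <stdio.h>

#define HZ 250UL

/* wrap-safe "is a after b?", same idea as the kernel's time_after() */
static int sketch_time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

static unsigned long sketch_min_not_zero(unsigned long a, unsigned long b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	unsigned long timeout = 60;	/* tenths of a second, i.e. 6 s */
	unsigned long ko_count = 7;
	unsigned long disk_timeout = 0;	/* 0: no disk timeout configured */

	unsigned long ent = timeout * HZ / 10 * ko_count;	/* network deadline, jiffies */
	unsigned long dt  = disk_timeout * HZ / 10;		/* disk deadline, jiffies */
	unsigned long et  = sketch_min_not_zero(dt, ent);	/* re-arm interval */

	/* a request queued shortly before a jiffies wrap-around, checked after it */
	unsigned long pre_send_jif = (unsigned long)-5000;
	unsigned long now = 8000;

	printf("ent=%lu jiffies (%lu s), re-arm every %lu jiffies\n",
	       ent, ent / HZ, et);
	if (ent && sketch_time_after(now, pre_send_jif + ent))
		printf("peer request timed out, despite the wrap-around\n");
	return 0;
}
#endif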