blk-settings.c

/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;

/**
 * blk_queue_prep_rq - set a prepare_request function for queue
 * @q: queue
 * @pfn: prepare_request function
 *
 * It's possible for a queue to register a prepare_request callback which
 * is invoked before the request is handed to the request_fn. The goal of
 * the function is to prepare a request for I/O, it can be used to build a
 * cdb from the request data for instance.
 *
 */
void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn)
{
	q->prep_rq_fn = pfn;
}
EXPORT_SYMBOL(blk_queue_prep_rq);

/**
 * blk_queue_unprep_rq - set an unprepare_request function for queue
 * @q: queue
 * @ufn: unprepare_request function
 *
 * It's possible for a queue to register an unprepare_request callback
 * which is invoked before the request is finally completed. The goal
 * of the function is to deallocate any data that was allocated in the
 * prepare_request callback.
 *
 */
void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn)
{
	q->unprep_rq_fn = ufn;
}
EXPORT_SYMBOL(blk_queue_unprep_rq);

/**
 * blk_queue_merge_bvec - set a merge_bvec function for queue
 * @q: queue
 * @mbfn: merge_bvec_fn
 *
 * Usually queues have static limitations on the max sectors or segments that
 * we can put in a request. Stacking drivers may have some settings that
 * are dynamic, and thus we have to query the queue whether it is ok to
 * add a new bio_vec to a bio at a given offset or not. If the block device
 * has such limitations, it needs to register a merge_bvec_fn to control
 * the size of bio's sent to it. Note that a block device *must* allow a
 * single page to be added to an empty bio. The block device driver may want
 * to use the bio_split() function to deal with these bio's. By default
 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
 * honored.
 */
void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn)
{
	q->merge_bvec_fn = mbfn;
}
EXPORT_SYMBOL(blk_queue_merge_bvec);

void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

void blk_queue_rq_timed_out(struct request_queue *q, rq_timed_out_fn *fn)
{
	q->rq_timed_out_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timed_out);

void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
{
	q->lld_busy_fn = fn;
}
EXPORT_SYMBOL_GPL(blk_queue_lld_busy);

/**
 * blk_set_default_limits - reset limits to default values
 * @lim: the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->discard_zeroes_data = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->cluster = 1;
}
EXPORT_SYMBOL(blk_set_default_limits);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->discard_zeroes_data = 1;
	lim->max_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);

/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q: the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready. This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them. This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);
	blk_queue_congestion_threshold(q);
	q->nr_batching = BLK_BATCH_REQ;

	blk_set_default_limits(&q->limits);

	/*
	 * by default assume old behaviour and bounce for any highmem page
	 */
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
}
EXPORT_SYMBOL(blk_queue_make_request);
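
/*
 * Example (illustrative sketch only; the driver function below is
 * hypothetical and not part of this file):
 *
 *	static void example_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		(handle the bio directly, then complete it)
 *		bio_endio(bio, 0);
 *	}
 *
 *	q = blk_alloc_queue(GFP_KERNEL);
 *	if (q)
 *		blk_queue_make_request(q, example_make_request);
 */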

/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU. Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
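
/*
 * Example (hedged sketch; the limits passed below are illustrative):
 *
 *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);	(legacy device that can
 *							 only DMA to ISA memory)
 *
 * A driver whose hardware handles 64-bit addresses would instead pass
 * BLK_BOUNCE_ANY so that no bouncing is done at all.
 */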

/**
 * blk_limits_max_hw_sectors - set hard and soft limit of max sectors for request
 * @limits: the queue limits
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests. max_hw_sectors is set by
 *    the device driver based upon the combined capabilities of I/O
 *    controller and storage device.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests. This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_limits_max_hw_sectors(struct queue_limits *limits, unsigned int max_hw_sectors)
{
	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	limits->max_hw_sectors = max_hw_sectors;
	limits->max_sectors = min_t(unsigned int, max_hw_sectors,
				    BLK_DEF_MAX_SECTORS);
}
EXPORT_SYMBOL(blk_limits_max_hw_sectors);

/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q: the request queue for the device
 * @max_hw_sectors: max hardware sectors in the usual 512b unit
 *
 * Description:
 *    See description for blk_limits_max_hw_sectors().
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	blk_limits_max_hw_sectors(&q->limits, max_hw_sectors);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);

/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q: the request queue for the device
 * @chunk_sectors: chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the chunk size
 *    must currently be a power-of-2 in sectors. Also note that the block
 *    layer must accept a page worth of data at any offset. So if the
 *    crossing of chunks is a hard limitation in the driver, it must still be
 *    prepared to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	BUG_ON(!is_power_of_2(chunk_sectors));
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);

/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q: the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);

/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q: the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
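
/*
 * Example (illustrative values only, assuming a device that discards in
 * 1 MiB granules and accepts up to 8 MiB per discard command):
 *
 *	q->limits.discard_granularity = 1 << 20;
 *	blk_queue_max_discard_sectors(q, 8 << 11);	(8 MiB in 512b sectors)
 *	blk_queue_max_write_same_sectors(q, UINT_MAX);
 */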

/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q: the request queue for the device
 * @max_segments: max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);

/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q: the request queue for the device
 * @max_size: max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_CACHE_SIZE) {
		max_size = PAGE_CACHE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
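
/*
 * Example (sketch with assumed controller capabilities: 1 MiB per
 * request, a 128-entry scatter/gather table, 64 KiB per element):
 *
 *	blk_queue_max_hw_sectors(q, 2048);	(1 MiB in 512b sectors)
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_max_segment_size(q, 64 * 1024);
 */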

/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q: the request queue for the device
 * @size: the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address. The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);

/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q: the request queue for the device
 * @size: the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);

/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q: the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset. Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);

/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min: smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size. This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);

/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q: the request queue for the device
 * @min: smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty. For disk drives this is often the
 *   physical block size. For RAID arrays it is often the stripe chunk
 *   size. A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);

/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt: smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O. This is rarely reported
 *   for disk drives. For RAID arrays it is usually the stripe width or
 *   the internal track size. A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);

/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q: the request queue for the device
 * @opt: optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O. This is rarely reported
 *   for disk drives. For RAID arrays it is usually the stripe width or
 *   the internal track size. A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
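
/*
 * Example (illustrative values for a 512e disk sitting in a RAID5 set
 * with a 64 KiB chunk and three data disks; none of the numbers come
 * from this file):
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_io_min(q, 64 * 1024);		(chunk size)
 *	blk_queue_io_opt(q, 3 * 64 * 1024);	(full stripe width)
 */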

/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t: the stacking driver (top)
 * @b: the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments. The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices. The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible. The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment. Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm(t->io_opt, b->io_opt);

	t->cluster &= b->cluster;
	t->discard_zeroes_data &= b->discard_zeroes_data;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);

/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t: the stacking driver limits (top device)
 * @bdev: the component block_device (bottom)
 * @start: first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device. Returns
 *    0 if alignment didn't change. Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);

/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk: MD/DM gendisk (top)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
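
/*
 * Example (sketch of the expected calling pattern for a stacking driver;
 * the iteration below is pseudocode, not a real loop in any driver):
 *
 *	blk_set_stacking_limits(&limits);
 *	(for each component device in the map)
 *		bdev_stack_limits(&limits, component_bdev, data_start_sector);
 *
 * or, once the top-level gendisk exists:
 *
 *	disk_stack_limits(top_disk, component_bdev, data_offset_in_bytes);
 */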

/**
 * blk_queue_dma_pad - set pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);

/**
 * blk_queue_update_dma_pad - update pad mask
 * @q: the request queue for the device
 * @mask: pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);

/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q: the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf: physically contiguous buffer
 * @size: size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer. They have to have a
 * real area of memory to transfer it into. The use case for this is
 * ATAPI devices in DMA mode. If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer. What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer. If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
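
/*
 * Example (hedged sketch of an ATAPI-style setup; the callback, buffer
 * size and names below are assumptions, not taken from this file):
 *
 *	static int example_drain_needed(struct request *rq)
 *	{
 *		return blk_rq_bytes(rq) & 511;	(drain odd-sized transfers)
 *	}
 *
 *	buf = kmalloc(4096, GFP_KERNEL);
 *	if (buf)
 *		blk_queue_dma_drain(q, example_drain_needed, buf, 4096);
 */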

/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q: the request queue for the device
 * @mask: the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_CACHE_SIZE - 1) {
		mask = PAGE_CACHE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);

/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * description:
 *    set required memory and length alignment for direct dma transactions.
 *    this is used when building direct io requests for the queue.
 *
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);

/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q: the request queue for the device
 * @mask: alignment mask
 *
 * description:
 *    update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone. The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 *
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
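
/*
 * Example (illustrative; a controller that needs 4-byte aligned buffers
 * and lengths passes the corresponding mask):
 *
 *	blk_queue_dma_alignment(q, 4 - 1);
 *
 * A transport stacked on top may later raise, but never lower, the
 * requirement with blk_queue_update_dma_alignment(q, 511).
 */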

/**
 * blk_queue_flush - configure queue's cache flush capability
 * @q: the request queue for the device
 * @flush: 0, REQ_FLUSH or REQ_FLUSH | REQ_FUA
 *
 * Tell block layer cache flush capability of @q. If it supports
 * flushing, REQ_FLUSH should be set. If it supports bypassing
 * write cache for individual writes, REQ_FUA should be set.
 */
void blk_queue_flush(struct request_queue *q, unsigned int flush)
{
	WARN_ON_ONCE(flush & ~(REQ_FLUSH | REQ_FUA));

	if (WARN_ON_ONCE(!(flush & REQ_FLUSH) && (flush & REQ_FUA)))
		flush &= ~REQ_FUA;

	q->flush_flags = flush & (REQ_FLUSH | REQ_FUA);
}
EXPORT_SYMBOL_GPL(blk_queue_flush);
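
/*
 * Example (illustrative; a driver with a volatile write-back cache and
 * FUA support advertises both flags, a write-through device passes 0):
 *
 *	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 */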

void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
	q->flush_not_queueable = !queueable;
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);