blk-mq-sysfs.c

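/*
 * blk-mq sysfs support: exposes per-software-context (blk_mq_ctx) and
 * per-hardware-context (blk_mq_hw_ctx) statistics and state under
 * /sys/block/<disk>/mq/.
 */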
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * The ctx and hctx kobjects are embedded in structures that blk-mq
 * allocates and frees itself, so there is nothing left to do when the
 * last reference is dropped.
 */
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}

struct blk_mq_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_ctx *, char *);
        ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
        ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

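/*
 * Attribute plumbing: sysfs enters through the show/store hooks wired up
 * in the sysfs_ops further down, and container_of() recovers first the
 * entry and then the context the kobject is embedded in.  q->sysfs_lock
 * together with the blk_queue_dying() check keeps readers away from a
 * queue that is being torn down.
 *
 * A minimal sketch of adding a new read-only per-ctx attribute (the
 * "example" name and its show helper are hypothetical, not part of this
 * file):
 *
 *	static ssize_t blk_mq_sysfs_example_show(struct blk_mq_ctx *ctx,
 *						 char *page)
 *	{
 *		return sprintf(page, "%lu\n", ctx->rq_merged);
 *	}
 *
 *	static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_example = {
 *		.attr = {.name = "example", .mode = S_IRUGO },
 *		.show = blk_mq_sysfs_example_show,
 *	};
 *
 * plus an entry in default_ctx_attrs[] below.
 */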
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
                                 char *page)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(ctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *page, size_t length)
{
        struct blk_mq_ctx_sysfs_entry *entry;
        struct blk_mq_ctx *ctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
        ctx = container_of(kobj, struct blk_mq_ctx, kobj);
        q = ctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(ctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
                                    struct attribute *attr, char *page)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->show)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->show(hctx, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
                                     struct attribute *attr, const char *page,
                                     size_t length)
{
        struct blk_mq_hw_ctx_sysfs_entry *entry;
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q;
        ssize_t res;

        entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
        hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
        q = hctx->queue;

        if (!entry->store)
                return -EIO;

        res = -ENOENT;
        mutex_lock(&q->sysfs_lock);
        if (!blk_queue_dying(q))
                res = entry->store(hctx, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

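/*
 * Per-ctx statistics files.  "dispatched" and "completed" print the sync
 * counter (index [1]) followed by the async counter (index [0]), so for
 * example (values illustrative only):
 *
 *	$ cat /sys/block/<disk>/mq/0/cpu0/dispatched
 *	42 1337
 *
 * would report 42 sync and 1337 async dispatches from this software
 * context.
 */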
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
                       ctx->rq_dispatched[0]);
}

static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu\n", ctx->rq_merged);
}

static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
        return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
                       ctx->rq_completed[0]);
}

static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
        char *start_page = page;
        struct request *rq;

        page += sprintf(page, "%s:\n", msg);

        list_for_each_entry(rq, list, queuelist)
                page += sprintf(page, "\t%p\n", rq);

        return page - start_page;
}

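/*
 * The "rq_list" and "pending" files below dump the pending request lists
 * via sysfs_list_show().  Note that nothing here bounds the output to the
 * single page sysfs provides, so these files are really only suitable for
 * debugging with short lists.
 */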
static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
        ssize_t ret;

        spin_lock(&ctx->lock);
        ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
        spin_unlock(&ctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
                                           char *page)
{
        return sprintf(page, "%lu\n", hctx->queued);
}

static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%lu\n", hctx->run);
}

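/*
 * hctx->dispatched[] is a coarse histogram of how many requests each
 * queue run moved to hardware: bucket 0 counts runs that dispatched
 * nothing, and each following row is labelled with its bucket's lower
 * bound (1 << (i - 1)), i.e. roughly power-of-two batch sizes.  Example
 * output (counts illustrative only):
 *
 *	       0	12
 *	       1	104
 *	       2	37
 *	       4	9
 *	...
 */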
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
                                               char *page)
{
        char *start_page = page;
        int i;

        page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

        for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
                unsigned long d = 1U << (i - 1);

                page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
        }
        return page - start_page;
}

static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
                                            char *page)
{
        ssize_t ret;

        spin_lock(&hctx->lock);
        ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
        spin_unlock(&hctx->lock);

        return ret;
}

static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return blk_mq_tag_sysfs_show(hctx->tags, page);
}

static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
        unsigned int i, first = 1;
        ssize_t ret = 0;

        /* keep CPU hotplug from remapping the queue while we walk the mask */
        blk_mq_disable_hotplug();

        for_each_cpu(i, hctx->cpumask) {
                if (first)
                        ret += sprintf(ret + page, "%u", i);
                else
                        ret += sprintf(ret + page, ", %u", i);

                first = 0;
        }

        blk_mq_enable_hotplug();

        ret += sprintf(ret + page, "\n");
        return ret;
}

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_sysfs_dispatched_show,
};

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
        .attr = {.name = "merged", .mode = S_IRUGO },
        .show = blk_mq_sysfs_merged_show,
};

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
        .attr = {.name = "completed", .mode = S_IRUGO },
        .show = blk_mq_sysfs_completed_show,
};

static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
        .attr = {.name = "rq_list", .mode = S_IRUGO },
        .show = blk_mq_sysfs_rq_list_show,
};

static struct attribute *default_ctx_attrs[] = {
        &blk_mq_sysfs_dispatched.attr,
        &blk_mq_sysfs_merged.attr,
        &blk_mq_sysfs_completed.attr,
        &blk_mq_sysfs_rq_list.attr,
        NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
        .attr = {.name = "queued", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_queued_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
        .attr = {.name = "run", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_run_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
        .attr = {.name = "dispatched", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_dispatched_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
        .attr = {.name = "active", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_active_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
        .attr = {.name = "pending", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_rq_list_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
        .attr = {.name = "tags", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_tags_show,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
        .attr = {.name = "cpu_list", .mode = S_IRUGO },
        .show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
        &blk_mq_hw_sysfs_queued.attr,
        &blk_mq_hw_sysfs_run.attr,
        &blk_mq_hw_sysfs_dispatched.attr,
        &blk_mq_hw_sysfs_pending.attr,
        &blk_mq_hw_sysfs_tags.attr,
        &blk_mq_hw_sysfs_cpus.attr,
        &blk_mq_hw_sysfs_active.attr,
        NULL,
};

static const struct sysfs_ops blk_mq_sysfs_ops = {
        .show = blk_mq_sysfs_show,
        .store = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
        .show = blk_mq_hw_sysfs_show,
        .store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
        .sysfs_ops = &blk_mq_sysfs_ops,
        .release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
        .sysfs_ops = &blk_mq_sysfs_ops,
        .default_attrs = default_ctx_attrs,
        .release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
        .sysfs_ops = &blk_mq_hw_sysfs_ops,
        .default_attrs = default_hw_ctx_attrs,
        .release = blk_mq_sysfs_release,
};

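/*
 * With the ktypes above, the registration code below builds a tree like
 * the following for a disk with two hardware queues (illustrative;
 * directory names depend on the device and on the ctx-to-hctx mapping):
 *
 *	/sys/block/<disk>/mq/
 *	    0/
 *	        cpu0/	dispatched merged completed rq_list
 *	        cpu1/	...
 *	        queued run dispatched pending tags cpu_list active
 *	    1/
 *	        ...
 */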
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct blk_mq_ctx *ctx;
        int i;

        if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
                return;

        hctx_for_each_ctx(hctx, ctx, i)
                kobject_del(&ctx->kobj);

        kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
        struct request_queue *q = hctx->queue;
        struct blk_mq_ctx *ctx;
        int i, ret;

        if (!hctx->nr_ctx || !(hctx->flags & BLK_MQ_F_SYSFS_UP))
                return 0;

        ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", hctx->queue_num);
        if (ret)
                return ret;

        hctx_for_each_ctx(hctx, ctx, i) {
                ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
                if (ret)
                        break;
        }

        return ret;
}

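/*
 * The disk-level entry points below drive the per-hctx helpers above.
 * Note that blk_mq_register_hctx() does not roll back a partially
 * registered hctx on failure; blk_mq_register_disk() handles that by
 * unregistering everything via blk_mq_unregister_disk().
 */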
void blk_mq_unregister_disk(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i, j;

        queue_for_each_hw_ctx(q, hctx, i) {
                blk_mq_unregister_hctx(hctx);

                hctx_for_each_ctx(hctx, ctx, j)
                        kobject_put(&ctx->kobj);

                kobject_put(&hctx->kobj);
        }

        kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
        kobject_del(&q->mq_kobj);
        kobject_put(&q->mq_kobj);

        kobject_put(&disk_to_dev(disk)->kobj);
}

static void blk_mq_sysfs_init(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i;

        kobject_init(&q->mq_kobj, &blk_mq_ktype);

        queue_for_each_hw_ctx(q, hctx, i)
                kobject_init(&hctx->kobj, &blk_mq_hw_ktype);

        queue_for_each_ctx(q, ctx, i)
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
}

/* see blk_register_queue() */
void blk_mq_finish_init(struct request_queue *q)
{
        percpu_ref_switch_to_percpu(&q->mq_usage_counter);
}

int blk_mq_register_disk(struct gendisk *disk)
{
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;
        struct blk_mq_hw_ctx *hctx;
        int ret, i;

        blk_mq_sysfs_init(q);

        ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
        if (ret < 0)
                return ret;

        kobject_uevent(&q->mq_kobj, KOBJ_ADD);

        queue_for_each_hw_ctx(q, hctx, i) {
                hctx->flags |= BLK_MQ_F_SYSFS_UP;
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        if (ret) {
                blk_mq_unregister_disk(disk);
                return ret;
        }

        return 0;
}

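/*
 * blk_mq_sysfs_unregister() and blk_mq_sysfs_register() tear down and
 * rebuild only the per-hctx directories, leaving the "mq" kobject in
 * place.  They are used when the ctx-to-hctx mapping changes (e.g.
 * across a CPU hotplug event) so the cpu<N> directories can be
 * re-created under the right hardware queue.
 */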
void blk_mq_sysfs_unregister(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i;

        queue_for_each_hw_ctx(q, hctx, i)
                blk_mq_unregister_hctx(hctx);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
        struct blk_mq_hw_ctx *hctx;
        int i, ret = 0;

        queue_for_each_hw_ctx(q, hctx, i) {
                ret = blk_mq_register_hctx(hctx);
                if (ret)
                        break;
        }

        return ret;
}