/*
 * Xen SCSI frontend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bitops.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/vscsiif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

#define GRANT_INVALID_REF	0

#define VSCSIFRONT_OP_ADD_LUN	1
#define VSCSIFRONT_OP_DEL_LUN	2

/* Tuning point. */
#define VSCSIIF_DEFAULT_CMD_PER_LUN	10
#define VSCSIIF_MAX_TARGET		64
#define VSCSIIF_MAX_LUN			255

#define VSCSIIF_RING_SIZE	__CONST_RING_SIZE(vscsiif, PAGE_SIZE)
#define VSCSIIF_MAX_REQS	VSCSIIF_RING_SIZE

#define vscsiif_grants_sg(_sg)	(PFN_UP((_sg) *		\
				sizeof(struct scsiif_request_segment)))

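/*
 * A note on the grant arithmetic above, assuming 4 KiB pages: each
 * struct scsiif_request_segment of the vscsiif interface is 8 bytes
 * (a 4-byte grant reference plus 16-bit offset and length), so one
 * granted page holds 4096 / 8 = 512 segment descriptors and
 * vscsiif_grants_sg(N) is simply ceil(8 * N / 4096).  The gref[]
 * array in the shadow structure below is sized for the worst case of
 * one grant per data page (SG_ALL of them) plus the grants needed for
 * the pages holding the segment descriptors themselves.
 */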
struct vscsifrnt_shadow {
	/* command between backend and frontend */
	unsigned char act;
	uint16_t rqid;

	unsigned int nr_grants;			/* number of grants in gref[] */
	struct scsiif_request_segment *sg;	/* scatter/gather elements */

	/* Do reset or abort function. */
	wait_queue_head_t wq_reset;	/* reset work queue           */
	int wait_reset;			/* reset work queue condition */
	int32_t rslt_reset;		/* reset response status:     */
					/* SUCCESS or FAILED or:      */
#define RSLT_RESET_WAITING	0
#define RSLT_RESET_ERR		-1

	/* Requested struct scsi_cmnd is stored from kernel. */
	struct scsi_cmnd *sc;
	int gref[vscsiif_grants_sg(SG_ALL) + SG_ALL];
};

struct vscsifrnt_info {
	struct xenbus_device *dev;

	struct Scsi_Host *host;
	int host_active;

	unsigned int evtchn;
	unsigned int irq;

	grant_ref_t ring_ref;
	struct vscsiif_front_ring ring;
	struct vscsiif_response ring_rsp;

	spinlock_t shadow_lock;
	DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS);
	struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS];

	wait_queue_head_t wq_sync;
	unsigned int wait_ring_available:1;

	char dev_state_path[64];
	struct task_struct *curr;
};

static DEFINE_MUTEX(scsifront_mutex);

static void scsifront_wake_up(struct vscsifrnt_info *info)
{
	info->wait_ring_available = 0;
	wake_up(&info->wq_sync);
}

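/*
 * Request ids ("rqids") index both the shadow[] array and the
 * shadow_free_bitmap: a set bit means the slot is free.  The bitmap
 * starts out all ones (see bitmap_fill() in scsifront_probe()),
 * scsifront_get_rqid() claims the lowest free slot, and
 * scsifront_put_rqid() releases it again, waking up ring waiters when
 * a slot becomes available after the pool had run completely dry.
 */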
static int scsifront_get_rqid(struct vscsifrnt_info *info)
{
	unsigned long flags;
	int free;

	spin_lock_irqsave(&info->shadow_lock, flags);

	free = find_first_bit(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
	__clear_bit(free, info->shadow_free_bitmap);

	spin_unlock_irqrestore(&info->shadow_lock, flags);

	return free;
}

static int _scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
{
	int empty = bitmap_empty(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);

	__set_bit(id, info->shadow_free_bitmap);
	info->shadow[id] = NULL;

	return empty || info->wait_ring_available;
}

static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
{
	unsigned long flags;
	int kick;

	spin_lock_irqsave(&info->shadow_lock, flags);
	kick = _scsifront_put_rqid(info, id);
	spin_unlock_irqrestore(&info->shadow_lock, flags);

	if (kick)
		scsifront_wake_up(info);
}

static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
{
	struct vscsiif_front_ring *ring = &(info->ring);
	struct vscsiif_request *ring_req;
	uint32_t id;

	id = scsifront_get_rqid(info);	/* use id in response */

	if (id >= VSCSIIF_MAX_REQS)
		return NULL;

	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
	ring->req_prod_pvt++;

	ring_req->rqid = (uint16_t)id;

	return ring_req;
}

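/*
 * Push queued requests to the backend.  The CHECK_NOTIFY variant of
 * the standard Xen ring macro compares the new producer index against
 * the req_event counter the backend publishes in the shared page and
 * only asks for an event-channel kick when the backend actually wants
 * one, suppressing redundant notifications while the backend is
 * already busy processing the ring.
 */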
static void scsifront_do_request(struct vscsifrnt_info *info)
{
	struct vscsiif_front_ring *ring = &(info->ring);
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(info->irq);
}

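/*
 * Grant cleanup after a command completes.  Ending foreign access on
 * a grant the backend still has mapped would return the page to
 * general use while another domain can still write to it, so a grant
 * found busy here after the response already arrived indicates a
 * protocol violation by the backend, and BUG() is the only safe
 * response.
 */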
static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
{
	struct vscsifrnt_shadow *s = info->shadow[id];
	int i;

	if (s->sc->sc_data_direction == DMA_NONE)
		return;

	for (i = 0; i < s->nr_grants; i++) {
		if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
			shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
				     ": grant still in use by backend\n");
			BUG();
		}
		gnttab_end_foreign_access(s->gref[i], 0, 0UL);
	}

	kfree(s->sg);
}

static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
				   struct vscsiif_response *ring_rsp)
{
	struct scsi_cmnd *sc;
	uint32_t id;
	uint8_t sense_len;

	id = ring_rsp->rqid;
	sc = info->shadow[id]->sc;

	BUG_ON(sc == NULL);

	scsifront_gnttab_done(info, id);
	scsifront_put_rqid(info, id);

	sc->result = ring_rsp->rslt;
	scsi_set_resid(sc, ring_rsp->residual_len);

	sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
			  ring_rsp->sense_len);

	if (sense_len)
		memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len);

	sc->scsi_done(sc);
}

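/*
 * Completion of a reset/abort request.  Normally the waiter in
 * scsifront_action_handler() is still parked on wq_reset and only
 * needs its result filled in.  If the waiter was interrupted by a
 * signal, it has already marked the shadow RSLT_RESET_ERR and left;
 * in that case ownership of the kmalloc'ed shadow has passed to this
 * function, which frees it and returns the rqid to the pool.
 */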
static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
				    struct vscsiif_response *ring_rsp)
{
	uint16_t id = ring_rsp->rqid;
	unsigned long flags;
	struct vscsifrnt_shadow *shadow = info->shadow[id];
	int kick;

	spin_lock_irqsave(&info->shadow_lock, flags);
	shadow->wait_reset = 1;
	switch (shadow->rslt_reset) {
	case RSLT_RESET_WAITING:
		shadow->rslt_reset = ring_rsp->rslt;
		break;
	case RSLT_RESET_ERR:
		kick = _scsifront_put_rqid(info, id);
		spin_unlock_irqrestore(&info->shadow_lock, flags);
		kfree(shadow);
		if (kick)
			scsifront_wake_up(info);
		return;
	default:
		shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     ": bad reset state %d, possibly leaking %u\n",
			     shadow->rslt_reset, id);
		break;
	}
	spin_unlock_irqrestore(&info->shadow_lock, flags);

	wake_up(&shadow->wq_reset);
}

static int scsifront_cmd_done(struct vscsifrnt_info *info)
{
	struct vscsiif_response *ring_rsp;
	RING_IDX i, rp;
	int more_to_do = 0;
	unsigned long flags;

	spin_lock_irqsave(info->host->host_lock, flags);

	rp = info->ring.sring->rsp_prod;
	rmb();	/* ordering required with respect to dom0 */
	for (i = info->ring.rsp_cons; i != rp; i++) {
		ring_rsp = RING_GET_RESPONSE(&info->ring, i);

		if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
			 test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
			 "illegal rqid %u returned by backend!\n",
			 ring_rsp->rqid))
			continue;

		if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
			scsifront_cdb_cmd_done(info, ring_rsp);
		else
			scsifront_sync_cmd_done(info, ring_rsp);
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt)
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
	else
		info->ring.sring->rsp_event = i + 1;

	info->wait_ring_available = 0;

	spin_unlock_irqrestore(info->host->host_lock, flags);

	wake_up(&info->wq_sync);

	return more_to_do;
}

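/*
 * The interrupt is requested with request_threaded_irq() and a NULL
 * primary handler (see scsifront_alloc_ring()), so this runs in a
 * kernel thread where sleeping is allowed; that is what makes the
 * cond_resched() below legal.
 */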
static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
{
	struct vscsifrnt_info *info = dev_id;

	while (scsifront_cmd_done(info))
		/* Yield point for this unbounded loop. */
		cond_resched();

	return IRQ_HANDLED;
}

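/*
 * Grant the backend access to a command's data buffer.  Up to
 * VSCSIIF_SG_TABLESIZE segments fit directly into the ring request.
 * Larger transfers use the "feature-sg-grant" extension: the segment
 * descriptors are written to extra pages which are themselves granted
 * to the backend, and ring_req->nr_segments then carries the
 * VSCSIIF_SG_GRANT flag plus the number of those indirection pages.
 */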
static int map_data_for_request(struct vscsifrnt_info *info,
				struct scsi_cmnd *sc,
				struct vscsiif_request *ring_req,
				struct vscsifrnt_shadow *shadow)
{
	grant_ref_t gref_head;
	struct page *page;
	int err, ref, ref_cnt = 0;
	int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
	unsigned int i, off, len, bytes;
	unsigned int data_len = scsi_bufflen(sc);
	unsigned int data_grants = 0, seg_grants = 0;
	struct scatterlist *sg;
	unsigned long mfn;
	struct scsiif_request_segment *seg;

	ring_req->nr_segments = 0;
	if (sc->sc_data_direction == DMA_NONE || !data_len)
		return 0;

	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
		data_grants += PFN_UP(sg->offset + sg->length);

	if (data_grants > VSCSIIF_SG_TABLESIZE) {
		if (data_grants > info->host->sg_tablesize) {
			shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
				     ": unable to map request_buffer for command!\n");
			return -E2BIG;
		}
		seg_grants = vscsiif_grants_sg(data_grants);
		shadow->sg = kcalloc(data_grants,
			sizeof(struct scsiif_request_segment), GFP_ATOMIC);
		if (!shadow->sg)
			return -ENOMEM;
	}
	seg = shadow->sg ? : ring_req->seg;

	err = gnttab_alloc_grant_references(seg_grants + data_grants,
					    &gref_head);
	if (err) {
		kfree(shadow->sg);
		shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     ": gnttab_alloc_grant_references() error\n");
		return -ENOMEM;
	}

	if (seg_grants) {
		page = virt_to_page(seg);
		off = (unsigned long)seg & ~PAGE_MASK;
		len = sizeof(struct scsiif_request_segment) * data_grants;
		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
				info->dev->otherend_id, mfn, 1);
			shadow->gref[ref_cnt] = ref;
			ring_req->seg[ref_cnt].gref   = ref;
			ring_req->seg[ref_cnt].offset = (uint16_t)off;
			ring_req->seg[ref_cnt].length = (uint16_t)bytes;

			page++;
			len -= bytes;
			off = 0;
			ref_cnt++;
		}
		BUG_ON(seg_grants < ref_cnt);
		seg_grants = ref_cnt;
	}

	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		while (len > 0 && data_len > 0) {
			/*
			 * The SCSI midlayer can send a scatterlist that is
			 * larger than the data_len it wants transferred
			 * for certain I/O sizes.
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
				info->dev->otherend_id, mfn, grant_ro);

			shadow->gref[ref_cnt] = ref;
			seg->gref   = ref;
			seg->offset = (uint16_t)off;
			seg->length = (uint16_t)bytes;

			page++;
			seg++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
			ref_cnt++;
		}
	}

	if (seg_grants)
		ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
	else
		ring_req->nr_segments = (uint8_t)ref_cnt;
	shadow->nr_grants = ref_cnt;

	return 0;
}

static struct vscsiif_request *scsifront_command2ring(
		struct vscsifrnt_info *info, struct scsi_cmnd *sc,
		struct vscsifrnt_shadow *shadow)
{
	struct vscsiif_request *ring_req;

	memset(shadow, 0, sizeof(*shadow));

	ring_req = scsifront_pre_req(info);
	if (!ring_req)
		return NULL;

	info->shadow[ring_req->rqid] = shadow;
	shadow->rqid = ring_req->rqid;

	ring_req->id      = sc->device->id;
	ring_req->lun     = sc->device->lun;
	ring_req->channel = sc->device->channel;
	ring_req->cmd_len = sc->cmd_len;

	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);

	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);

	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
	ring_req->timeout_per_command = sc->request->timeout / HZ;

	return ring_req;
}

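/*
 * Fast path for normal commands.  Errors from the mapping step are
 * folded into the two outcomes the SCSI midlayer understands here:
 * a transient -ENOMEM becomes SCSI_MLQUEUE_HOST_BUSY (retry later),
 * while anything else (e.g. -E2BIG) completes the command with
 * DID_ERROR.  A full ring likewise reports host-busy so the midlayer
 * requeues the command.
 */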
static int scsifront_queuecommand(struct Scsi_Host *shost,
				  struct scsi_cmnd *sc)
{
	struct vscsifrnt_info *info = shost_priv(shost);
	struct vscsiif_request *ring_req;
	struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
	unsigned long flags;
	int err;
	uint16_t rqid;

	spin_lock_irqsave(shost->host_lock, flags);
	if (RING_FULL(&info->ring))
		goto busy;

	ring_req = scsifront_command2ring(info, sc, shadow);
	if (!ring_req)
		goto busy;

	sc->result = 0;

	rqid = ring_req->rqid;
	ring_req->act = VSCSIIF_ACT_SCSI_CDB;

	shadow->sc  = sc;
	shadow->act = VSCSIIF_ACT_SCSI_CDB;

	err = map_data_for_request(info, sc, ring_req, shadow);
	if (err < 0) {
		pr_debug("%s: err %d\n", __func__, err);
		scsifront_put_rqid(info, rqid);
		spin_unlock_irqrestore(shost->host_lock, flags);
		if (err == -ENOMEM)
			return SCSI_MLQUEUE_HOST_BUSY;
		sc->result = DID_ERROR << 16;
		sc->scsi_done(sc);
		return 0;
	}

	scsifront_do_request(info);
	spin_unlock_irqrestore(shost->host_lock, flags);

	return 0;

busy:
	spin_unlock_irqrestore(shost->host_lock, flags);
	pr_debug("%s: busy\n", __func__);
	return SCSI_MLQUEUE_HOST_BUSY;
}

/*
 * Any exception handling (reset or abort) must be forwarded to the backend.
 * We have to wait until an answer is returned. This answer contains the
 * result to be returned to the requestor.
 */
static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
{
	struct Scsi_Host *host = sc->device->host;
	struct vscsifrnt_info *info = shost_priv(host);
	struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
	struct vscsiif_request *ring_req;
	int err = 0;

	shadow = kmalloc(sizeof(*shadow), GFP_NOIO);
	if (!shadow)
		return FAILED;

	spin_lock_irq(host->host_lock);

	for (;;) {
		if (!RING_FULL(&info->ring)) {
			ring_req = scsifront_command2ring(info, sc, shadow);
			if (ring_req)
				break;
		}
		if (err) {
			spin_unlock_irq(host->host_lock);
			kfree(shadow);
			return FAILED;
		}
		info->wait_ring_available = 1;
		spin_unlock_irq(host->host_lock);
		err = wait_event_interruptible(info->wq_sync,
					       !info->wait_ring_available);
		spin_lock_irq(host->host_lock);
	}

	ring_req->act = act;
	ring_req->ref_rqid = s->rqid;

	shadow->act = act;
	shadow->rslt_reset = RSLT_RESET_WAITING;
	init_waitqueue_head(&shadow->wq_reset);

	ring_req->nr_segments = 0;

	scsifront_do_request(info);

	spin_unlock_irq(host->host_lock);
	err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
	spin_lock_irq(host->host_lock);

	if (!err) {
		err = shadow->rslt_reset;
		scsifront_put_rqid(info, shadow->rqid);
		kfree(shadow);
	} else {
		spin_lock(&info->shadow_lock);
		shadow->rslt_reset = RSLT_RESET_ERR;
		spin_unlock(&info->shadow_lock);
		err = FAILED;
	}

	spin_unlock_irq(host->host_lock);
	return err;
}

static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
{
	pr_debug("%s\n", __func__);
	return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_ABORT);
}

static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
{
	pr_debug("%s\n", __func__);
	return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
}

static int scsifront_sdev_configure(struct scsi_device *sdev)
{
	struct vscsifrnt_info *info = shost_priv(sdev->host);

	if (info && current == info->curr)
		xenbus_printf(XBT_NIL, info->dev->nodename,
			      info->dev_state_path, "%d", XenbusStateConnected);

	return 0;
}

static void scsifront_sdev_destroy(struct scsi_device *sdev)
{
	struct vscsifrnt_info *info = shost_priv(sdev->host);

	if (info && current == info->curr)
		xenbus_printf(XBT_NIL, info->dev->nodename,
			      info->dev_state_path, "%d", XenbusStateClosed);
}

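/*
 * Host template notes: .cmd_size makes the SCSI midlayer allocate a
 * struct vscsifrnt_shadow alongside every struct scsi_cmnd, which is
 * what scsi_cmd_priv() returns in scsifront_queuecommand(), and
 * .can_queue matches the number of ring slots so the midlayer never
 * has more commands outstanding than there are rqids.
 */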
static struct scsi_host_template scsifront_sht = {
	.module			= THIS_MODULE,
	.name			= "Xen SCSI frontend driver",
	.queuecommand		= scsifront_queuecommand,
	.eh_abort_handler	= scsifront_eh_abort_handler,
	.eh_device_reset_handler = scsifront_dev_reset_handler,
	.slave_configure	= scsifront_sdev_configure,
	.slave_destroy		= scsifront_sdev_destroy,
	.cmd_per_lun		= VSCSIIF_DEFAULT_CMD_PER_LUN,
	.can_queue		= VSCSIIF_MAX_REQS,
	.this_id		= -1,
	.cmd_size		= sizeof(struct vscsifrnt_shadow),
	.sg_tablesize		= VSCSIIF_SG_TABLESIZE,
	.use_clustering		= DISABLE_CLUSTERING,
	.proc_name		= "scsifront",
};

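/*
 * Ring setup: allocate one page for the shared ring, grant it to the
 * backend, allocate an event channel, and bind it to a threaded irq.
 * On failure the teardown labels below run in sequence (free_irq
 * falls through into free_gnttab), undoing the steps in reverse
 * order.
 */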
static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct vscsiif_sring *sring;
	int err = -ENOMEM;

	/***** Frontend to Backend ring start *****/
	sring = (struct vscsiif_sring *)__get_free_page(GFP_KERNEL);
	if (!sring) {
		xenbus_dev_fatal(dev, err,
			"fail to allocate shared ring (Front to Back)");
		return err;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(sring));
	if (err < 0) {
		free_page((unsigned long)sring);
		xenbus_dev_fatal(dev, err,
			"fail to grant shared ring (Front to Back)");
		return err;
	}
	info->ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
		goto free_gnttab;
	}

	err = bind_evtchn_to_irq(info->evtchn);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
		goto free_gnttab;
	}

	info->irq = err;

	err = request_threaded_irq(info->irq, NULL, scsifront_irq_fn,
				   IRQF_ONESHOT, "scsifront", info);
	if (err) {
		xenbus_dev_fatal(dev, err, "request_threaded_irq");
		goto free_irq;
	}

	return 0;

/* free resource */
free_irq:
	unbind_from_irqhandler(info->irq, info);
free_gnttab:
	gnttab_end_foreign_access(info->ring_ref, 0,
				  (unsigned long)info->ring.sring);

	return err;
}

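/*
 * Publish ring-ref and event-channel in the frontend's xenbus
 * directory inside a single transaction, so the backend sees either
 * both keys or neither.  xenbus_transaction_end() returning -EAGAIN
 * means the transaction raced with another xenstore update and must
 * simply be replayed from the start.
 */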
static int scsifront_init_ring(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct xenbus_transaction xbt;
	int err;

	pr_debug("%s\n", __func__);

	err = scsifront_alloc_ring(info);
	if (err)
		return err;
	pr_debug("%s: %u %u\n", __func__, info->ring_ref, info->evtchn);

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		xenbus_dev_fatal(dev, err, "starting transaction");

	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
			    info->ring_ref);
	if (err) {
		xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
		goto fail;
	}

	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    info->evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
		goto fail;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto free_sring;
	}

	return 0;

fail:
	xenbus_transaction_end(xbt, 1);
free_sring:
	unbind_from_irqhandler(info->irq, info);
	gnttab_end_foreign_access(info->ring_ref, 0,
				  (unsigned long)info->ring.sring);

	return err;
}

static int scsifront_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	struct vscsifrnt_info *info;
	struct Scsi_Host *host;
	int err = -ENOMEM;
	char name[TASK_COMM_LEN];

	host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
	if (!host) {
		xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
		return err;
	}
	info = (struct vscsifrnt_info *)host->hostdata;

	dev_set_drvdata(&dev->dev, info);
	info->dev = dev;

	bitmap_fill(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);

	err = scsifront_init_ring(info);
	if (err) {
		scsi_host_put(host);
		return err;
	}

	init_waitqueue_head(&info->wq_sync);
	spin_lock_init(&info->shadow_lock);

	snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no);

	host->max_id      = VSCSIIF_MAX_TARGET;
	host->max_channel = 0;
	host->max_lun     = VSCSIIF_MAX_LUN;
	host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
	host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE;

	err = scsi_add_host(host, &dev->dev);
	if (err) {
		dev_err(&dev->dev, "fail to add scsi host %d\n", err);
		goto free_sring;
	}
	info->host = host;
	info->host_active = 1;

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

free_sring:
	unbind_from_irqhandler(info->irq, info);
	gnttab_end_foreign_access(info->ring_ref, 0,
				  (unsigned long)info->ring.sring);
	scsi_host_put(host);
	return err;
}

static int scsifront_remove(struct xenbus_device *dev)
{
	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

	pr_debug("%s: %s removed\n", __func__, dev->nodename);

	mutex_lock(&scsifront_mutex);
	if (info->host_active) {
		/* Scsi_Host not yet removed */
		scsi_remove_host(info->host);
		info->host_active = 0;
	}
	mutex_unlock(&scsifront_mutex);

	gnttab_end_foreign_access(info->ring_ref, 0,
				  (unsigned long)info->ring.sring);
	unbind_from_irqhandler(info->irq, info);

	scsi_host_put(info->host);

	return 0;
}

static void scsifront_disconnect(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct Scsi_Host *host = info->host;

	pr_debug("%s: %s disconnect\n", __func__, dev->nodename);

	/*
	 * By the time this function runs, all devices of the frontend
	 * have already been deleted, so there is no need to block I/O
	 * before removing the host.
	 */
	mutex_lock(&scsifront_mutex);
	if (info->host_active) {
		scsi_remove_host(host);
		info->host_active = 0;
	}
	mutex_unlock(&scsifront_mutex);

	xenbus_frontend_closed(dev);
}

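/*
 * Walk the backend's vscsi-devs directory and add or remove LUNs to
 * match its per-device state entries.  info->curr is set to the
 * running task so that scsifront_sdev_configure()/_destroy(), which
 * are invoked synchronously from scsi_add_device() and
 * scsi_remove_device() below, know the xenstore state writes are
 * wanted and write back Connected/Closed for exactly these devices.
 */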
static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
{
	struct xenbus_device *dev = info->dev;
	int i, err = 0;
	char str[64];
	char **dir;
	unsigned int dir_n = 0;
	unsigned int device_state;
	unsigned int hst, chn, tgt, lun;
	struct scsi_device *sdev;

	dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
	if (IS_ERR(dir))
		return;

	/* mark current task as the one allowed to modify device states */
	BUG_ON(info->curr);
	info->curr = current;

	for (i = 0; i < dir_n; i++) {
		/* read status */
		snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
		err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
				   &device_state);
		if (XENBUS_EXIST_ERR(err))
			continue;

		/* virtual SCSI device */
		snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
		err = xenbus_scanf(XBT_NIL, dev->otherend, str,
				   "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
		if (XENBUS_EXIST_ERR(err))
			continue;

		/*
		 * Front device state path, used in slave_configure called
		 * on successful scsi_add_device, and in slave_destroy called
		 * on remove of a device.
		 */
		snprintf(info->dev_state_path, sizeof(info->dev_state_path),
			 "vscsi-devs/%s/state", dir[i]);

		switch (op) {
		case VSCSIFRONT_OP_ADD_LUN:
			if (device_state != XenbusStateInitialised)
				break;

			if (scsi_add_device(info->host, chn, tgt, lun)) {
				dev_err(&dev->dev, "scsi_add_device\n");
				xenbus_printf(XBT_NIL, dev->nodename,
					      info->dev_state_path,
					      "%d", XenbusStateClosed);
			}
			break;
		case VSCSIFRONT_OP_DEL_LUN:
			if (device_state != XenbusStateClosing)
				break;

			sdev = scsi_device_lookup(info->host, chn, tgt, lun);
			if (sdev) {
				scsi_remove_device(sdev);
				scsi_device_put(sdev);
			}
			break;
		default:
			break;
		}
	}

	info->curr = NULL;

	kfree(dir);
}

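/*
 * Negotiate the scatter-gather table size with the backend.  If
 * "feature-sg-grant" is advertised, the table size is raised above
 * the VSCSIIF_SG_TABLESIZE default, clamped to SG_ALL and to the
 * number of segment descriptors that fit into VSCSIIF_SG_TABLESIZE
 * granted pages; with 4 KiB pages and 8-byte descriptors that second
 * bound works out to VSCSIIF_SG_TABLESIZE * 512 entries.
 */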
static void scsifront_read_backend_params(struct xenbus_device *dev,
					  struct vscsifrnt_info *info)
{
	unsigned int sg_grant;
	int ret;
	struct Scsi_Host *host = info->host;

	ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u",
			   &sg_grant);
	if (ret == 1 && sg_grant) {
		sg_grant = min_t(unsigned int, sg_grant, SG_ALL);
		sg_grant = max_t(unsigned int, sg_grant, VSCSIIF_SG_TABLESIZE);
		host->sg_tablesize = min_t(unsigned int, sg_grant,
			VSCSIIF_SG_TABLESIZE * PAGE_SIZE /
			sizeof(struct scsiif_request_segment));
		host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
	}
	dev_info(&dev->dev, "using up to %d SG entries\n", host->sg_tablesize);
}

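/*
 * Xenbus state-machine glue.  The backend drives the interesting
 * transitions: Connected triggers parameter negotiation and the
 * initial LUN scan, Closing/Closed tear the host down, and the
 * Reconfiguring/Reconfigured pair is how the backend signals LUN
 * hot-unplug and hot-plug on a live connection.
 */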
static void scsifront_backend_changed(struct xenbus_device *dev,
				      enum xenbus_state backend_state)
{
	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

	pr_debug("%s: %p %u %u\n", __func__, dev, dev->state, backend_state);

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
		break;

	case XenbusStateConnected:
		scsifront_read_backend_params(dev, info);
		if (xenbus_read_driver_state(dev->nodename) ==
		    XenbusStateInitialised)
			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);

		if (dev->state != XenbusStateConnected)
			xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's Closing state -- fallthrough */
	case XenbusStateClosing:
		scsifront_disconnect(info);
		break;

	case XenbusStateReconfiguring:
		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateReconfiguring);
		break;

	case XenbusStateReconfigured:
		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
		xenbus_switch_state(dev, XenbusStateConnected);
		break;
	}
}

static const struct xenbus_device_id scsifront_ids[] = {
	{ "vscsi" },
	{ "" }
};

static struct xenbus_driver scsifront_driver = {
	.ids			= scsifront_ids,
	.probe			= scsifront_probe,
	.remove			= scsifront_remove,
	.otherend_changed	= scsifront_backend_changed,
};

static int __init scsifront_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	return xenbus_register_frontend(&scsifront_driver);
}
module_init(scsifront_init);

static void __exit scsifront_exit(void)
{
	xenbus_unregister_driver(&scsifront_driver);
}
module_exit(scsifront_exit);

MODULE_DESCRIPTION("Xen SCSI frontend driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vscsi");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");