/* NOTE(review): stripped web-scrape artifacts here (a "sunvdc.c 21 KB"
 * size banner and two runs of concatenated line numbers).  The real
 * source begins with the copyright header below.
 */
  1. /* sunvdc.c: Sun LDOM Virtual Disk Client.
  2. *
  3. * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
  4. */
  5. #include <linux/module.h>
  6. #include <linux/kernel.h>
  7. #include <linux/types.h>
  8. #include <linux/blkdev.h>
  9. #include <linux/hdreg.h>
  10. #include <linux/genhd.h>
  11. #include <linux/cdrom.h>
  12. #include <linux/slab.h>
  13. #include <linux/spinlock.h>
  14. #include <linux/completion.h>
  15. #include <linux/delay.h>
  16. #include <linux/init.h>
  17. #include <linux/list.h>
  18. #include <linux/scatterlist.h>
  19. #include <asm/vio.h>
  20. #include <asm/ldc.h>
#define DRV_MODULE_NAME "sunvdc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.1"
#define DRV_MODULE_RELDATE "February 13, 2013"

static char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

/* Number of descriptors in the TX dring.  Must stay a power of two:
 * producer/consumer indices are masked with (VDC_TX_RING_SIZE - 1).
 */
#define VDC_TX_RING_SIZE 512

/* Values for vio_completion.waiting_for: which kind of event
 * vdc_finish() is allowed to complete.  WAITING_FOR_ANY matches all.
 */
#define WAITING_FOR_LINK_UP 0x01
#define WAITING_FOR_TX_SPACE 0x02
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1
/* Back-pointer from a TX dring slot to the block request it carries;
 * NULL for slots used by generic_request().
 */
struct vdc_req_entry {
	struct request *req;
};
/* Per-port state: one instance per virtual disk ("vdc-port" MD node). */
struct vdc_port {
	struct vio_driver_state vio;	/* embedded; to_vdc_port() maps back via container_of */

	struct gendisk *disk;

	struct vdc_completion *cmp;	/* NOTE(review): the code uses vio.cmp for completions;
					 * this field appears unused in this file — confirm */

	u64 req_id;			/* id stamped into each descriptor; bumped per request */
	u64 seq;
	struct vdc_req_entry rq_arr[VDC_TX_RING_SIZE];	/* request back-pointers, indexed like the dring */

	unsigned long ring_cookies;	/* max LDC cookies per descriptor, sized in vdc_port_probe() */

	u64 max_xfer_size;		/* in blocks; may be lowered by the server's ATTR ACK */
	u32 vdisk_block_size;

	/* The server fills these in for us in the disk attribute
	 * ACK packet.
	 */
	u64 operations;			/* bitmask of VD_OP_* the server supports */
	u32 vdisk_size;			/* in blocks; initialized to -1 until the ATTR ACK (v1.1) */
	u8 vdisk_type;			/* VD_DISK_TYPE_DISK or VD_DISK_TYPE_SLICE */
	u8 vdisk_mtype;			/* media type (CD/DVD/fixed); v1.1 protocol only */

	char disk_name[32];
};
/* Map an embedded vio_driver_state back to its containing vdc_port. */
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
	return container_of(vio, struct vdc_port, vio);
}
/* Protocol versions we can negotiate, tried in order.
 * Ordered from largest major to lowest.
 */
static struct vio_version vdc_versions[] = {
	{ .major = 1, .minor = 1 },
	{ .major = 1, .minor = 0 },
};
  67. static inline int vdc_version_supported(struct vdc_port *port,
  68. u16 major, u16 minor)
  69. {
  70. return port->vio.ver.major == major && port->vio.ver.minor >= minor;
  71. }
#define VDCBLK_NAME "vdisk"
/* Dynamically-assigned block major; set in vdc_init(). */
static int vdc_major;
/* 2^3 = 8 minors (partitions) per disk. */
#define PARTITION_SHIFT 3
/* Free descriptor slots remaining in the TX dring. */
static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VDC_TX_RING_SIZE);
}
  79. static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  80. {
  81. struct gendisk *disk = bdev->bd_disk;
  82. sector_t nsect = get_capacity(disk);
  83. sector_t cylinders = nsect;
  84. geo->heads = 0xff;
  85. geo->sectors = 0x3f;
  86. sector_div(cylinders, geo->heads * geo->sectors);
  87. geo->cylinders = cylinders;
  88. if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
  89. geo->cylinders = 0xffff;
  90. return 0;
  91. }
  92. /* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
  93. * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
  94. * Needed to be able to install inside an ldom from an iso image.
  95. */
  96. static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
  97. unsigned command, unsigned long argument)
  98. {
  99. int i;
  100. struct gendisk *disk;
  101. switch (command) {
  102. case CDROMMULTISESSION:
  103. pr_debug(PFX "Multisession CDs not supported\n");
  104. for (i = 0; i < sizeof(struct cdrom_multisession); i++)
  105. if (put_user(0, (char __user *)(argument + i)))
  106. return -EFAULT;
  107. return 0;
  108. case CDROM_GET_CAPABILITY:
  109. disk = bdev->bd_disk;
  110. if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
  111. return 0;
  112. return -EINVAL;
  113. default:
  114. pr_debug(PFX "ioctl %08x not supported\n", command);
  115. return -EINVAL;
  116. }
  117. }
/* block_device_operations: only geometry and a few CD ioctls. */
static const struct block_device_operations vdc_fops = {
	.owner = THIS_MODULE,
	.getgeo = vdc_getgeo,
	.ioctl = vdc_ioctl,
};
  123. static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
  124. {
  125. if (vio->cmp &&
  126. (waiting_for == -1 ||
  127. vio->cmp->waiting_for == waiting_for)) {
  128. vio->cmp->err = err;
  129. complete(&vio->cmp->com);
  130. vio->cmp = NULL;
  131. }
  132. }
/* VIO handshake done: release anyone waiting for link-up. */
static void vdc_handshake_complete(struct vio_driver_state *vio)
{
	vdc_finish(vio, 0, WAITING_FOR_LINK_UP);
}
  137. static int vdc_handle_unknown(struct vdc_port *port, void *arg)
  138. {
  139. struct vio_msg_tag *pkt = arg;
  140. printk(KERN_ERR PFX "Received unknown msg [%02x:%02x:%04x:%08x]\n",
  141. pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
  142. printk(KERN_ERR PFX "Resetting connection.\n");
  143. ldc_disconnect(port->vio.lp);
  144. return -ECONNRESET;
  145. }
  146. static int vdc_send_attr(struct vio_driver_state *vio)
  147. {
  148. struct vdc_port *port = to_vdc_port(vio);
  149. struct vio_disk_attr_info pkt;
  150. memset(&pkt, 0, sizeof(pkt));
  151. pkt.tag.type = VIO_TYPE_CTRL;
  152. pkt.tag.stype = VIO_SUBTYPE_INFO;
  153. pkt.tag.stype_env = VIO_ATTR_INFO;
  154. pkt.tag.sid = vio_send_sid(vio);
  155. pkt.xfer_mode = VIO_DRING_MODE;
  156. pkt.vdisk_block_size = port->vdisk_block_size;
  157. pkt.max_xfer_size = port->max_xfer_size;
  158. viodbg(HS, "SEND ATTR xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
  159. pkt.xfer_mode, pkt.vdisk_block_size, pkt.max_xfer_size);
  160. return vio_ldc_send(&port->vio, &pkt, sizeof(pkt));
  161. }
  162. static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
  163. {
  164. struct vdc_port *port = to_vdc_port(vio);
  165. struct vio_disk_attr_info *pkt = arg;
  166. viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
  167. "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
  168. pkt->tag.stype, pkt->operations,
  169. pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
  170. pkt->xfer_mode, pkt->vdisk_block_size,
  171. pkt->max_xfer_size);
  172. if (pkt->tag.stype == VIO_SUBTYPE_ACK) {
  173. switch (pkt->vdisk_type) {
  174. case VD_DISK_TYPE_DISK:
  175. case VD_DISK_TYPE_SLICE:
  176. break;
  177. default:
  178. printk(KERN_ERR PFX "%s: Bogus vdisk_type 0x%x\n",
  179. vio->name, pkt->vdisk_type);
  180. return -ECONNRESET;
  181. }
  182. if (pkt->vdisk_block_size > port->vdisk_block_size) {
  183. printk(KERN_ERR PFX "%s: BLOCK size increased "
  184. "%u --> %u\n",
  185. vio->name,
  186. port->vdisk_block_size, pkt->vdisk_block_size);
  187. return -ECONNRESET;
  188. }
  189. port->operations = pkt->operations;
  190. port->vdisk_type = pkt->vdisk_type;
  191. if (vdc_version_supported(port, 1, 1)) {
  192. port->vdisk_size = pkt->vdisk_size;
  193. port->vdisk_mtype = pkt->vdisk_mtype;
  194. }
  195. if (pkt->max_xfer_size < port->max_xfer_size)
  196. port->max_xfer_size = pkt->max_xfer_size;
  197. port->vdisk_block_size = pkt->vdisk_block_size;
  198. return 0;
  199. } else {
  200. printk(KERN_ERR PFX "%s: Attribute NACK\n", vio->name);
  201. return -ECONNRESET;
  202. }
  203. }
/* Complete a descriptor with no struct request attached (issued by
 * generic_request()): hand the negated status to the sleeping waiter.
 */
static void vdc_end_special(struct vdc_port *port, struct vio_disk_desc *desc)
{
	int err = desc->status;

	vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD);
}
/* Reclaim one TX descriptor the server has finished: unmap its
 * cookies, advance the consumer index, and complete whichever request
 * (block or generic) it carried.
 */
static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
			unsigned int index)
{
	struct vio_disk_desc *desc = vio_dring_entry(dr, index);
	struct vdc_req_entry *rqe = &port->rq_arr[index];
	struct request *req;

	/* Only descriptors the server marked DONE may be reclaimed. */
	if (unlikely(desc->hdr.state != VIO_DESC_DONE))
		return;

	ldc_unmap(port->vio.lp, desc->cookies, desc->ncookies);
	desc->hdr.state = VIO_DESC_FREE;
	dr->cons = (index + 1) & (VDC_TX_RING_SIZE - 1);

	req = rqe->req;
	if (req == NULL) {
		/* No block request attached: a generic_request() slot. */
		vdc_end_special(port, desc);
		return;
	}

	rqe->req = NULL;

	__blk_end_request(req, (desc->status ? -EIO : 0), desc->size);

	/* restart blk queue when ring is half emptied */
	if (blk_queue_stopped(port->disk->queue) &&
	    vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
		blk_start_queue(port->disk->queue);
}
  232. static int vdc_ack(struct vdc_port *port, void *msgbuf)
  233. {
  234. struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
  235. struct vio_dring_data *pkt = msgbuf;
  236. if (unlikely(pkt->dring_ident != dr->ident ||
  237. pkt->start_idx != pkt->end_idx ||
  238. pkt->start_idx >= VDC_TX_RING_SIZE))
  239. return 0;
  240. vdc_end_one(port, dr, pkt->start_idx);
  241. return 0;
  242. }
/* Server NACKed a dring message; currently ignored. */
static int vdc_nack(struct vdc_port *port, void *msgbuf)
{
	/* XXX Implement me XXX */
	return 0;
}
/* LDC event callback.  Link up/reset events are forwarded to the VIO
 * core; DATA_READY drains every queued message and dispatches it by
 * tag type.  Any error aborts the drain and wakes all waiters with
 * that error.  All work is done under vio->lock.
 */
static void vdc_event(void *arg, int event)
{
	struct vdc_port *port = arg;
	struct vio_driver_state *vio = &port->vio;
	unsigned long flags;
	int err;

	spin_lock_irqsave(&vio->lock, flags);

	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	if (unlikely(event != LDC_EVENT_DATA_READY)) {
		printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event);
		spin_unlock_irqrestore(&vio->lock, flags);
		return;
	}

	err = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];	/* sized for the largest VIO message */
		} msgbuf;

		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)	/* channel drained */
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_ACK)
				err = vdc_ack(port, &msgbuf);
			else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK)
				err = vdc_nack(port, &msgbuf);
			else
				err = vdc_handle_unknown(port, &msgbuf);
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			err = vio_control_pkt_engine(vio, &msgbuf);
		} else {
			err = vdc_handle_unknown(port, &msgbuf);
		}
		if (err < 0)
			break;
	}
	if (err < 0)
		vdc_finish(&port->vio, err, WAITING_FOR_ANY);
	spin_unlock_irqrestore(&vio->lock, flags);
}
  307. static int __vdc_tx_trigger(struct vdc_port *port)
  308. {
  309. struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
  310. struct vio_dring_data hdr = {
  311. .tag = {
  312. .type = VIO_TYPE_DATA,
  313. .stype = VIO_SUBTYPE_INFO,
  314. .stype_env = VIO_DRING_DATA,
  315. .sid = vio_send_sid(&port->vio),
  316. },
  317. .dring_ident = dr->ident,
  318. .start_idx = dr->prod,
  319. .end_idx = dr->prod,
  320. };
  321. int err, delay;
  322. hdr.seq = dr->snd_nxt;
  323. delay = 1;
  324. do {
  325. err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
  326. if (err > 0) {
  327. dr->snd_nxt++;
  328. break;
  329. }
  330. udelay(delay);
  331. if ((delay <<= 1) > 128)
  332. delay = 128;
  333. } while (err == -EAGAIN);
  334. return err;
  335. }
/* Map one block request into the current TX descriptor and trigger the
 * server.  Runs as part of the request-queue strategy function, i.e.
 * with port->vio.lock (the queue lock) held.  The slot is only
 * consumed (prod advanced, req_id bumped) when the trigger succeeds.
 */
static int __send_request(struct request *req)
{
	struct vdc_port *port = req->rq_disk->private_data;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct scatterlist sg[port->ring_cookies];	/* VLA sized by probe-time ring_cookies */
	struct vdc_req_entry *rqe;
	struct vio_disk_desc *desc;
	unsigned int map_perm;
	int nsg, err, i;
	u64 len;
	u8 op;

	map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;

	if (rq_data_dir(req) == READ) {
		map_perm |= LDC_MAP_W;	/* server writes into our pages */
		op = VD_OP_BREAD;
	} else {
		map_perm |= LDC_MAP_R;	/* server reads from our pages */
		op = VD_OP_BWRITE;
	}

	sg_init_table(sg, port->ring_cookies);
	nsg = blk_rq_map_sg(req->q, req, sg);

	len = 0;
	for (i = 0; i < nsg; i++)
		len += sg[i].length;

	desc = vio_dring_cur(dr);

	err = ldc_map_sg(port->vio.lp, sg, nsg,
			 desc->cookies, port->ring_cookies,
			 map_perm);
	if (err < 0) {
		printk(KERN_ERR PFX "ldc_map_sg() failure, err=%d.\n", err);
		return err;
	}

	rqe = &port->rq_arr[dr->prod];
	rqe->req = req;

	desc->hdr.ack = VIO_ACK_ENABLE;
	desc->req_id = port->req_id;
	desc->operation = op;
	if (port->vdisk_type == VD_DISK_TYPE_DISK) {
		desc->slice = 0xff;	/* whole-disk access */
	} else {
		desc->slice = 0;
	}
	desc->status = ~0;		/* server overwrites on completion */
	desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
	desc->size = len;
	desc->ncookies = err;		/* ldc_map_sg() returned the cookie count */

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	wmb();
	desc->hdr.state = VIO_DESC_READY;

	err = __vdc_tx_trigger(port);
	if (err < 0) {
		printk(KERN_ERR PFX "vdc_tx_trigger() failure, err=%d\n", err);
	} else {
		port->req_id++;
		dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
	}

	return err;
}
/* Request-queue strategy function: feed queued requests into the TX
 * dring, stopping the queue when the ring is full or a send fails so
 * vdc_end_one() can restart it later.  Called with the queue lock
 * (port->vio.lock, see blk_init_queue() in probe_disk()) held.
 */
static void do_vdc_request(struct request_queue *rq)
{
	struct request *req;

	while ((req = blk_peek_request(rq)) != NULL) {
		struct vdc_port *port;
		struct vio_dring_state *dr;

		port = req->rq_disk->private_data;
		dr = &port->vio.drings[VIO_DRIVER_TX_RING];
		if (unlikely(vdc_tx_dring_avail(dr) < 1))
			goto wait;	/* ring full: stop without dequeueing */

		blk_start_request(req);

		if (__send_request(req) < 0) {
			blk_requeue_request(rq, req);

wait:
			/* Avoid pointless unplugs. */
			blk_stop_queue(rq);
			break;
		}
	}
}
  416. static int generic_request(struct vdc_port *port, u8 op, void *buf, int len)
  417. {
  418. struct vio_dring_state *dr;
  419. struct vio_completion comp;
  420. struct vio_disk_desc *desc;
  421. unsigned int map_perm;
  422. unsigned long flags;
  423. int op_len, err;
  424. void *req_buf;
  425. if (!(((u64)1 << (u64)op) & port->operations))
  426. return -EOPNOTSUPP;
  427. switch (op) {
  428. case VD_OP_BREAD:
  429. case VD_OP_BWRITE:
  430. default:
  431. return -EINVAL;
  432. case VD_OP_FLUSH:
  433. op_len = 0;
  434. map_perm = 0;
  435. break;
  436. case VD_OP_GET_WCE:
  437. op_len = sizeof(u32);
  438. map_perm = LDC_MAP_W;
  439. break;
  440. case VD_OP_SET_WCE:
  441. op_len = sizeof(u32);
  442. map_perm = LDC_MAP_R;
  443. break;
  444. case VD_OP_GET_VTOC:
  445. op_len = sizeof(struct vio_disk_vtoc);
  446. map_perm = LDC_MAP_W;
  447. break;
  448. case VD_OP_SET_VTOC:
  449. op_len = sizeof(struct vio_disk_vtoc);
  450. map_perm = LDC_MAP_R;
  451. break;
  452. case VD_OP_GET_DISKGEOM:
  453. op_len = sizeof(struct vio_disk_geom);
  454. map_perm = LDC_MAP_W;
  455. break;
  456. case VD_OP_SET_DISKGEOM:
  457. op_len = sizeof(struct vio_disk_geom);
  458. map_perm = LDC_MAP_R;
  459. break;
  460. case VD_OP_SCSICMD:
  461. op_len = 16;
  462. map_perm = LDC_MAP_RW;
  463. break;
  464. case VD_OP_GET_DEVID:
  465. op_len = sizeof(struct vio_disk_devid);
  466. map_perm = LDC_MAP_W;
  467. break;
  468. case VD_OP_GET_EFI:
  469. case VD_OP_SET_EFI:
  470. return -EOPNOTSUPP;
  471. break;
  472. };
  473. map_perm |= LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO;
  474. op_len = (op_len + 7) & ~7;
  475. req_buf = kzalloc(op_len, GFP_KERNEL);
  476. if (!req_buf)
  477. return -ENOMEM;
  478. if (len > op_len)
  479. len = op_len;
  480. if (map_perm & LDC_MAP_R)
  481. memcpy(req_buf, buf, len);
  482. spin_lock_irqsave(&port->vio.lock, flags);
  483. dr = &port->vio.drings[VIO_DRIVER_TX_RING];
  484. /* XXX If we want to use this code generically we have to
  485. * XXX handle TX ring exhaustion etc.
  486. */
  487. desc = vio_dring_cur(dr);
  488. err = ldc_map_single(port->vio.lp, req_buf, op_len,
  489. desc->cookies, port->ring_cookies,
  490. map_perm);
  491. if (err < 0) {
  492. spin_unlock_irqrestore(&port->vio.lock, flags);
  493. kfree(req_buf);
  494. return err;
  495. }
  496. init_completion(&comp.com);
  497. comp.waiting_for = WAITING_FOR_GEN_CMD;
  498. port->vio.cmp = &comp;
  499. desc->hdr.ack = VIO_ACK_ENABLE;
  500. desc->req_id = port->req_id;
  501. desc->operation = op;
  502. desc->slice = 0;
  503. desc->status = ~0;
  504. desc->offset = 0;
  505. desc->size = op_len;
  506. desc->ncookies = err;
  507. /* This has to be a non-SMP write barrier because we are writing
  508. * to memory which is shared with the peer LDOM.
  509. */
  510. wmb();
  511. desc->hdr.state = VIO_DESC_READY;
  512. err = __vdc_tx_trigger(port);
  513. if (err >= 0) {
  514. port->req_id++;
  515. dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
  516. spin_unlock_irqrestore(&port->vio.lock, flags);
  517. wait_for_completion(&comp.com);
  518. err = comp.err;
  519. } else {
  520. port->vio.cmp = NULL;
  521. spin_unlock_irqrestore(&port->vio.lock, flags);
  522. }
  523. if (map_perm & LDC_MAP_W)
  524. memcpy(buf, req_buf, len);
  525. kfree(req_buf);
  526. return err;
  527. }
  528. static int vdc_alloc_tx_ring(struct vdc_port *port)
  529. {
  530. struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
  531. unsigned long len, entry_size;
  532. int ncookies;
  533. void *dring;
  534. entry_size = sizeof(struct vio_disk_desc) +
  535. (sizeof(struct ldc_trans_cookie) * port->ring_cookies);
  536. len = (VDC_TX_RING_SIZE * entry_size);
  537. ncookies = VIO_MAX_RING_COOKIES;
  538. dring = ldc_alloc_exp_dring(port->vio.lp, len,
  539. dr->cookies, &ncookies,
  540. (LDC_MAP_SHADOW |
  541. LDC_MAP_DIRECT |
  542. LDC_MAP_RW));
  543. if (IS_ERR(dring))
  544. return PTR_ERR(dring);
  545. dr->base = dring;
  546. dr->entry_size = entry_size;
  547. dr->num_entries = VDC_TX_RING_SIZE;
  548. dr->prod = dr->cons = 0;
  549. dr->pending = VDC_TX_RING_SIZE;
  550. dr->ncookies = ncookies;
  551. return 0;
  552. }
  553. static void vdc_free_tx_ring(struct vdc_port *port)
  554. {
  555. struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
  556. if (dr->base) {
  557. ldc_free_exp_dring(port->vio.lp, dr->base,
  558. (dr->entry_size * dr->num_entries),
  559. dr->cookies, dr->ncookies);
  560. dr->base = NULL;
  561. dr->entry_size = 0;
  562. dr->num_entries = 0;
  563. dr->pending = 0;
  564. dr->ncookies = 0;
  565. }
  566. }
/* Bring the VIO link up, determine the disk's size (from the v1.1
 * handshake or, for v1.0, from its geometry), and register a gendisk.
 * Sleeps for the handshake; returns a negative errno on failure.
 */
static int probe_disk(struct vdc_port *port)
{
	struct vio_completion comp;
	struct request_queue *q;
	struct gendisk *g;
	int err;

	init_completion(&comp.com);
	comp.err = 0;
	comp.waiting_for = WAITING_FOR_LINK_UP;
	port->vio.cmp = &comp;

	vio_port_up(&port->vio);

	wait_for_completion(&comp.com);
	if (comp.err)
		return comp.err;

	if (vdc_version_supported(port, 1, 1)) {
		/* vdisk_size should be set during the handshake, if it wasn't
		 * then the underlying disk is reserved by another system
		 */
		if (port->vdisk_size == -1)
			return -ENODEV;
	} else {
		/* v1.0 servers don't report a size; derive it from the
		 * disk geometry instead.
		 */
		struct vio_disk_geom geom;

		err = generic_request(port, VD_OP_GET_DISKGEOM,
				      &geom, sizeof(geom));
		if (err < 0) {
			printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
			       "error %d\n", err);
			return err;
		}
		port->vdisk_size = ((u64)geom.num_cyl *
				    (u64)geom.num_hd *
				    (u64)geom.num_sec);
	}

	/* port->vio.lock doubles as the block queue lock. */
	q = blk_init_queue(do_vdc_request, &port->vio.lock);
	if (!q) {
		printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
		       port->vio.name);
		return -ENOMEM;
	}
	g = alloc_disk(1 << PARTITION_SHIFT);
	if (!g) {
		printk(KERN_ERR PFX "%s: Could not allocate gendisk.\n",
		       port->vio.name);
		blk_cleanup_queue(q);
		return -ENOMEM;
	}

	port->disk = g;

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(q, PAGE_SIZE - 1);
	blk_queue_max_segment_size(q, PAGE_SIZE);

	blk_queue_max_segments(q, port->ring_cookies);
	blk_queue_max_hw_sectors(q, port->max_xfer_size);
	g->major = vdc_major;
	g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT;
	strcpy(g->disk_name, port->disk_name);

	g->fops = &vdc_fops;
	g->queue = q;
	g->private_data = port;
	g->driverfs_dev = &port->vio.vdev->dev;

	set_capacity(g, port->vdisk_size);

	if (vdc_version_supported(port, 1, 1)) {
		/* v1.1 reports the media type; CD/DVD are exported
		 * read-only and marked removable so udev's cdrom_id
		 * can identify them.
		 */
		switch (port->vdisk_mtype) {
		case VD_MEDIA_TYPE_CD:
			pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
			g->flags |= GENHD_FL_CD;
			g->flags |= GENHD_FL_REMOVABLE;
			set_disk_ro(g, 1);
			break;

		case VD_MEDIA_TYPE_DVD:
			pr_info(PFX "Virtual DVD %s\n", port->disk_name);
			g->flags |= GENHD_FL_CD;
			g->flags |= GENHD_FL_REMOVABLE;
			set_disk_ro(g, 1);
			break;

		case VD_MEDIA_TYPE_FIXED:
			pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
			break;
		}
	}

	pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
		g->disk_name,
		port->vdisk_size, (port->vdisk_size >> (20 - 9)),
		port->vio.ver.major, port->vio.ver.minor);

	add_disk(g);

	return 0;
}
/* LDC channel configuration shared by all vdc ports. */
static struct ldc_channel_config vdc_ldc_cfg = {
	.event = vdc_event,
	.mtu = 64,
	.mode = LDC_MODE_UNRELIABLE,
};

/* VIO handshake callbacks for the disk client. */
static struct vio_driver_ops vdc_vio_ops = {
	.send_attr = vdc_send_attr,
	.handle_attr = vdc_handle_attr,
	.handshake_complete = vdc_handshake_complete,
};
  663. static void print_version(void)
  664. {
  665. static int version_printed;
  666. if (version_printed++ == 0)
  667. printk(KERN_INFO "%s", version);
  668. }
  669. static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
  670. {
  671. struct mdesc_handle *hp;
  672. struct vdc_port *port;
  673. int err;
  674. print_version();
  675. hp = mdesc_grab();
  676. err = -ENODEV;
  677. if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) {
  678. printk(KERN_ERR PFX "Port id [%llu] too large.\n",
  679. vdev->dev_no);
  680. goto err_out_release_mdesc;
  681. }
  682. port = kzalloc(sizeof(*port), GFP_KERNEL);
  683. err = -ENOMEM;
  684. if (!port) {
  685. printk(KERN_ERR PFX "Cannot allocate vdc_port.\n");
  686. goto err_out_release_mdesc;
  687. }
  688. if (vdev->dev_no >= 26)
  689. snprintf(port->disk_name, sizeof(port->disk_name),
  690. VDCBLK_NAME "%c%c",
  691. 'a' + ((int)vdev->dev_no / 26) - 1,
  692. 'a' + ((int)vdev->dev_no % 26));
  693. else
  694. snprintf(port->disk_name, sizeof(port->disk_name),
  695. VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
  696. port->vdisk_size = -1;
  697. err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
  698. vdc_versions, ARRAY_SIZE(vdc_versions),
  699. &vdc_vio_ops, port->disk_name);
  700. if (err)
  701. goto err_out_free_port;
  702. port->vdisk_block_size = 512;
  703. port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size);
  704. port->ring_cookies = ((port->max_xfer_size *
  705. port->vdisk_block_size) / PAGE_SIZE) + 2;
  706. err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port);
  707. if (err)
  708. goto err_out_free_port;
  709. err = vdc_alloc_tx_ring(port);
  710. if (err)
  711. goto err_out_free_ldc;
  712. err = probe_disk(port);
  713. if (err)
  714. goto err_out_free_tx_ring;
  715. dev_set_drvdata(&vdev->dev, port);
  716. mdesc_release(hp);
  717. return 0;
  718. err_out_free_tx_ring:
  719. vdc_free_tx_ring(port);
  720. err_out_free_ldc:
  721. vio_ldc_free(&port->vio);
  722. err_out_free_port:
  723. kfree(port);
  724. err_out_release_mdesc:
  725. mdesc_release(hp);
  726. return err;
  727. }
  728. static int vdc_port_remove(struct vio_dev *vdev)
  729. {
  730. struct vdc_port *port = dev_get_drvdata(&vdev->dev);
  731. if (port) {
  732. del_timer_sync(&port->vio.timer);
  733. vdc_free_tx_ring(port);
  734. vio_ldc_free(&port->vio);
  735. dev_set_drvdata(&vdev->dev, NULL);
  736. kfree(port);
  737. }
  738. return 0;
  739. }
/* Match "vdc-port" machine-description nodes. */
static const struct vio_device_id vdc_port_match[] = {
	{
		.type = "vdc-port",
	},
	{},
};
MODULE_DEVICE_TABLE(vio, vdc_port_match);

static struct vio_driver vdc_port_driver = {
	.id_table = vdc_port_match,
	.probe = vdc_port_probe,
	.remove = vdc_port_remove,
	.name = "vdc_port",
};
  753. static int __init vdc_init(void)
  754. {
  755. int err;
  756. err = register_blkdev(0, VDCBLK_NAME);
  757. if (err < 0)
  758. goto out_err;
  759. vdc_major = err;
  760. err = vio_register_driver(&vdc_port_driver);
  761. if (err)
  762. goto out_unregister_blkdev;
  763. return 0;
  764. out_unregister_blkdev:
  765. unregister_blkdev(vdc_major, VDCBLK_NAME);
  766. vdc_major = 0;
  767. out_err:
  768. return err;
  769. }
/* Module exit: unregister in the reverse order of vdc_init(). */
static void __exit vdc_exit(void)
{
	vio_unregister_driver(&vdc_port_driver);
	unregister_blkdev(vdc_major, VDCBLK_NAME);
}

module_init(vdc_init);
module_exit(vdc_exit);