pktcdvd.c

/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License. See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom make_request_fn function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
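/*
 * Illustrative example of the alignment restriction (assumed numbers, not
 * from this file): with 32KiB fixed packets, i.e. pd->settings.size == 64
 * sectors, a 4KiB write to sector 72 cannot be passed through as-is. The
 * driver reads the missing frames of the zone starting at sector 64, merges
 * the new 4KiB into the assembled packet, and issues one aligned 64-sector
 * write for the whole zone.
 */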
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <asm/uaccess.h>

#define DRIVER_NAME	"pktcdvd"

#define pkt_err(pd, fmt, ...)						\
	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_notice(pd, fmt, ...)					\
	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
#define pkt_info(pd, fmt, ...)						\
	pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_dbg(level, pd, fmt, ...)					\
do {									\
	if (level == 2 && PACKET_DEBUG >= 2)				\
		pr_notice("%s: %s():" fmt,				\
			  pd->name, __func__, ##__VA_ARGS__);		\
	else if (level == 1 && PACKET_DEBUG >= 1)			\
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
} while (0)

#define MAX_SPEED 0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);

static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
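/*
 * Worked example (illustrative, assuming pd->offset == 0 and a packet size
 * of 64 sectors): the mask is ~63, so get_zone(70, pd) == 64 and
 * get_zone(63, pd) == 0; every sector maps to the start sector of the
 * packet-sized zone that contains it.
 */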
/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
					const char* name,
					struct kobject* parent,
					struct kobj_type* ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}
/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}
/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}


/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/

#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/

DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on",  0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};

static ssize_t kobj_pkt_show(struct kobject *kobj,
			struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;

	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	return n;
}

static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
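/*
 * Example of the clamping above (illustrative values): writing 200 to the
 * congestion_on attribute yields *hi == 500 (raised to the minimum), and
 * with *lo unset (<= 0) the off mark becomes *hi - 100 == 400. Writing 0
 * or a negative value disables the marks entirely (both become -1).
 */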
static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}

static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};

static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}


/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/

static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}
static ssize_t class_pktcdvd_show_map(struct class *c,
				struct class_attribute *attr,
				char *data)
{
	int n = 0;
	int idx;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}

static ssize_t class_pktcdvd_store_add(struct class *c,
				struct class_attribute *attr,
				const char *buf,
				size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}

static ssize_t class_pktcdvd_store_remove(struct class *c,
				struct class_attribute *attr,
				const char *buf,
				size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}

static struct class_attribute class_pktcdvd_attrs[] = {
	__ATTR(add,		0200, NULL, class_pktcdvd_store_add),
	__ATTR(remove,		0200, NULL, class_pktcdvd_store_remove),
	__ATTR(device_map,	0444, class_pktcdvd_show_map, NULL),
	__ATTR_NULL
};
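/*
 * Illustrative userspace sketch (not part of the driver; kept under
 * "#if 0" like the other disabled blocks in this file): map a CD device
 * to a new pktcdvd device by writing "major:minor" to the "add" attribute
 * created above. The dev_t 11:0 for /dev/sr0 is an assumption made for
 * the example.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int pkt_map_example(void)
{
	int fd = open("/sys/class/pktcdvd/add", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, "11:0", 4);	/* handled by class_pktcdvd_store_add() */
	close(fd);
	return (n == 4) ? 0 : -1;
}
#endif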
static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_attrs = class_pktcdvd_attrs;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		pr_err("failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}

static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/

static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (!pd->dfs_d_root)
		return;

	pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
					pd->dfs_d_root, pd, &debug_fops);
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
}

static void pkt_debugfs_cleanup(void)
{
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}

/* ----------------------------------------------------------*/


static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, pd, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}
/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
		if (!bio)
			goto no_rd_bio;

		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}

/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}

static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}

static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}

/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_iter.bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_iter.bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
	return tmp;
}
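/*
 * Illustrative example of the successor semantics above: with queued bios
 * starting at sectors 10, 20 and 30, pkt_rbtree_find(pd, 15) returns the
 * node for sector 20 (the first node at or beyond 15), while
 * pkt_rbtree_find(pd, 31) returns NULL.
 */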
/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_iter.bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_iter.bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}

/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     WRITE : READ, __GFP_WAIT);
	if (IS_ERR(rq))
		return PTR_ERR(rq);
	blk_rq_set_block_pc(rq);

	if (cgc->buflen) {
		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				      __GFP_WAIT);
		if (ret)
			goto out;
	}

	rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	if (cgc->quiet)
		rq->cmd_flags |= REQ_QUIET;

	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
	if (rq->errors)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}

static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}

/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct pktcdvd_device *pd,
			   struct packet_command *cgc)
{
	struct request_sense *sense = cgc->sense;

	if (sense)
		pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
			CDROM_PACKET_SIZE, cgc->cmd,
			sense->sense_key, sense->asc, sense->ascq,
			sense_key_string(sense->sense_key));
	else
		pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}

/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}
/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(pd, &cgc);

	return ret;
}

/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}

/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_iter.bi_sector ==
				    pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads +=
				bio->bi_iter.bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}
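/*
 * Example of the speed hysteresis above (illustrative; successive_reads is
 * accumulated in kilobytes, so HI_SPEED_SWITCH is assumed to be a KB
 * threshold): after a long run of reads with no intervening write, the
 * counter crosses the threshold and the read speed is raised to MAX_SPEED.
 * The first write resets the counter, and the next pass drops the read
 * speed back to the write speed, avoiding frequent costly speed changes
 * while the workload alternates between reading and writing.
 */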
/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		pkt_err(pd, "cdrom max_phys_segments too small\n");
		return -EIO;
	}
}
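/*
 * Worked example (illustrative, assuming 4KiB pages and 32KiB packets,
 * i.e. pd->settings.size == 64 sectors): a packet spans 16 CD_FRAMESIZE
 * frames but only 8 pages. A queue with max_segments >= 16 needs no
 * merging; one allowing 8..15 segments forces PACKET_MERGE_SEGS (one
 * segment per page, paid for with extra copies); below 8 the device is
 * rejected with -EIO.
 */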
/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
{
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		if (bvec[f].bv_page != pkt->pages[p]) {
			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom);
			bvec[f].bv_page = pkt->pages[p];
			bvec[f].bv_offset = offs;
		} else {
			BUG_ON(bvec[f].bv_offset != offs);
		}
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			offs = 0;
			p++;
		}
	}
}

static void pkt_end_io_read(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_iter.bi_sector, err);

	if (err)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}

/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
			(CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, pd, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_reset(bio);
		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
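/*
 * Example of the read gathering above (illustrative): if the queued writes
 * for a 16-frame zone only cover frames 2..5, written[] marks those four
 * frames and the loop issues twelve single-frame reads for frames 0..1 and
 * 6..15, so the eventual packet write has complete data for the whole zone.
 */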
/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}

/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 */
static int pkt_start_recovery(struct packet_data *pkt)
{
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 */
	return 0;
#if 0
	struct request *rq = pkt->rq;
	struct pktcdvd_device *pd = rq->rq_disk->private_data;
	struct block_device *pkt_bdev;
	struct super_block *sb = NULL;
	unsigned long old_block, new_block;
	sector_t new_sector;

	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
	if (pkt_bdev) {
		sb = get_super(pkt_bdev);
		bdput(pkt_bdev);
	}

	if (!sb)
		return 0;

	if (!sb->s_op->relocate_blocks)
		goto out;

	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
		goto out;

	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	bio_reset(pkt->bio);
	pkt->bio->bi_bdev = pd->bdev;
	pkt->bio->bi_rw = REQ_WRITE;
	pkt->bio->bi_iter.bi_sector = new_sector;
	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
	pkt->bio->bi_vcnt = pkt->frames;

	pkt->bio->bi_end_io = pkt_end_io_packet_write;
	pkt->bio->bi_private = pkt;

	drop_super(sb);
	return 1;

out:
	drop_super(sb);
	return 0;
#endif
}

static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}

/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		pkt_dbg(2, pd, "no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_iter.bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		pkt_dbg(2, pd, "no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
			get_zone(bio->bi_iter.bi_sector, pd));
		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	   below, wake up any waiters */
	wakeup = (pd->write_congestion_on > 0
			&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup) {
		clear_bdi_congested(&pd->disk->queue->backing_dev_info,
					BLK_RW_ASYNC);
	}

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}
/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int f;
	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;

	/* XXX: locking? */
	for (f = 0; f < pkt->frames; f++) {
		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
			BUG();
	}
	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_copy_data(pkt->w_bio, pkt->orig_bios.head);

	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
		pkt_make_local_copy(pkt, bvec);
		pkt->cache_valid = 1;
	} else {
		pkt->cache_valid = 0;
	}

	/* Start the write request */
	atomic_set(&pkt->io_wait, 1);
	pkt->w_bio->bi_rw = WRITE;
	pkt_queue_bio(pd, pkt->w_bio);
}

static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
{
	struct bio *bio;

	if (!uptodate)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios)))
		bio_endio(bio, uptodate ? 0 : -EIO);
}

static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int uptodate;

	pkt_dbg(2, pd, "pkt %d\n", pkt->id);

	for (;;) {
		switch (pkt->state) {
		case PACKET_WAITING_STATE:
			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
				return;

			pkt->sleep_time = 0;
			pkt_gather_data(pd, pkt);
			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
			break;

		case PACKET_READ_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (atomic_read(&pkt->io_errors) > 0) {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			} else {
				pkt_start_write(pd, pkt);
			}
			break;

		case PACKET_WRITE_WAIT_STATE:
			if (atomic_read(&pkt->io_wait) > 0)
				return;

			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			} else {
				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
			}
			break;

		case PACKET_RECOVERY_STATE:
			if (pkt_start_recovery(pkt)) {
				pkt_start_write(pd, pkt);
			} else {
				pkt_dbg(2, pd, "No recovery possible\n");
				pkt_set_state(pkt, PACKET_FINISHED_STATE);
			}
			break;

		case PACKET_FINISHED_STATE:
			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
			pkt_finish_packet(pkt, uptodate);
			return;

		default:
			BUG();
			break;
		}
	}
}
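/*
 * Summary of the transitions driven above (state names as used by
 * pkt_set_state()):
 *
 *   WAITING -> READ_WAIT -> WRITE_WAIT -> FINISHED
 *                  |             |
 *                  +-> RECOVERY <+
 *                         |
 *                         +-> WRITE_WAIT  (recovery possible)
 *                         +-> FINISHED    (recovery impossible)
 */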
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, MIN_NICE);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2],
					states[3], states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			pkt_dbg(2, pd, "sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			pkt_dbg(2, pd, "wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}
  1345. static void pkt_print_settings(struct pktcdvd_device *pd)
  1346. {
  1347. pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
  1348. pd->settings.fp ? "Fixed" : "Variable",
  1349. pd->settings.size >> 2,
  1350. pd->settings.block_mode == 8 ? '1' : '2');
  1351. }
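
/*
 * Issue a MODE SENSE(10) for the given page code / page control and read
 * the result into cgc->buffer (cgc->buflen bytes).
 */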
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}
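
/*
 * Issue a MODE SELECT(10) that writes cgc->buffer back to the drive. The
 * mode data length field (first two bytes of the buffer) is cleared first,
 * as drives expect it to be zero on MODE SELECT.
 */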
static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}
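
/*
 * Read the disc information block: a two-byte probe first fetches the
 * length the drive reports, then the command is reissued for the real
 * size (capped at sizeof(disc_information)).
 */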
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
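
/*
 * Read track/rzone information for the given track, using the same
 * probe-then-resize pattern as pkt_get_disc_info().
 */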
static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
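
/*
 * Find the address of the last written block on the disc: use the last
 * recorded address of the last non-blank track if the drive reports it
 * as valid, otherwise estimate it from track start, size and free blocks.
 */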
static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret = -1;

	if ((ret = pkt_get_disc_info(pd, &di)))
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}

/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}

/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
			return 1;
		default:
			break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}

/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		case 0xffff: /* MMC3 not supported */
			break;
		case 0x1a: /* DVD+RW */
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
			return 1;
		default:
			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
				pd->mmc3_profile);
			return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track.
	 * but i'm not sure, should we leave this to user apps? probably.
	 */
	if (di->disc_type == 0xff) {
		pkt_notice(pd, "unknown disc - no track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		pkt_notice(pd, "disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		pkt_err(pd, "can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}
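
/*
 * Query the drive and the inserted medium, verify that packet writing is
 * possible, and fill in pd->settings (packet size, block mode, NWA/LRA)
 * accordingly.
 */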
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		pkt_err(pd, "failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		pkt_err(pd, "failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		pkt_err(pd, "can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		pkt_notice(pd, "detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		pkt_err(pd, "packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
		case PACKET_MODE1:
			pd->settings.block_mode = PACKET_BLOCK_MODE1;
			break;
		case PACKET_MODE2:
			pd->settings.block_mode = PACKET_BLOCK_MODE2;
			break;
		default:
			pkt_err(pd, "unknown data mode\n");
			return -EROFS;
	}
	return 0;
}

/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
						int set)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
		return ret;

	buf[pd->mode_offset + 10] |= (!!set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		pkt_err(pd, "write caching control failed\n");
		pkt_dump_sense(pd, &cgc);
	} else if (!ret && set)
		pkt_notice(pd, "enabled write caching\n");
	return ret;
}
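
/*
 * Lock or unlock the drive door via PREVENT ALLOW MEDIUM REMOVAL.
 */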
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
}

/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sense = &sense;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(pd, &cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}

/* These tables from cdrecord - I don't have orange book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
};

/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
						unsigned *speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4; /* READ ATIP */
	cgc.cmd[8] = 2;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4;
	cgc.cmd[8] = size;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(pd, &cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		pkt_notice(pd, "disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
		case 0: /* standard speed */
			*speed = clv_to_speed[sp];
			break;
		case 1: /* high speed */
			*speed = hs_clv_to_speed[sp];
			break;
		case 2: /* ultra high speed */
			*speed = us_clv_to_speed[sp];
			break;
		default:
			pkt_notice(pd, "unknown disc sub-type %d\n", st);
			return 1;
	}
	if (*speed) {
		pkt_info(pd, "maximum media speed: %d\n", *speed);
		return 0;
	} else {
		pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
		return 1;
	}
}
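
/*
 * Perform OPC (Optimum Power Calibration): ask the drive to calibrate
 * its laser power against the inserted medium before we start writing.
 */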
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	pkt_dbg(2, pd, "Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.timeout = 60*HZ;
	cgc.cmd[0] = GPCMD_SEND_OPC;
	cgc.cmd[1] = 1;
	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(pd, &cgc);
	return ret;
}
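
/*
 * Prepare the device for writing: probe the medium, program the write
 * parameters page, configure caching and read/write speed, and run OPC.
 */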
static int pkt_open_write(struct pktcdvd_device *pd)
{
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	if ((ret = pkt_probe_settings(pd))) {
		pkt_dbg(2, pd, "failed probe\n");
		return ret;
	}

	if ((ret = pkt_set_write_settings(pd))) {
		pkt_dbg(1, pd, "failed saving write settings\n");
		return -EIO;
	}

	pkt_write_caching(pd, USE_WCACHING);

	if ((ret = pkt_get_max_speed(pd, &write_speed)))
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		case 0x1a: /* DVD+RW */
		case 0x12: /* DVD-RAM */
			pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
			break;
		default:
			if ((ret = pkt_media_speed(pd, &media_write_speed)))
				media_write_speed = 16;
			write_speed = min(write_speed, media_write_speed * 177);
			pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
			break;
	}
	read_speed = write_speed;

	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
		pkt_dbg(1, pd, "couldn't set write speed\n");
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	if ((ret = pkt_perform_opc(pd))) {
		pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
	}

	return 0;
}

/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
	int ret;
	long lba;
	struct request_queue *q;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so bdget() can't fail.
	 */
	bdget(pd->bdev->bd_dev);
	if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
		goto out;

	if ((ret = pkt_get_last_written(pd, &lba))) {
		pkt_err(pd, "pkt_get_last_written failed\n");
		goto out_putdev;
	}

	set_capacity(pd->disk, lba << 2);
	set_capacity(pd->bdev->bd_disk, lba << 2);
	bd_set_size(pd->bdev, (loff_t)lba << 11);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		if ((ret = pkt_open_write(pd)))
			goto out_putdev;
		/*
		 * Some CDRW drives can not handle writes larger than one packet,
		 * even if the size is a multiple of the packet size.
		 */
		spin_lock_irq(q->queue_lock);
		blk_queue_max_hw_sectors(q, pd->settings.size);
		spin_unlock_irq(q->queue_lock);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	if ((ret = pkt_set_segment_merging(pd, q)))
		goto out_putdev;

	if (write) {
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			pkt_err(pd, "not enough memory for buffers\n");
			ret = -ENOMEM;
			goto out_putdev;
		}
		pkt_info(pd, "%lukB available on disc\n", lba << 1);
	}

	return 0;

out_putdev:
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
out:
	return ret;
}

/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	if (flush && pkt_flush_cache(pd))
		pkt_dbg(1, pd, "not flushing cache\n");

	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

	pkt_shrink_pktlist(pd);
}
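
/*
 * Map a pktcdvd minor number to its device, or NULL if it isn't set up.
 */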
static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
	if (dev_minor >= MAX_WRITERS)
		return NULL;
	return pkt_devs[dev_minor];
}

static int pkt_open(struct block_device *bdev, fmode_t mode)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
	return ret;
}

static void pkt_close(struct gendisk *disk, fmode_t mode)
{
	struct pktcdvd_device *pd = disk->private_data;

	mutex_lock(&pktcdvd_mutex);
	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	mutex_unlock(&pktcdvd_mutex);
}
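
/*
 * READ bios are passed straight through to the underlying CD-ROM device,
 * but cloned first so we can hook our own completion handler and keep
 * per-device read statistics.
 */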
static void pkt_end_io_read_cloned(struct bio *bio, int err)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	bio_put(bio);
	bio_endio(psd->bio, err);
	mempool_free(psd, psd_pool);
	pkt_bio_finished(pd);
}

static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
{
	struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
	struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);

	psd->pd = pd;
	psd->bio = bio;
	cloned_bio->bi_bdev = pd->bdev;
	cloned_bio->bi_private = psd;
	cloned_bio->bi_end_io = pkt_end_io_read_cloned;
	pd->stats.secs_r += bio_sectors(bio);
	pkt_queue_bio(pd, cloned_bio);
}
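
/*
 * Queue a WRITE bio: append it to an active packet for the same zone if
 * one is still collecting data, otherwise throttle against the write
 * congestion marks and park it in the per-device rb-tree work queue.
 */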
static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
{
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone;
	struct packet_data *pkt;
	int was_empty, blocked_bio;
	struct pkt_rb_node *node;

	zone = get_zone(bio->bi_iter.bi_sector, pd);

	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	blocked_bio = 0;
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				bio_list_add(&pkt->orig_bios, bio);
				pkt->write_size +=
					bio->bi_iter.bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				}
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
				return;
			} else {
				blocked_bio = 1;
			}
			spin_unlock(&pkt->lock);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);

	/*
	 * Test if there is enough room left in the bio work queue
	 * (queue size >= congestion on mark).
	 * If not, wait till the work queue size is below the congestion off mark.
	 */
	spin_lock(&pd->lock);
	if (pd->write_congestion_on > 0
	    && pd->bio_queue_size >= pd->write_congestion_on) {
		set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
		do {
			spin_unlock(&pd->lock);
			congestion_wait(BLK_RW_ASYNC, HZ);
			spin_lock(&pd->lock);
		} while (pd->bio_queue_size > pd->write_congestion_off);
	}
	spin_unlock(&pd->lock);

	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
}
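
/*
 * make_request entry point: reads are cloned and passed through, writes
 * are validated and split on packet (zone) boundaries before being queued.
 */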
static void pkt_make_request(struct request_queue *q, struct bio *bio)
{
	struct pktcdvd_device *pd;
	char b[BDEVNAME_SIZE];
	struct bio *split;

	pd = q->queuedata;
	if (!pd) {
		pr_err("%s incorrect request queue\n",
		       bdevname(bio->bi_bdev, b));
		goto end_io;
	}

	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_iter.bi_sector,
		(unsigned long long)bio_end_sector(bio));

	/*
	 * Clone READ bios so we can have our own bi_end_io callback.
	 */
	if (bio_data_dir(bio) == READ) {
		pkt_make_request_read(pd, bio);
		return;
	}

	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		pkt_notice(pd, "WRITE for ro device (%llu)\n",
			   (unsigned long long)bio->bi_iter.bi_sector);
		goto end_io;
	}

	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
		pkt_err(pd, "wrong bio size\n");
		goto end_io;
	}

	blk_queue_bounce(q, &bio);

	do {
		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);

		if (last_zone != zone) {
			BUG_ON(last_zone != zone + pd->settings.size);

			split = bio_split(bio, last_zone -
					  bio->bi_iter.bi_sector,
					  GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}

		pkt_make_request_write(q, split);
	} while (split != bio);

	return;
end_io:
	bio_io_error(bio);
}
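
/*
 * Limit bio merging so a bio never grows past the current packet, while
 * still guaranteeing that a bio of up to PAGE_SIZE is always accepted
 * (pkt_make_request() splits it if it crosses a packet boundary).
 */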
static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone = get_zone(bmd->bi_sector, pd);
	int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
	int remaining = (pd->settings.size << 9) - used;
	int remaining2;

	/*
	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
	 * boundary, pkt_make_request() will split the bio.
	 */
	remaining2 = PAGE_SIZE - bmd->bi_size;
	remaining = max(remaining, remaining2);

	BUG_ON(remaining < 0);
	return remaining;
}

static void pkt_init_queue(struct pktcdvd_device *pd)
{
	struct request_queue *q = pd->disk->queue;

	blk_queue_make_request(q, pkt_make_request);
	blk_queue_logical_block_size(q, CD_FRAMESIZE);
	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
	blk_queue_merge_bvec(q, pkt_merge_bvec);
	q->queuedata = pd;
}
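
/*
 * seq_file show routine for the per-writer /proc entry: dump the current
 * settings, statistics and queue state.
 */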
static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	char bdev_buf[BDEVNAME_SIZE];
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
		   bdevname(pd->bdev, bdev_buf));

	seq_printf(m, "\nSettings:\n");
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
		   pd->write_congestion_off,
		   pd->write_congestion_on);
	return 0;
}

static int pkt_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_seq_show, PDE_DATA(inode));
}

static const struct file_operations pkt_proc_fops = {
	.open	= pkt_seq_open,
	.read	= seq_read,
	.llseek	= seq_lseek,
	.release = single_release
};
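
/*
 * Attach a pktcdvd device to the underlying block device: validate the
 * mapping, take a reference on the CD-ROM device, start the kcdrwd
 * worker thread and create the /proc entry.
 */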
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
	int i;
	int ret = 0;
	char b[BDEVNAME_SIZE];
	struct block_device *bdev;

	if (pd->pkt_dev == dev) {
		pkt_err(pd, "recursive setup not allowed\n");
		return -EBUSY;
	}
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];
		if (!pd2)
			continue;
		if (pd2->bdev->bd_dev == dev) {
			pkt_err(pd, "%s already setup\n",
				bdevname(pd2->bdev, b));
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			pkt_err(pd, "can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev = bdget(dev);
	if (!bdev)
		return -ENOMEM;
	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
	if (ret)
		return ret;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	pd->bdev = bdev;
	set_blocksize(bdev, CD_FRAMESIZE);

	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
	if (IS_ERR(pd->cdrw.thread)) {
		pkt_err(pd, "can't start kernel thread\n");
		ret = -ENOMEM;
		goto out_mem;
	}

	proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
	pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
	return 0;

out_mem:
	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return ret;
}

static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	int ret;

	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	mutex_lock(&pktcdvd_mutex);
	switch (cmd) {
	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		/* fallthru */
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
		break;

	default:
		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
		ret = -ENOTTY;
	}
	mutex_unlock(&pktcdvd_mutex);

	return ret;
}
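
/*
 * Forward media change events from the attached CD-ROM device, so the
 * pktcdvd disk reports the same eject/insert events.
 */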
static unsigned int pkt_check_events(struct gendisk *disk,
				     unsigned int clearing)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	if (!pd)
		return 0;
	if (!pd->bdev)
		return 0;
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk || !attached_disk->fops->check_events)
		return 0;
	return attached_disk->fops->check_events(attached_disk, clearing);
}

static const struct block_device_operations pktcdvd_ops = {
	.owner =		THIS_MODULE,
	.open =			pkt_open,
	.release =		pkt_close,
	.ioctl =		pkt_ioctl,
	.check_events =		pkt_check_events,
};

static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
}

/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
{
	int idx;
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	struct gendisk *disk;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++)
		if (!pkt_devs[idx])
			break;
	if (idx == MAX_WRITERS) {
		pr_err("max %d writers supported\n", MAX_WRITERS);
		ret = -EBUSY;
		goto out_mutex;
	}

	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
		goto out_mutex;

	pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
						  sizeof(struct pkt_rb_node));
	if (!pd->rb_pool)
		goto out_mem;

	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	bio_list_init(&pd->iosched.read_queue);
	bio_list_init(&pd->iosched.write_queue);
	sprintf(pd->name, DRIVER_NAME"%d", idx);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on  = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;

	disk = alloc_disk(1);
	if (!disk)
		goto out_mem;
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE;
	strcpy(disk->disk_name, pd->name);
	disk->devnode = pktcdvd_devnode;
	disk->private_data = pd;
	disk->queue = blk_alloc_queue(GFP_KERNEL);
	if (!disk->queue)
		goto out_mem2;

	pd->pkt_dev = MKDEV(pktdev_major, idx);
	ret = pkt_new_dev(pd, dev);
	if (ret)
		goto out_new_dev;

	/* inherit events of the host device */
	disk->events = pd->bdev->bd_disk->events;
	disk->async_events = pd->bdev->bd_disk->async_events;

	add_disk(disk);

	pkt_sysfs_dev_new(pd);
	pkt_debugfs_dev_new(pd);

	pkt_devs[idx] = pd;
	if (pkt_dev)
		*pkt_dev = pd->pkt_dev;

	mutex_unlock(&ctl_mutex);
	return 0;

out_new_dev:
	blk_cleanup_queue(disk->queue);
out_mem2:
	put_disk(disk);
out_mem:
	if (pd->rb_pool)
		mempool_destroy(pd->rb_pool);
	kfree(pd);
out_mutex:
	mutex_unlock(&ctl_mutex);
	pr_err("setup of pktcdvd device failed\n");
	return ret;
}

/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
	struct pktcdvd_device *pd;
	int idx;
	int ret = 0;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++) {
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
			break;
	}
	if (idx == MAX_WRITERS) {
		pr_debug("dev not setup\n");
		ret = -ENXIO;
		goto out;
	}

	if (pd->refcnt > 0) {
		ret = -EBUSY;
		goto out;
	}
	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);

	pkt_devs[idx] = NULL;

	pkt_debugfs_dev_remove(pd);
	pkt_sysfs_dev_remove(pd);

	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);

	remove_proc_entry(pd->name, pkt_proc);
	pkt_dbg(1, pd, "writer unmapped\n");

	del_gendisk(pd->disk);
	blk_cleanup_queue(pd->disk->queue);
	put_disk(pd->disk);

	mempool_destroy(pd->rb_pool);
	kfree(pd);

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

out:
	mutex_unlock(&ctl_mutex);
	return ret;
}

static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}
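
/*
 * ioctl handler for the pktcdvd/control misc device: dispatch the
 * PACKET_CTRL_CMD setup/teardown/status commands issued by userspace
 * setup tools.
 */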
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}

#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations pkt_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= pkt_ctl_compat_ioctl,
#endif
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
};

static struct miscdevice pkt_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DRIVER_NAME,
	.nodename	= "pktcdvd/control",
	.fops		= &pkt_ctl_fops
};

static int __init pkt_init(void)
{
	int ret;

	mutex_init(&ctl_mutex);

	psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
					sizeof(struct packet_stacked_data));
	if (!psd_pool)
		return -ENOMEM;

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		pr_err("unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = pkt_sysfs_init();
	if (ret)
		goto out;

	pkt_debugfs_init();

	ret = misc_register(&pkt_misc);
	if (ret) {
		pr_err("unable to register misc device\n");
		goto out_misc;
	}

	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

	return 0;

out_misc:
	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();
out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_destroy(psd_pool);
	return ret;
}

static void __exit pkt_exit(void)
{
	remove_proc_entry("driver/"DRIVER_NAME, NULL);
	misc_deregister(&pkt_misc);

	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();

	unregister_blkdev(pktdev_major, DRIVER_NAME);
	mempool_destroy(psd_pool);
}

MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);