/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom make_request_fn function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
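/*
 * Illustrative sketch of the layering described above (arrows show the
 * path a bio takes; the names refer to functions in this file):
 *
 *	incoming bios (make_request_fn)
 *	  |                      |
 *	reads            unaligned writes
 *	  |                      |
 *	  |         kernel thread: read gathering,
 *	  |         assembly into full packets
 *	  |                      |
 *	  +--> packet I/O scheduler <--+
 *	      (pkt_iosched_process_queue)
 *	                 |
 *	     CD/DVD driver (ide-cd.c / sr.c)
 */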
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <asm/uaccess.h>

#define DRIVER_NAME	"pktcdvd"

#define pkt_dbg(level, fmt, ...)					\
do {									\
	if (level == 2 && PACKET_DEBUG >= 2)				\
		pr_notice("%s: " fmt, __func__, ##__VA_ARGS__);		\
	else if (level == 1 && PACKET_DEBUG >= 1)			\
		pr_notice(fmt, ##__VA_ARGS__);				\
} while (0)

#define MAX_SPEED 0xffff

static DEFINE_MUTEX(pktcdvd_mutex);
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);

static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
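/*
 * Worked example (illustrative numbers): with a packet size of
 * pd->settings.size = 64 sectors (32 KiB) and pd->offset = 0, the mask
 * is ~63, so sectors 0..63 all map to zone 0 and sector 1000 maps to
 * zone 960. A zone is simply the packet-aligned window of the device
 * that contains the given sector.
 */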
/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
					const char* name,
					struct kobject* parent,
					struct kobj_type* ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}
/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}
/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}

/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/

#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/
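/*
 * Example usage from user space (illustrative; assumes the first mapped
 * device is pktcdvd0):
 *
 *   cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written
 *   echo 1 > /sys/class/pktcdvd/pktcdvd0/stat/reset
 *   echo 1000 > /sys/class/pktcdvd/pktcdvd0/write_queue/congestion_on
 */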
DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on", 0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};

static ssize_t kobj_pkt_show(struct kobject *kobj,
			struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;

	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	return n;
}

static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
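/*
 * Worked example (illustrative): writing 200 to congestion_on first
 * clamps the high mark into [500, 1000000], giving *hi = 500; if the
 * low mark was unset (<= 0) it becomes *hi - 100 = 400. The marks thus
 * always end up at least 100 apart with *lo >= 100, or congestion
 * handling is disabled entirely (both set to -1).
 */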
static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}

static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};

static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}


/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/
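/*
 * Example usage from user space (illustrative; 11:0 is typically the
 * first SCSI CD-ROM, and "remove" takes the device numbers shown for
 * the packet device by device_map):
 *
 *   echo "11:0" > /sys/class/pktcdvd/add
 *   cat /sys/class/pktcdvd/device_map
 *   echo "major:minor" > /sys/class/pktcdvd/remove
 */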
static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}
static ssize_t class_pktcdvd_show_map(struct class *c,
					struct class_attribute *attr,
					char *data)
{
	int n = 0;
	int idx;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}

static ssize_t class_pktcdvd_store_add(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}

static ssize_t class_pktcdvd_store_remove(struct class *c,
					  struct class_attribute *attr,
					  const char *buf,
					  size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}

static struct class_attribute class_pktcdvd_attrs[] = {
	__ATTR(add,		0200, NULL, class_pktcdvd_store_add),
	__ATTR(remove,		0200, NULL, class_pktcdvd_store_remove),
	__ATTR(device_map,	0444, class_pktcdvd_show_map, NULL),
	__ATTR_NULL
};

static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_attrs = class_pktcdvd_attrs;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		pr_err("failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}

static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/

static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (IS_ERR(pd->dfs_d_root)) {
		pd->dfs_d_root = NULL;
		return;
	}
	pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
				pd->dfs_d_root, pd, &debug_fops);
	if (IS_ERR(pd->dfs_f_info)) {
		pd->dfs_f_info = NULL;
		return;
	}
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	if (pd->dfs_f_info)
		debugfs_remove(pd->dfs_f_info);
	pd->dfs_f_info = NULL;
	if (pd->dfs_d_root)
		debugfs_remove(pd->dfs_d_root);
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
	if (IS_ERR(pkt_debugfs_root)) {
		pkt_debugfs_root = NULL;
		return;
	}
}

static void pkt_debugfs_cleanup(void)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}

/* ----------------------------------------------------------*/


static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}

/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
		if (!bio)
			goto no_rd_bio;
		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}

/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}

static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}

static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}

/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_sector);
	return tmp;
}
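/*
 * Worked example (illustrative): if the tree holds bios starting at
 * sectors 16, 64 and 128, then pkt_rbtree_find(pd, 64) returns the node
 * for 64, pkt_rbtree_find(pd, 70) returns the node for 128, and
 * pkt_rbtree_find(pd, 200) returns NULL because no bio starts at or
 * after that sector.
 */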
/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}
/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     WRITE : READ, __GFP_WAIT);

	if (cgc->buflen) {
		/* capture the return value so a mapping failure is
		 * propagated to the caller instead of being dropped */
		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				      __GFP_WAIT);
		if (ret)
			goto out;
	}

	rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	if (cgc->quiet)
		rq->cmd_flags |= REQ_QUIET;

	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
	if (rq->errors)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}
static const char *sense_key_string(__u8 index)
{
	static const char * const info[] = {
		"No sense", "Recovered error", "Not ready",
		"Medium error", "Hardware error", "Illegal request",
		"Unit attention", "Data protect", "Blank check",
	};

	return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
}

/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct packet_command *cgc)
{
	struct request_sense *sense = cgc->sense;

	if (sense)
		pr_err("%*ph - sense %02x.%02x.%02x (%s)\n",
		       CDROM_PACKET_SIZE, cgc->cmd,
		       sense->sense_key, sense->asc, sense->ascq,
		       sense_key_string(sense->sense_key));
	else
		pr_err("%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
}

/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}

/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(&cgc);

	return ret;
}

/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}

/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_sector == pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, "write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, "read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads += bio->bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}

/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		pr_err("cdrom max_phys_segments too small\n");
		return -EIO;
	}
}
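/*
 * Worked example (illustrative, assuming 4 KiB pages): with
 * pd->settings.size = 64 sectors, a packet is 64 << 9 = 32768 bytes,
 * i.e. 32768 / CD_FRAMESIZE = 16 frames or 32768 / PAGE_SIZE = 8 pages.
 * A queue limit of at least 16 segments needs no merging, 8..15
 * segments forces PACKET_MERGE_SEGS, and fewer than 8 cannot be
 * supported at all.
 */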
/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
{
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		if (bvec[f].bv_page != pkt->pages[p]) {
			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom);
			bvec[f].bv_page = pkt->pages[p];
			bvec[f].bv_offset = offs;
		} else {
			BUG_ON(bvec[f].bv_offset != offs);
		}
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			offs = 0;
			p++;
		}
	}
}

static void pkt_end_io_read(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_sector, err);

	if (err)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, "id=%d, err=%d\n", pkt->id, err);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}

/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		bio_reset(bio);
		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		pkt_dbg(2, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	pkt_dbg(2, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
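/*
 * Worked example (illustrative): a frame is CD_FRAMESIZE = 2048 bytes,
 * i.e. CD_FRAMESIZE >> 9 = 4 sectors. A 4 KiB bio starting 8 sectors
 * into the zone therefore covers frames 2 and 3 (first_frame = 8 / 4,
 * num_frames = 4096 / 2048), and only the packet's remaining frames
 * have to be read before the full packet can be written out.
 */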
  945. /*
  946. * Find a packet matching zone, or the least recently used packet if
  947. * there is no match.
  948. */
  949. static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
  950. {
  951. struct packet_data *pkt;
  952. list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
  953. if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
  954. list_del_init(&pkt->list);
  955. if (pkt->sector != zone)
  956. pkt->cache_valid = 0;
  957. return pkt;
  958. }
  959. }
  960. BUG();
  961. return NULL;
  962. }
  963. static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
  964. {
  965. if (pkt->cache_valid) {
  966. list_add(&pkt->list, &pd->cdrw.pkt_free_list);
  967. } else {
  968. list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
  969. }
  970. }
  971. /*
  972. * recover a failed write, query for relocation if possible
  973. *
  974. * returns 1 if recovery is possible, or 0 if not
  975. *
  976. */
  977. static int pkt_start_recovery(struct packet_data *pkt)
  978. {
  979. /*
  980. * FIXME. We need help from the file system to implement
  981. * recovery handling.
  982. */
  983. return 0;
  984. #if 0
  985. struct request *rq = pkt->rq;
  986. struct pktcdvd_device *pd = rq->rq_disk->private_data;
  987. struct block_device *pkt_bdev;
  988. struct super_block *sb = NULL;
  989. unsigned long old_block, new_block;
  990. sector_t new_sector;
  991. pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
  992. if (pkt_bdev) {
  993. sb = get_super(pkt_bdev);
  994. bdput(pkt_bdev);
  995. }
  996. if (!sb)
  997. return 0;
  998. if (!sb->s_op->relocate_blocks)
  999. goto out;
  1000. old_block = pkt->sector / (CD_FRAMESIZE >> 9);
  1001. if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
  1002. goto out;
  1003. new_sector = new_block * (CD_FRAMESIZE >> 9);
  1004. pkt->sector = new_sector;
  1005. bio_reset(pkt->bio);
  1006. pkt->bio->bi_bdev = pd->bdev;
  1007. pkt->bio->bi_rw = REQ_WRITE;
  1008. pkt->bio->bi_sector = new_sector;
  1009. pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
  1010. pkt->bio->bi_vcnt = pkt->frames;
  1011. pkt->bio->bi_end_io = pkt_end_io_packet_write;
  1012. pkt->bio->bi_private = pkt;
  1013. drop_super(sb);
  1014. return 1;
  1015. out:
  1016. drop_super(sb);
  1017. return 0;
  1018. #endif
  1019. }
  1020. static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
  1021. {
  1022. #if PACKET_DEBUG > 1
  1023. static const char *state_name[] = {
  1024. "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
  1025. };
  1026. enum packet_data_state old_state = pkt->state;
  1027. pkt_dbg(2, "pkt %2d : s=%6llx %s -> %s\n",
  1028. pkt->id, (unsigned long long)pkt->sector,
  1029. state_name[old_state], state_name[state]);
  1030. #endif
  1031. pkt->state = state;
  1032. }
  1033. /*
  1034. * Scan the work queue to see if we can start a new packet.
  1035. * returns non-zero if any work was done.
  1036. */
  1037. static int pkt_handle_queue(struct pktcdvd_device *pd)
  1038. {
  1039. struct packet_data *pkt, *p;
  1040. struct bio *bio = NULL;
  1041. sector_t zone = 0; /* Suppress gcc warning */
  1042. struct pkt_rb_node *node, *first_node;
  1043. struct rb_node *n;
  1044. int wakeup;
  1045. pkt_dbg(2, "\n");
  1046. atomic_set(&pd->scan_queue, 0);
  1047. if (list_empty(&pd->cdrw.pkt_free_list)) {
  1048. pkt_dbg(2, "no pkt\n");
  1049. return 0;
  1050. }
  1051. /*
  1052. * Try to find a zone we are not already working on.
  1053. */
  1054. spin_lock(&pd->lock);
  1055. first_node = pkt_rbtree_find(pd, pd->current_sector);
  1056. if (!first_node) {
  1057. n = rb_first(&pd->bio_queue);
  1058. if (n)
  1059. first_node = rb_entry(n, struct pkt_rb_node, rb_node);
  1060. }
  1061. node = first_node;
  1062. while (node) {
  1063. bio = node->bio;
  1064. zone = get_zone(bio->bi_sector, pd);
  1065. list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
  1066. if (p->sector == zone) {
  1067. bio = NULL;
  1068. goto try_next_bio;
  1069. }
  1070. }
  1071. break;
  1072. try_next_bio:
  1073. node = pkt_rbtree_next(node);
  1074. if (!node) {
  1075. n = rb_first(&pd->bio_queue);
  1076. if (n)
  1077. node = rb_entry(n, struct pkt_rb_node, rb_node);
  1078. }
  1079. if (node == first_node)
  1080. node = NULL;
  1081. }
  1082. spin_unlock(&pd->lock);
  1083. if (!bio) {
  1084. pkt_dbg(2, "no bio\n");
  1085. return 0;
  1086. }
  1087. pkt = pkt_get_packet_data(pd, zone);
  1088. pd->current_sector = zone + pd->settings.size;
  1089. pkt->sector = zone;
  1090. BUG_ON(pkt->frames != pd->settings.size >> 2);
  1091. pkt->write_size = 0;
  1092. /*
  1093. * Scan work queue for bios in the same zone and link them
  1094. * to this packet.
  1095. */
  1096. spin_lock(&pd->lock);
  1097. pkt_dbg(2, "looking for zone %llx\n", (unsigned long long)zone);
  1098. while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
  1099. bio = node->bio;
  1100. pkt_dbg(2, "found zone=%llx\n",
  1101. (unsigned long long)get_zone(bio->bi_sector, pd));
  1102. if (get_zone(bio->bi_sector, pd) != zone)
  1103. break;
  1104. pkt_rbtree_erase(pd, node);
  1105. spin_lock(&pkt->lock);
  1106. bio_list_add(&pkt->orig_bios, bio);
  1107. pkt->write_size += bio->bi_size / CD_FRAMESIZE;
  1108. spin_unlock(&pkt->lock);
  1109. }
  1110. /* check write congestion marks, and if bio_queue_size is
  1111. below, wake up any waiters */
  1112. wakeup = (pd->write_congestion_on > 0
  1113. && pd->bio_queue_size <= pd->write_congestion_off);
  1114. spin_unlock(&pd->lock);
  1115. if (wakeup) {
  1116. clear_bdi_congested(&pd->disk->queue->backing_dev_info,
  1117. BLK_RW_ASYNC);
  1118. }
  1119. pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
  1120. pkt_set_state(pkt, PACKET_WAITING_STATE);
  1121. atomic_set(&pkt->run_sm, 1);
  1122. spin_lock(&pd->cdrw.active_list_lock);
  1123. list_add(&pkt->list, &pd->cdrw.pkt_active_list);
  1124. spin_unlock(&pd->cdrw.active_list_lock);
  1125. return 1;
  1126. }
  1127. /*
  1128. * Assemble a bio to write one packet and queue the bio for processing
  1129. * by the underlying block device.
  1130. */
  1131. static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
  1132. {
  1133. int f;
  1134. struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
  1135. bio_reset(pkt->w_bio);
  1136. pkt->w_bio->bi_sector = pkt->sector;
  1137. pkt->w_bio->bi_bdev = pd->bdev;
  1138. pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
  1139. pkt->w_bio->bi_private = pkt;
  1140. /* XXX: locking? */
  1141. for (f = 0; f < pkt->frames; f++) {
  1142. bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
  1143. bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
  1144. if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
  1145. BUG();
  1146. }
  1147. pkt_dbg(2, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
  1148. /*
  1149. * Fill-in bvec with data from orig_bios.
  1150. */
  1151. spin_lock(&pkt->lock);
  1152. bio_copy_data(pkt->w_bio, pkt->orig_bios.head);
  1153. pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
  1154. spin_unlock(&pkt->lock);
  1155. pkt_dbg(2, "Writing %d frames for zone %llx\n",
  1156. pkt->write_size, (unsigned long long)pkt->sector);
  1157. if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
  1158. pkt_make_local_copy(pkt, bvec);
  1159. pkt->cache_valid = 1;
  1160. } else {
  1161. pkt->cache_valid = 0;
  1162. }
  1163. /* Start the write request */
  1164. atomic_set(&pkt->io_wait, 1);
  1165. pkt->w_bio->bi_rw = WRITE;
  1166. pkt_queue_bio(pd, pkt->w_bio);
  1167. }
  1168. static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
  1169. {
  1170. struct bio *bio;
  1171. if (!uptodate)
  1172. pkt->cache_valid = 0;
  1173. /* Finish all bios corresponding to this packet */
  1174. while ((bio = bio_list_pop(&pkt->orig_bios)))
  1175. bio_endio(bio, uptodate ? 0 : -EIO);
  1176. }
  1177. static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
  1178. {
  1179. int uptodate;
  1180. pkt_dbg(2, "pkt %d\n", pkt->id);
  1181. for (;;) {
  1182. switch (pkt->state) {
  1183. case PACKET_WAITING_STATE:
  1184. if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
  1185. return;
  1186. pkt->sleep_time = 0;
  1187. pkt_gather_data(pd, pkt);
  1188. pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
  1189. break;
  1190. case PACKET_READ_WAIT_STATE:
  1191. if (atomic_read(&pkt->io_wait) > 0)
  1192. return;
  1193. if (atomic_read(&pkt->io_errors) > 0) {
  1194. pkt_set_state(pkt, PACKET_RECOVERY_STATE);
  1195. } else {
  1196. pkt_start_write(pd, pkt);
  1197. }
  1198. break;
  1199. case PACKET_WRITE_WAIT_STATE:
  1200. if (atomic_read(&pkt->io_wait) > 0)
  1201. return;
  1202. if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
  1203. pkt_set_state(pkt, PACKET_FINISHED_STATE);
  1204. } else {
  1205. pkt_set_state(pkt, PACKET_RECOVERY_STATE);
  1206. }
  1207. break;
  1208. case PACKET_RECOVERY_STATE:
  1209. if (pkt_start_recovery(pkt)) {
  1210. pkt_start_write(pd, pkt);
  1211. } else {
  1212. pkt_dbg(2, "No recovery possible\n");
  1213. pkt_set_state(pkt, PACKET_FINISHED_STATE);
  1214. }
  1215. break;
  1216. case PACKET_FINISHED_STATE:
  1217. uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
  1218. pkt_finish_packet(pkt, uptodate);
  1219. return;
  1220. default:
  1221. BUG();
  1222. break;
  1223. }
  1224. }
  1225. }
  1226. static void pkt_handle_packets(struct pktcdvd_device *pd)
  1227. {
  1228. struct packet_data *pkt, *next;
  1229. pkt_dbg(2, "\n");
  1230. /*
  1231. * Run state machine for active packets
  1232. */
  1233. list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
  1234. if (atomic_read(&pkt->run_sm) > 0) {
  1235. atomic_set(&pkt->run_sm, 0);
  1236. pkt_run_state_machine(pd, pkt);
  1237. }
  1238. }
  1239. /*
  1240. * Move no longer active packets to the free list
  1241. */
  1242. spin_lock(&pd->cdrw.active_list_lock);
  1243. list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
  1244. if (pkt->state == PACKET_FINISHED_STATE) {
  1245. list_del(&pkt->list);
  1246. pkt_put_packet_data(pd, pkt);
  1247. pkt_set_state(pkt, PACKET_IDLE_STATE);
  1248. atomic_set(&pd->scan_queue, 1);
  1249. }
  1250. }
  1251. spin_unlock(&pd->cdrw.active_list_lock);
  1252. }
static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
        struct packet_data *pkt;
        int i;

        for (i = 0; i < PACKET_NUM_STATES; i++)
                states[i] = 0;

        spin_lock(&pd->cdrw.active_list_lock);
        list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
                states[pkt->state]++;
        }
        spin_unlock(&pd->cdrw.active_list_lock);
}

/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
        struct pktcdvd_device *pd = foobar;
        struct packet_data *pkt;
        long min_sleep_time, residue;

        set_user_nice(current, -20);
        set_freezable();

        for (;;) {
                DECLARE_WAITQUEUE(wait, current);

                /*
                 * Wait until there is something to do
                 */
                add_wait_queue(&pd->wqueue, &wait);
                for (;;) {
                        set_current_state(TASK_INTERRUPTIBLE);

                        /* Check if we need to run pkt_handle_queue */
                        if (atomic_read(&pd->scan_queue) > 0)
                                goto work_to_do;

                        /* Check if we need to run the state machine for some packet */
                        list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
                                if (atomic_read(&pkt->run_sm) > 0)
                                        goto work_to_do;
                        }

                        /* Check if we need to process the iosched queues */
                        if (atomic_read(&pd->iosched.attention) != 0)
                                goto work_to_do;

                        /* Otherwise, go to sleep */
                        if (PACKET_DEBUG > 1) {
                                int states[PACKET_NUM_STATES];
                                pkt_count_states(pd, states);
                                pkt_dbg(2, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
                                        states[0], states[1], states[2],
                                        states[3], states[4], states[5]);
                        }

                        min_sleep_time = MAX_SCHEDULE_TIMEOUT;
                        list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
                                if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
                                        min_sleep_time = pkt->sleep_time;
                        }

                        pkt_dbg(2, "sleeping\n");
                        residue = schedule_timeout(min_sleep_time);
                        pkt_dbg(2, "wake up\n");

                        /* make swsusp happy with our thread */
                        try_to_freeze();

                        list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
                                if (!pkt->sleep_time)
                                        continue;
                                pkt->sleep_time -= min_sleep_time - residue;
                                if (pkt->sleep_time <= 0) {
                                        pkt->sleep_time = 0;
                                        atomic_inc(&pkt->run_sm);
                                }
                        }

                        if (kthread_should_stop())
                                break;
                }
work_to_do:
                set_current_state(TASK_RUNNING);
                remove_wait_queue(&pd->wqueue, &wait);

                if (kthread_should_stop())
                        break;

                /*
                 * if pkt_handle_queue returns true, we can queue
                 * another request.
                 */
                while (pkt_handle_queue(pd))
                        ;

                /*
                 * Handle packet state machine
                 */
                pkt_handle_packets(pd);

                /*
                 * Handle iosched queues
                 */
                pkt_iosched_process_queue(pd);
        }

        return 0;
}

static void pkt_print_settings(struct pktcdvd_device *pd)
{
        pr_info("%s packets, %u blocks, Mode-%c disc\n",
                pd->settings.fp ? "Fixed" : "Variable",
                pd->settings.size >> 2,
                pd->settings.block_mode == 8 ? '1' : '2');
}
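
/*
 * Build and issue a MODE SENSE(10) command for the given page code and
 * page control value, reading cgc->buflen bytes into cgc->buffer.
 */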
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
        memset(cgc->cmd, 0, sizeof(cgc->cmd));

        cgc->cmd[0] = GPCMD_MODE_SENSE_10;
        cgc->cmd[2] = page_code | (page_control << 6);
        cgc->cmd[7] = cgc->buflen >> 8;
        cgc->cmd[8] = cgc->buflen & 0xff;
        cgc->data_direction = CGC_DATA_READ;
        return pkt_generic_packet(pd, cgc);
}
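
/*
 * Build and issue a MODE SELECT(10) command (PF bit set) that writes
 * the mode page in cgc->buffer back to the drive.
 */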
static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
        memset(cgc->cmd, 0, sizeof(cgc->cmd));
        memset(cgc->buffer, 0, 2);

        cgc->cmd[0] = GPCMD_MODE_SELECT_10;
        cgc->cmd[1] = 0x10;             /* PF */
        cgc->cmd[7] = cgc->buflen >> 8;
        cgc->cmd[8] = cgc->buflen & 0xff;
        cgc->data_direction = CGC_DATA_WRITE;
        return pkt_generic_packet(pd, cgc);
}
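
/*
 * Read the disc information block into *di, using a minimal two-byte
 * request first to learn how much data the drive can supply.
 */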
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
        struct packet_command cgc;
        int ret;

        /* set up command and get the disc info */
        init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
        cgc.cmd[0] = GPCMD_READ_DISC_INFO;
        cgc.cmd[8] = cgc.buflen = 2;
        cgc.quiet = 1;

        if ((ret = pkt_generic_packet(pd, &cgc)))
                return ret;

        /* not all drives have the same disc_info length, so requeue
         * packet with the length the drive tells us it can supply
         */
        cgc.buflen = be16_to_cpu(di->disc_information_length) +
                     sizeof(di->disc_information_length);

        if (cgc.buflen > sizeof(disc_information))
                cgc.buflen = sizeof(disc_information);

        cgc.cmd[8] = cgc.buflen;
        return pkt_generic_packet(pd, &cgc);
}
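
/*
 * Read the track/rzone information block for the given track into *ti,
 * re-issuing the command once the drive has reported its reply length.
 */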
static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
        struct packet_command cgc;
        int ret;

        init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
        cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
        cgc.cmd[1] = type & 3;
        cgc.cmd[4] = (track & 0xff00) >> 8;
        cgc.cmd[5] = track & 0xff;
        cgc.cmd[8] = 8;
        cgc.quiet = 1;

        if ((ret = pkt_generic_packet(pd, &cgc)))
                return ret;

        cgc.buflen = be16_to_cpu(ti->track_information_length) +
                     sizeof(ti->track_information_length);

        if (cgc.buflen > sizeof(track_information))
                cgc.buflen = sizeof(track_information);

        cgc.cmd[8] = cgc.buflen;
        return pkt_generic_packet(pd, &cgc);
}

static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
                                                   long *last_written)
{
        disc_information di;
        track_information ti;
        __u32 last_track;
        int ret = -1;

        if ((ret = pkt_get_disc_info(pd, &di)))
                return ret;

        last_track = (di.last_track_msb << 8) | di.last_track_lsb;
        if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
                return ret;

        /* if this track is blank, try the previous. */
        if (ti.blank) {
                last_track--;
                if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
                        return ret;
        }

        /* if last recorded field is valid, return it. */
        if (ti.lra_v) {
                *last_written = be32_to_cpu(ti.last_rec_address);
        } else {
                /* make it up instead */
                *last_written = be32_to_cpu(ti.track_start) +
                                be32_to_cpu(ti.track_size);
                if (ti.free_blocks)
                        *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
        }
        return 0;
}

/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
        struct packet_command cgc;
        struct request_sense sense;
        write_param_page *wp;
        char buffer[128];
        int ret, size;

        /* doesn't apply to DVD+RW or DVD-RAM */
        if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
                return 0;

        memset(buffer, 0, sizeof(buffer));
        init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
        cgc.sense = &sense;
        if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
                pkt_dump_sense(&cgc);
                return ret;
        }

        size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
        pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
        if (size > sizeof(buffer))
                size = sizeof(buffer);

        /*
         * now get it all
         */
        init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
        cgc.sense = &sense;
        if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
                pkt_dump_sense(&cgc);
                return ret;
        }

        /*
         * write page is offset header + block descriptor length
         */
        wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

        wp->fp = pd->settings.fp;
        wp->track_mode = pd->settings.track_mode;
        wp->write_type = pd->settings.write_type;
        wp->data_block_type = pd->settings.block_mode;

        wp->multi_session = 0;

#ifdef PACKET_USE_LS
        wp->link_size = 7;
        wp->ls_v = 1;
#endif

        if (wp->data_block_type == PACKET_BLOCK_MODE1) {
                wp->session_format = 0;
                wp->subhdr2 = 0x20;
        } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
                wp->session_format = 0x20;
                wp->subhdr2 = 8;
#if 0
                wp->mcn[0] = 0x80;
                memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
        } else {
                /*
                 * paranoia
                 */
                pr_err("write mode wrong %d\n", wp->data_block_type);
                return 1;
        }
        wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

        cgc.buflen = cgc.cmd[8] = size;
        if ((ret = pkt_mode_select(pd, &cgc))) {
                pkt_dump_sense(&cgc);
                return ret;
        }

        pkt_print_settings(pd);
        return 0;
}

/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
        switch (pd->mmc3_profile) {
        case 0x1a: /* DVD+RW */
        case 0x12: /* DVD-RAM */
                /* The track is always writable on DVD+RW/DVD-RAM */
                return 1;
        default:
                break;
        }

        if (!ti->packet || !ti->fp)
                return 0;

        /*
         * "good" settings as per Mt Fuji.
         */
        if (ti->rt == 0 && ti->blank == 0)
                return 1;

        if (ti->rt == 0 && ti->blank == 1)
                return 1;

        if (ti->rt == 1 && ti->blank == 0)
                return 1;

        pr_err("bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
        return 0;
}

/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
        switch (pd->mmc3_profile) {
        case 0x0a: /* CD-RW */
        case 0xffff: /* MMC3 not supported */
                break;
        case 0x1a: /* DVD+RW */
        case 0x13: /* DVD-RW */
        case 0x12: /* DVD-RAM */
                return 1;
        default:
                pkt_dbg(2, "Wrong disc profile (%x)\n",
                        pd->mmc3_profile);
                return 0;
        }

        /*
         * for disc type 0xff we should probably reserve a new track.
         * but i'm not sure, should we leave this to user apps? probably.
         */
        if (di->disc_type == 0xff) {
                pr_notice("unknown disc - no track?\n");
                return 0;
        }

        if (di->disc_type != 0x20 && di->disc_type != 0) {
                pr_err("wrong disc type (%x)\n", di->disc_type);
                return 0;
        }

        if (di->erasable == 0) {
                pr_notice("disc not erasable\n");
                return 0;
        }

        if (di->border_status == PACKET_SESSION_RESERVED) {
                pr_err("can't write to last track (reserved)\n");
                return 0;
        }

        return 1;
}
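
/*
 * Query the drive and medium, validate that they are packet-writable
 * and derive pd->settings (packet size, block mode, NWA/LRA) from the
 * track information.
 */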
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
        struct packet_command cgc;
        unsigned char buf[12];
        disc_information di;
        track_information ti;
        int ret, track;

        init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
        cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
        cgc.cmd[8] = 8;
        ret = pkt_generic_packet(pd, &cgc);
        pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

        memset(&di, 0, sizeof(disc_information));
        memset(&ti, 0, sizeof(track_information));

        if ((ret = pkt_get_disc_info(pd, &di))) {
                pr_err("failed get_disc\n");
                return ret;
        }

        if (!pkt_writable_disc(pd, &di))
                return -EROFS;

        pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

        track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
        if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
                pr_err("failed get_track\n");
                return ret;
        }

        if (!pkt_writable_track(pd, &ti)) {
                pr_err("can't write to this track\n");
                return -EROFS;
        }

        /*
         * we keep packet size in 512 byte units, makes it easier to
         * deal with request calculations.
         */
        pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
        if (pd->settings.size == 0) {
                pr_notice("detected zero packet size!\n");
                return -ENXIO;
        }
        if (pd->settings.size > PACKET_MAX_SECTORS) {
                pr_err("packet size is too big\n");
                return -EROFS;
        }
        pd->settings.fp = ti.fp;
        pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

        if (ti.nwa_v) {
                pd->nwa = be32_to_cpu(ti.next_writable);
                set_bit(PACKET_NWA_VALID, &pd->flags);
        }

        /*
         * in theory we could use lra on -RW media as well and just zero
         * blocks that haven't been written yet, but in practice that
         * is just a no-go. we'll use that for -R, naturally.
         */
        if (ti.lra_v) {
                pd->lra = be32_to_cpu(ti.last_rec_address);
                set_bit(PACKET_LRA_VALID, &pd->flags);
        } else {
                pd->lra = 0xffffffff;
                set_bit(PACKET_LRA_VALID, &pd->flags);
        }

        /*
         * fine for now
         */
        pd->settings.link_loss = 7;
        pd->settings.write_type = 0;    /* packet */
        pd->settings.track_mode = ti.track_mode;

        /*
         * mode1 or mode2 disc
         */
        switch (ti.data_mode) {
        case PACKET_MODE1:
                pd->settings.block_mode = PACKET_BLOCK_MODE1;
                break;
        case PACKET_MODE2:
                pd->settings.block_mode = PACKET_BLOCK_MODE2;
                break;
        default:
                pr_err("unknown data mode\n");
                return -EROFS;
        }
        return 0;
}

/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
                                                int set)
{
        struct packet_command cgc;
        struct request_sense sense;
        unsigned char buf[64];
        int ret;

        init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
        cgc.sense = &sense;
        cgc.buflen = pd->mode_offset + 12;

        /*
         * caching mode page might not be there, so quiet this command
         */
        cgc.quiet = 1;
        if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
                return ret;

        buf[pd->mode_offset + 10] |= (!!set << 2);

        cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
        ret = pkt_mode_select(pd, &cgc);
        if (ret) {
                pr_err("write caching control failed\n");
                pkt_dump_sense(&cgc);
        } else if (set)
                pr_notice("enabled write caching on %s\n", pd->name);
        return ret;
}
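
/*
 * Lock or unlock the drive door via PREVENT ALLOW MEDIUM REMOVAL.
 */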
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
        struct packet_command cgc;

        init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
        cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
        cgc.cmd[4] = lockflag ? 1 : 0;
        return pkt_generic_packet(pd, &cgc);
}

/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
                                                unsigned *write_speed)
{
        struct packet_command cgc;
        struct request_sense sense;
        unsigned char buf[256+18];
        unsigned char *cap_buf;
        int ret, offset;

        cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
        init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
        cgc.sense = &sense;

        ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
        if (ret) {
                cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
                             sizeof(struct mode_page_header);
                ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
                if (ret) {
                        pkt_dump_sense(&cgc);
                        return ret;
                }
        }

        offset = 20;            /* Obsoleted field, used by older drives */
        if (cap_buf[1] >= 28)
                offset = 28;    /* Current write speed selected */
        if (cap_buf[1] >= 30) {
                /* If the drive reports at least one "Logical Unit Write
                 * Speed Performance Descriptor Block", use the information
                 * in the first block. (contains the highest speed)
                 */
                int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
                if (num_spdb > 0)
                        offset = 34;
        }

        *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
        return 0;
}

/* These tables from cdrecord - I don't have orange book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
        /* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
           0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
        /* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
           0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
        /* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
           0, 2, 4, 8, 0, 0, 16, 0, 24, 32, 40, 48, 0, 0, 0, 0
};

/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
                                              unsigned *speed)
{
        struct packet_command cgc;
        struct request_sense sense;
        unsigned char buf[64];
        unsigned int size, st, sp;
        int ret;

        init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
        cgc.sense = &sense;
        cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
        cgc.cmd[1] = 2;
        cgc.cmd[2] = 4; /* READ ATIP */
        cgc.cmd[8] = 2;
        ret = pkt_generic_packet(pd, &cgc);
        if (ret) {
                pkt_dump_sense(&cgc);
                return ret;
        }
        size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
        if (size > sizeof(buf))
                size = sizeof(buf);

        init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
        cgc.sense = &sense;
        cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
        cgc.cmd[1] = 2;
        cgc.cmd[2] = 4;
        cgc.cmd[8] = size;
        ret = pkt_generic_packet(pd, &cgc);
        if (ret) {
                pkt_dump_sense(&cgc);
                return ret;
        }

        if (!(buf[6] & 0x40)) {
                pr_notice("disc type is not CD-RW\n");
                return 1;
        }
        if (!(buf[6] & 0x4)) {
                pr_notice("A1 values on media are not valid, maybe not CDRW?\n");
                return 1;
        }

        st = (buf[6] >> 3) & 0x7;       /* disc sub-type */

        sp = buf[16] & 0xf;             /* max speed from ATIP A1 field */

        /* Info from cdrecord */
        switch (st) {
        case 0: /* standard speed */
                *speed = clv_to_speed[sp];
                break;
        case 1: /* high speed */
                *speed = hs_clv_to_speed[sp];
                break;
        case 2: /* ultra high speed */
                *speed = us_clv_to_speed[sp];
                break;
        default:
                pr_notice("unknown disc sub-type %d\n", st);
                return 1;
        }
        if (*speed) {
                pr_info("maximum media speed: %d\n", *speed);
                return 0;
        } else {
                pr_notice("unknown speed %d for sub-type %d\n", sp, st);
                return 1;
        }
}
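
/*
 * Ask the drive to perform Optimum Power Calibration (OPC) before the
 * first write.
 */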
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
        struct packet_command cgc;
        struct request_sense sense;
        int ret;

        pkt_dbg(2, "Performing OPC\n");

        init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
        cgc.sense = &sense;
        cgc.timeout = 60*HZ;
        cgc.cmd[0] = GPCMD_SEND_OPC;
        cgc.cmd[1] = 1;
        if ((ret = pkt_generic_packet(pd, &cgc)))
                pkt_dump_sense(&cgc);
        return ret;
}
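
/*
 * Prepare the drive for writing: probe the medium, program the write
 * parameters page, set up caching and speed, and run OPC.
 */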
static int pkt_open_write(struct pktcdvd_device *pd)
{
        int ret;
        unsigned int write_speed, media_write_speed, read_speed;

        if ((ret = pkt_probe_settings(pd))) {
                pkt_dbg(2, "%s failed probe\n", pd->name);
                return ret;
        }

        if ((ret = pkt_set_write_settings(pd))) {
                pkt_dbg(1, "%s failed saving write settings\n", pd->name);
                return -EIO;
        }

        pkt_write_caching(pd, USE_WCACHING);

        if ((ret = pkt_get_max_speed(pd, &write_speed)))
                write_speed = 16 * 177;
        switch (pd->mmc3_profile) {
        case 0x13: /* DVD-RW */
        case 0x1a: /* DVD+RW */
        case 0x12: /* DVD-RAM */
                pkt_dbg(1, "write speed %ukB/s\n", write_speed);
                break;
        default:
                if ((ret = pkt_media_speed(pd, &media_write_speed)))
                        media_write_speed = 16;
                write_speed = min(write_speed, media_write_speed * 177);
                pkt_dbg(1, "write speed %ux\n", write_speed / 176);
                break;
        }
        read_speed = write_speed;

        if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
                pkt_dbg(1, "%s couldn't set write speed\n", pd->name);
                return -EIO;
        }
        pd->write_speed = write_speed;
        pd->read_speed = read_speed;

        if ((ret = pkt_perform_opc(pd))) {
                pkt_dbg(1, "%s Optimum Power Calibration failed\n", pd->name);
        }

        return 0;
}

/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
        int ret;
        long lba;
        struct request_queue *q;

        /*
         * We need to re-open the cdrom device without O_NONBLOCK to be able
         * to read/write from/to it. It is already opened in O_NONBLOCK mode
         * so bdget() can't fail.
         */
        bdget(pd->bdev->bd_dev);
        if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
                goto out;

        if ((ret = pkt_get_last_written(pd, &lba))) {
                pr_err("pkt_get_last_written failed\n");
                goto out_putdev;
        }

        set_capacity(pd->disk, lba << 2);
        set_capacity(pd->bdev->bd_disk, lba << 2);
        bd_set_size(pd->bdev, (loff_t)lba << 11);

        q = bdev_get_queue(pd->bdev);
        if (write) {
                if ((ret = pkt_open_write(pd)))
                        goto out_putdev;
                /*
                 * Some CDRW drives can not handle writes larger than one packet,
                 * even if the size is a multiple of the packet size.
                 */
                spin_lock_irq(q->queue_lock);
                blk_queue_max_hw_sectors(q, pd->settings.size);
                spin_unlock_irq(q->queue_lock);
                set_bit(PACKET_WRITABLE, &pd->flags);
        } else {
                pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
                clear_bit(PACKET_WRITABLE, &pd->flags);
        }

        if ((ret = pkt_set_segment_merging(pd, q)))
                goto out_putdev;

        if (write) {
                if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
                        pr_err("not enough memory for buffers\n");
                        ret = -ENOMEM;
                        goto out_putdev;
                }
                pr_info("%lukB available on disc\n", lba << 1);
        }

        return 0;

out_putdev:
        blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
out:
        return ret;
}

/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
        if (flush && pkt_flush_cache(pd))
                pkt_dbg(1, "%s not flushing cache\n", pd->name);

        pkt_lock_door(pd, 0);

        pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
        blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

        pkt_shrink_pktlist(pd);
}

static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
{
        if (dev_minor >= MAX_WRITERS)
                return NULL;
        return pkt_devs[dev_minor];
}

static int pkt_open(struct block_device *bdev, fmode_t mode)
{
        struct pktcdvd_device *pd = NULL;
        int ret;

        pkt_dbg(2, "entering\n");

        mutex_lock(&pktcdvd_mutex);
        mutex_lock(&ctl_mutex);
        pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
        if (!pd) {
                ret = -ENODEV;
                goto out;
        }
        BUG_ON(pd->refcnt < 0);

        pd->refcnt++;
        if (pd->refcnt > 1) {
                if ((mode & FMODE_WRITE) &&
                    !test_bit(PACKET_WRITABLE, &pd->flags)) {
                        ret = -EBUSY;
                        goto out_dec;
                }
        } else {
                ret = pkt_open_dev(pd, mode & FMODE_WRITE);
                if (ret)
                        goto out_dec;
                /*
                 * needed here as well, since ext2 (among others) may change
                 * the blocksize at mount time
                 */
                set_blocksize(bdev, CD_FRAMESIZE);
        }

        mutex_unlock(&ctl_mutex);
        mutex_unlock(&pktcdvd_mutex);
        return 0;

out_dec:
        pd->refcnt--;
out:
        pkt_dbg(2, "failed (%d)\n", ret);
        mutex_unlock(&ctl_mutex);
        mutex_unlock(&pktcdvd_mutex);
        return ret;
}

static void pkt_close(struct gendisk *disk, fmode_t mode)
{
        struct pktcdvd_device *pd = disk->private_data;

        mutex_lock(&pktcdvd_mutex);
        mutex_lock(&ctl_mutex);
        pd->refcnt--;
        BUG_ON(pd->refcnt < 0);
        if (pd->refcnt == 0) {
                int flush = test_bit(PACKET_WRITABLE, &pd->flags);
                pkt_release_dev(pd, flush);
        }
        mutex_unlock(&ctl_mutex);
        mutex_unlock(&pktcdvd_mutex);
}

static void pkt_end_io_read_cloned(struct bio *bio, int err)
{
        struct packet_stacked_data *psd = bio->bi_private;
        struct pktcdvd_device *pd = psd->pd;

        bio_put(bio);
        bio_endio(psd->bio, err);
        mempool_free(psd, psd_pool);
        pkt_bio_finished(pd);
}
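
/*
 * Entry point for all bios submitted to the pktcdvd device. Reads are
 * cloned and passed down to the CD-ROM device; writes are appended to
 * a matching active packet when possible, otherwise queued for the
 * worker thread.
 */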
static void pkt_make_request(struct request_queue *q, struct bio *bio)
{
        struct pktcdvd_device *pd;
        char b[BDEVNAME_SIZE];
        sector_t zone;
        struct packet_data *pkt;
        int was_empty, blocked_bio;
        struct pkt_rb_node *node;

        pd = q->queuedata;
        if (!pd) {
                pr_err("%s incorrect request queue\n",
                       bdevname(bio->bi_bdev, b));
                goto end_io;
        }

        /*
         * Clone READ bios so we can have our own bi_end_io callback.
         */
        if (bio_data_dir(bio) == READ) {
                struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
                struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);

                psd->pd = pd;
                psd->bio = bio;
                cloned_bio->bi_bdev = pd->bdev;
                cloned_bio->bi_private = psd;
                cloned_bio->bi_end_io = pkt_end_io_read_cloned;
                pd->stats.secs_r += bio_sectors(bio);
                pkt_queue_bio(pd, cloned_bio);
                return;
        }

        if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
                pr_notice("WRITE for ro device %s (%llu)\n",
                          pd->name, (unsigned long long)bio->bi_sector);
                goto end_io;
        }

        if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
                pr_err("wrong bio size\n");
                goto end_io;
        }

        blk_queue_bounce(q, &bio);

        zone = get_zone(bio->bi_sector, pd);
        pkt_dbg(2, "start = %6llx stop = %6llx\n",
                (unsigned long long)bio->bi_sector,
                (unsigned long long)bio_end_sector(bio));

        /* Check if we have to split the bio */
        {
                struct bio_pair *bp;
                sector_t last_zone;
                int first_sectors;

                last_zone = get_zone(bio_end_sector(bio) - 1, pd);
                if (last_zone != zone) {
                        BUG_ON(last_zone != zone + pd->settings.size);
                        first_sectors = last_zone - bio->bi_sector;
                        bp = bio_split(bio, first_sectors);
                        BUG_ON(!bp);
                        pkt_make_request(q, &bp->bio1);
                        pkt_make_request(q, &bp->bio2);
                        bio_pair_release(bp);
                        return;
                }
        }

        /*
         * If we find a matching packet in state WAITING or READ_WAIT, we can
         * just append this bio to that packet.
         */
        spin_lock(&pd->cdrw.active_list_lock);
        blocked_bio = 0;
        list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
                if (pkt->sector == zone) {
                        spin_lock(&pkt->lock);
                        if ((pkt->state == PACKET_WAITING_STATE) ||
                            (pkt->state == PACKET_READ_WAIT_STATE)) {
                                bio_list_add(&pkt->orig_bios, bio);
                                pkt->write_size += bio->bi_size / CD_FRAMESIZE;
                                if ((pkt->write_size >= pkt->frames) &&
                                    (pkt->state == PACKET_WAITING_STATE)) {
                                        atomic_inc(&pkt->run_sm);
                                        wake_up(&pd->wqueue);
                                }
                                spin_unlock(&pkt->lock);
                                spin_unlock(&pd->cdrw.active_list_lock);
                                return;
                        } else {
                                blocked_bio = 1;
                        }
                        spin_unlock(&pkt->lock);
                }
        }
        spin_unlock(&pd->cdrw.active_list_lock);

        /*
         * Test if there is enough room left in the bio work queue
         * (queue size >= congestion on mark).
         * If not, wait till the work queue size is below the congestion off mark.
         */
        spin_lock(&pd->lock);
        if (pd->write_congestion_on > 0
            && pd->bio_queue_size >= pd->write_congestion_on) {
                set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
                do {
                        spin_unlock(&pd->lock);
                        congestion_wait(BLK_RW_ASYNC, HZ);
                        spin_lock(&pd->lock);
                } while (pd->bio_queue_size > pd->write_congestion_off);
        }
        spin_unlock(&pd->lock);

        /*
         * No matching packet found. Store the bio in the work queue.
         */
        node = mempool_alloc(pd->rb_pool, GFP_NOIO);
        node->bio = bio;
        spin_lock(&pd->lock);
        BUG_ON(pd->bio_queue_size < 0);
        was_empty = (pd->bio_queue_size == 0);
        pkt_rbtree_insert(pd, node);
        spin_unlock(&pd->lock);

        /*
         * Wake up the worker thread.
         */
        atomic_set(&pd->scan_queue, 1);
        if (was_empty) {
                /* This wake_up is required for correct operation */
                wake_up(&pd->wqueue);
        } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
                /*
                 * This wake up is not required for correct operation,
                 * but improves performance in some cases.
                 */
                wake_up(&pd->wqueue);
        }
        return;
end_io:
        bio_io_error(bio);
}
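
/*
 * merge_bvec_fn: report how many more bytes may be added to a bio at
 * the given sector. A bio of up to PAGE_SIZE is always allowed, since
 * pkt_make_request() will split it at the packet boundary.
 */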
static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
                          struct bio_vec *bvec)
{
        struct pktcdvd_device *pd = q->queuedata;
        sector_t zone = get_zone(bmd->bi_sector, pd);
        int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
        int remaining = (pd->settings.size << 9) - used;
        int remaining2;

        /*
         * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
         * boundary, pkt_make_request() will split the bio.
         */
        remaining2 = PAGE_SIZE - bmd->bi_size;
        remaining = max(remaining, remaining2);

        BUG_ON(remaining < 0);
        return remaining;
}

static void pkt_init_queue(struct pktcdvd_device *pd)
{
        struct request_queue *q = pd->disk->queue;

        blk_queue_make_request(q, pkt_make_request);
        blk_queue_logical_block_size(q, CD_FRAMESIZE);
        blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
        blk_queue_merge_bvec(q, pkt_merge_bvec);
        q->queuedata = pd;
}
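
/*
 * /proc/driver/pktcdvd/<name> handler: dump the writer's settings,
 * statistics and queue state.
 */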
static int pkt_seq_show(struct seq_file *m, void *p)
{
        struct pktcdvd_device *pd = m->private;
        char *msg;
        char bdev_buf[BDEVNAME_SIZE];
        int states[PACKET_NUM_STATES];

        seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
                   bdevname(pd->bdev, bdev_buf));

        seq_printf(m, "\nSettings:\n");
        seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

        if (pd->settings.write_type == 0)
                msg = "Packet";
        else
                msg = "Unknown";
        seq_printf(m, "\twrite type:\t\t%s\n", msg);

        seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
        seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

        seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

        if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
                msg = "Mode 1";
        else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
                msg = "Mode 2";
        else
                msg = "Unknown";
        seq_printf(m, "\tblock mode:\t\t%s\n", msg);

        seq_printf(m, "\nStatistics:\n");
        seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
        seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
        seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
        seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
        seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

        seq_printf(m, "\nMisc:\n");
        seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
        seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
        seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
        seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
        seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
        seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

        seq_printf(m, "\nQueue state:\n");
        seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
        seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
        seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

        pkt_count_states(pd, states);
        seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
                   states[0], states[1], states[2], states[3], states[4], states[5]);

        seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
                   pd->write_congestion_off,
                   pd->write_congestion_on);
        return 0;
}

static int pkt_seq_open(struct inode *inode, struct file *file)
{
        return single_open(file, pkt_seq_show, PDE_DATA(inode));
}

static const struct file_operations pkt_proc_fops = {
        .open           = pkt_seq_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release
};
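
/*
 * Attach pd to the underlying CD-ROM device and start its worker
 * thread. Fails if the device is already in use by another writer or
 * would create a pktcdvd-on-pktcdvd chain.
 */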
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
        int i;
        int ret = 0;
        char b[BDEVNAME_SIZE];
        struct block_device *bdev;

        if (pd->pkt_dev == dev) {
                pr_err("recursive setup not allowed\n");
                return -EBUSY;
        }
        for (i = 0; i < MAX_WRITERS; i++) {
                struct pktcdvd_device *pd2 = pkt_devs[i];
                if (!pd2)
                        continue;
                if (pd2->bdev->bd_dev == dev) {
                        pr_err("%s already setup\n", bdevname(pd2->bdev, b));
                        return -EBUSY;
                }
                if (pd2->pkt_dev == dev) {
                        pr_err("can't chain pktcdvd devices\n");
                        return -EBUSY;
                }
        }

        bdev = bdget(dev);
        if (!bdev)
                return -ENOMEM;
        ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
        if (ret)
                return ret;

        /* This is safe, since we have a reference from open(). */
        __module_get(THIS_MODULE);

        pd->bdev = bdev;
        set_blocksize(bdev, CD_FRAMESIZE);

        pkt_init_queue(pd);

        atomic_set(&pd->cdrw.pending_bios, 0);
        pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
        if (IS_ERR(pd->cdrw.thread)) {
                pr_err("can't start kernel thread\n");
                ret = -ENOMEM;
                goto out_mem;
        }

        proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
        pkt_dbg(1, "writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
        return 0;

out_mem:
        blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);
        return ret;
}
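
/*
 * Block device ioctl: unlock the door before forwarding an eject, and
 * pass selected CD-ROM ioctls through to the underlying drive.
 */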
static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
        struct pktcdvd_device *pd = bdev->bd_disk->private_data;
        int ret;

        pkt_dbg(2, "cmd %x, dev %d:%d\n",
                cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

        mutex_lock(&pktcdvd_mutex);
        switch (cmd) {
        case CDROMEJECT:
                /*
                 * The door gets locked when the device is opened, so we
                 * have to unlock it or else the eject command fails.
                 */
                if (pd->refcnt == 1)
                        pkt_lock_door(pd, 0);
                /* fallthru */
        /*
         * forward selected CDROM ioctls to CD-ROM, for UDF
         */
        case CDROMMULTISESSION:
        case CDROMREADTOCENTRY:
        case CDROM_LAST_WRITTEN:
        case CDROM_SEND_PACKET:
        case SCSI_IOCTL_SEND_COMMAND:
                ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
                break;

        default:
                pkt_dbg(2, "Unknown ioctl for %s (%x)\n", pd->name, cmd);
                ret = -ENOTTY;
        }
        mutex_unlock(&pktcdvd_mutex);

        return ret;
}

static unsigned int pkt_check_events(struct gendisk *disk,
                                     unsigned int clearing)
{
        struct pktcdvd_device *pd = disk->private_data;
        struct gendisk *attached_disk;

        if (!pd)
                return 0;
        if (!pd->bdev)
                return 0;
        attached_disk = pd->bdev->bd_disk;
        if (!attached_disk || !attached_disk->fops->check_events)
                return 0;
        return attached_disk->fops->check_events(attached_disk, clearing);
}

static const struct block_device_operations pktcdvd_ops = {
        .owner          = THIS_MODULE,
        .open           = pkt_open,
        .release        = pkt_close,
        .ioctl          = pkt_ioctl,
        .check_events   = pkt_check_events,
};

static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
{
        return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
}

/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t *pkt_dev)
{
        int idx;
        int ret = -ENOMEM;
        struct pktcdvd_device *pd;
        struct gendisk *disk;

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

        for (idx = 0; idx < MAX_WRITERS; idx++)
                if (!pkt_devs[idx])
                        break;
        if (idx == MAX_WRITERS) {
                pr_err("max %d writers supported\n", MAX_WRITERS);
                ret = -EBUSY;
                goto out_mutex;
        }

        pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
        if (!pd)
                goto out_mutex;

        pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
                                                  sizeof(struct pkt_rb_node));
        if (!pd->rb_pool)
                goto out_mem;

        INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
        INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
        spin_lock_init(&pd->cdrw.active_list_lock);

        spin_lock_init(&pd->lock);
        spin_lock_init(&pd->iosched.lock);
        bio_list_init(&pd->iosched.read_queue);
        bio_list_init(&pd->iosched.write_queue);
        sprintf(pd->name, DRIVER_NAME"%d", idx);
        init_waitqueue_head(&pd->wqueue);
        pd->bio_queue = RB_ROOT;

        pd->write_congestion_on = write_congestion_on;
        pd->write_congestion_off = write_congestion_off;

        disk = alloc_disk(1);
        if (!disk)
                goto out_mem;
        pd->disk = disk;
        disk->major = pktdev_major;
        disk->first_minor = idx;
        disk->fops = &pktcdvd_ops;
        disk->flags = GENHD_FL_REMOVABLE;
        strcpy(disk->disk_name, pd->name);
        disk->devnode = pktcdvd_devnode;
        disk->private_data = pd;
        disk->queue = blk_alloc_queue(GFP_KERNEL);
        if (!disk->queue)
                goto out_mem2;

        pd->pkt_dev = MKDEV(pktdev_major, idx);
        ret = pkt_new_dev(pd, dev);
        if (ret)
                goto out_new_dev;

        /* inherit events of the host device */
        disk->events = pd->bdev->bd_disk->events;
        disk->async_events = pd->bdev->bd_disk->async_events;

        add_disk(disk);

        pkt_sysfs_dev_new(pd);
        pkt_debugfs_dev_new(pd);

        pkt_devs[idx] = pd;
        if (pkt_dev)
                *pkt_dev = pd->pkt_dev;

        mutex_unlock(&ctl_mutex);
        return 0;

out_new_dev:
        blk_cleanup_queue(disk->queue);
out_mem2:
        put_disk(disk);
out_mem:
        if (pd->rb_pool)
                mempool_destroy(pd->rb_pool);
        kfree(pd);
out_mutex:
        mutex_unlock(&ctl_mutex);
        pr_err("setup of pktcdvd device failed\n");
        return ret;
}

/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
        struct pktcdvd_device *pd;
        int idx;
        int ret = 0;

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

        for (idx = 0; idx < MAX_WRITERS; idx++) {
                pd = pkt_devs[idx];
                if (pd && (pd->pkt_dev == pkt_dev))
                        break;
        }
        if (idx == MAX_WRITERS) {
                pkt_dbg(1, "dev not setup\n");
                ret = -ENXIO;
                goto out;
        }

        if (pd->refcnt > 0) {
                ret = -EBUSY;
                goto out;
        }
        if (!IS_ERR(pd->cdrw.thread))
                kthread_stop(pd->cdrw.thread);

        pkt_devs[idx] = NULL;

        pkt_debugfs_dev_remove(pd);
        pkt_sysfs_dev_remove(pd);

        blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);

        remove_proc_entry(pd->name, pkt_proc);
        pkt_dbg(1, "writer %s unmapped\n", pd->name);

        del_gendisk(pd->disk);
        blk_cleanup_queue(pd->disk->queue);
        put_disk(pd->disk);

        mempool_destroy(pd->rb_pool);
        kfree(pd);

        /* This is safe: open() is still holding a reference. */
        module_put(THIS_MODULE);

out:
        mutex_unlock(&ctl_mutex);
        return ret;
}

static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
        struct pktcdvd_device *pd;

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

        pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
        if (pd) {
                ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
                ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
        } else {
                ctrl_cmd->dev = 0;
                ctrl_cmd->pkt_dev = 0;
        }
        ctrl_cmd->num_devices = MAX_WRITERS;

        mutex_unlock(&ctl_mutex);
}
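
/*
 * ioctl handler for the pktcdvd/control misc device: set up, tear down
 * or query writer mappings via struct pkt_ctrl_command.
 */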
static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        struct pkt_ctrl_command ctrl_cmd;
        int ret = 0;
        dev_t pkt_dev = 0;

        if (cmd != PACKET_CTRL_CMD)
                return -ENOTTY;

        if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
                return -EFAULT;

        switch (ctrl_cmd.command) {
        case PKT_CTRL_CMD_SETUP:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
                ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
                break;
        case PKT_CTRL_CMD_TEARDOWN:
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
                ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
                break;
        case PKT_CTRL_CMD_STATUS:
                pkt_get_status(&ctrl_cmd);
                break;
        default:
                return -ENOTTY;
        }

        if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
                return -EFAULT;
        return ret;
}

#ifdef CONFIG_COMPAT
static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations pkt_ctl_fops = {
        .open           = nonseekable_open,
        .unlocked_ioctl = pkt_ctl_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = pkt_ctl_compat_ioctl,
#endif
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
};

static struct miscdevice pkt_misc = {
        .minor          = MISC_DYNAMIC_MINOR,
        .name           = DRIVER_NAME,
        .nodename       = "pktcdvd/control",
        .fops           = &pkt_ctl_fops
};

static int __init pkt_init(void)
{
        int ret;

        mutex_init(&ctl_mutex);

        psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
                                               sizeof(struct packet_stacked_data));
        if (!psd_pool)
                return -ENOMEM;

        ret = register_blkdev(pktdev_major, DRIVER_NAME);
        if (ret < 0) {
                pr_err("unable to register block device\n");
                goto out2;
        }
        if (!pktdev_major)
                pktdev_major = ret;

        ret = pkt_sysfs_init();
        if (ret)
                goto out;

        pkt_debugfs_init();

        ret = misc_register(&pkt_misc);
        if (ret) {
                pr_err("unable to register misc device\n");
                goto out_misc;
        }

        pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

        return 0;

out_misc:
        pkt_debugfs_cleanup();
        pkt_sysfs_cleanup();
out:
        unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
        mempool_destroy(psd_pool);
        return ret;
}

static void __exit pkt_exit(void)
{
        remove_proc_entry("driver/"DRIVER_NAME, NULL);
        misc_deregister(&pkt_misc);

        pkt_debugfs_cleanup();
        pkt_sysfs_cleanup();

        unregister_blkdev(pktdev_major, DRIVER_NAME);
        mempool_destroy(psd_pool);
}

MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);