/*
 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
 *
 * May be copied or modified under the terms of the GNU General Public
 * License. See linux/COPYING for more information.
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * Theory of operation:
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing. One restriction is that write requests must be aligned to
 * packet boundaries on the physical media, and the size of a write request
 * must be equal to the packet size. Another restriction is that a
 * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
 * command, if the previous command was a write.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * The lowest layer in the packet writing driver is the packet I/O scheduler.
 * Its data is defined by the struct packet_iosched and includes two bio
 * queues with pending read and write requests. These queues are processed
 * by the pkt_iosched_process_queue() function. The write requests in this
 * queue are already properly aligned and sized. This layer is responsible for
 * issuing the flush cache commands and scheduling the I/O in a good order.
 *
 * The next layer transforms unaligned write requests to aligned writes. This
 * transformation requires reading missing pieces of data from the underlying
 * block device, assembling the pieces to full packets and queuing them to the
 * packet I/O scheduler.
 *
 * At the top layer there is a custom make_request_fn function that forwards
 * read requests directly to the iosched queue and puts write requests in the
 * unaligned write queue. A kernel thread performs the necessary read
 * gathering to convert the unaligned writes to aligned writes and then feeds
 * them to the packet I/O scheduler.
 *
 *************************************************************************/
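
/*
 * Illustrative flow of a single write, summarizing the layers described
 * above (editorial sketch, not part of the original comment):
 *
 *   bio (WRITE)
 *     -> make_request_fn          queued in the unaligned write queue
 *     -> kcdrwd                   read-gathers missing frames, builds packet
 *     -> packet I/O scheduler     orders I/O, inserts GPCMD_FLUSH_CACHE
 *     -> generic_make_request()   down to the ide-cd.c / sr.c driver
 */
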
#include <linux/pktcdvd.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <asm/uaccess.h>

#define DRIVER_NAME	"pktcdvd"

#if PACKET_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define DPRINTK(fmt, args...)
#endif

#if PACKET_DEBUG > 1
#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
#else
#define VPRINTK(fmt, args...)
#endif

#define MAX_SPEED 0xffff

#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
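
/*
 * Worked example (illustrative): with a packet size of settings.size == 64
 * sectors (32 KiB) and offset == 0, ZONE() masks off the low six bits of
 * the sector number, so sectors 128..191 all map to zone 128.  The mask
 * arithmetic assumes settings.size is a power of two; offset compensates
 * for a track start that is not packet-aligned.
 */
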
static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
static struct proc_dir_entry *pkt_proc;
static int pktdev_major;
static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
static struct mutex ctl_mutex;	/* Serialize open/close/setup/teardown */
static mempool_t *psd_pool;

static struct class	*class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
static struct dentry	*pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */

/* forward declaration */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
static int pkt_remove_dev(dev_t pkt_dev);
static int pkt_seq_show(struct seq_file *m, void *p);


/*
 * create and register a pktcdvd kernel object.
 */
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
					const char* name,
					struct kobject* parent,
					struct kobj_type* ktype)
{
	struct pktcdvd_kobj *p;
	int error;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	if (error) {
		kobject_put(&p->kobj);
		return NULL;
	}
	kobject_uevent(&p->kobj, KOBJ_ADD);
	return p;
}

/*
 * remove a pktcdvd kernel object.
 */
static void pkt_kobj_remove(struct pktcdvd_kobj *p)
{
	if (p)
		kobject_put(&p->kobj);
}

/*
 * default release function for pktcdvd kernel objects.
 */
static void pkt_kobj_release(struct kobject *kobj)
{
	kfree(to_pktcdvdkobj(kobj));
}


/**********************************************************
 *
 * sysfs interface for pktcdvd
 * by (C) 2006  Thomas Maier <balagi@justmail.de>
 *
 **********************************************************/

#define DEF_ATTR(_obj,_name,_mode) \
	static struct attribute _obj = { .name = _name, .mode = _mode }

/**********************************************************
  /sys/class/pktcdvd/pktcdvd[0-7]/
                     stat/reset
                     stat/packets_started
                     stat/packets_finished
                     stat/kb_written
                     stat/kb_read
                     stat/kb_read_gather
                     write_queue/size
                     write_queue/congestion_off
                     write_queue/congestion_on
 **********************************************************/

DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);

static struct attribute *kobj_pkt_attrs_stat[] = {
	&kobj_pkt_attr_st1,
	&kobj_pkt_attr_st2,
	&kobj_pkt_attr_st3,
	&kobj_pkt_attr_st4,
	&kobj_pkt_attr_st5,
	&kobj_pkt_attr_st6,
	NULL
};

DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on",  0644);

static struct attribute *kobj_pkt_attrs_wqueue[] = {
	&kobj_pkt_attr_wq1,
	&kobj_pkt_attr_wq2,
	&kobj_pkt_attr_wq3,
	NULL
};

static ssize_t kobj_pkt_show(struct kobject *kobj,
			struct attribute *attr, char *data)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int n = 0;
	int v;

	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);

	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);

	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);

	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);

	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);

	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);

	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
		n = sprintf(data, "%d\n", v);
	}
	return n;
}

static void init_write_congestion_marks(int* lo, int* hi)
{
	if (*hi > 0) {
		*hi = max(*hi, 500);
		*hi = min(*hi, 1000000);
		if (*lo <= 0)
			*lo = *hi - 100;
		else {
			*lo = min(*lo, *hi - 100);
			*lo = max(*lo, 100);
		}
	} else {
		*hi = -1;
		*lo = -1;
	}
}
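
/*
 * Worked example (illustrative): a request of *hi = 200, *lo = 50 is
 * clamped to *hi = 500 (the minimum "on" mark); *lo then becomes
 * max(min(50, 500 - 100), 100) = 100.  On return, either
 * 100 <= *lo <= *hi - 100 holds, or both marks are -1 (congestion
 * handling disabled).
 */
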
static ssize_t kobj_pkt_store(struct kobject *kobj,
			struct attribute *attr,
			const char *data, size_t len)
{
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	int val;

	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;

	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);

	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					&pd->write_congestion_on);
		spin_unlock(&pd->lock);
	}
	return len;
}

static const struct sysfs_ops kobj_pkt_ops = {
	.show = kobj_pkt_show,
	.store = kobj_pkt_store
};
static struct kobj_type kobj_pkt_type_stat = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_stat
};
static struct kobj_type kobj_pkt_type_wqueue = {
	.release = pkt_kobj_release,
	.sysfs_ops = &kobj_pkt_ops,
	.default_attrs = kobj_pkt_attrs_wqueue
};

static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
	if (class_pktcdvd) {
		pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
					"%s", pd->name);
		if (IS_ERR(pd->dev))
			pd->dev = NULL;
	}
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
					&pd->dev->kobj,
					&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
					&pd->dev->kobj,
					&kobj_pkt_type_wqueue);
	}
}

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	if (class_pktcdvd)
		device_unregister(pd->dev);
}


/********************************************************************
  /sys/class/pktcdvd/
                     add            map block device
                     remove         unmap packet dev
                     device_map     show mappings
 *******************************************************************/
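
/*
 * Example usage from userspace (illustrative; the major:minor numbers are
 * hypothetical, see Documentation/cdrom/packet-writing.txt):
 *
 *   # map /dev/sr0 (11:0) to a new pktcdvd device:
 *   echo "11:0" > /sys/class/pktcdvd/add
 *   # list mappings (e.g. "pktcdvd0 253:0 11:0"):
 *   cat /sys/class/pktcdvd/device_map
 *   # unmap again, using the pktcdvd device's own major:minor:
 *   echo "253:0" > /sys/class/pktcdvd/remove
 */
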
static void class_pktcdvd_release(struct class *cls)
{
	kfree(cls);
}

static ssize_t class_pktcdvd_show_map(struct class *c,
					struct class_attribute *attr,
					char *data)
{
	int n = 0;
	int idx;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	for (idx = 0; idx < MAX_WRITERS; idx++) {
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
			continue;
		n += sprintf(data+n, "%s %u:%u %u:%u\n",
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));
	}
	mutex_unlock(&ctl_mutex);
	return n;
}

static ssize_t class_pktcdvd_store_add(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		/* pkt_setup_dev() expects caller to hold reference to self */
		if (!try_module_get(THIS_MODULE))
			return -ENODEV;

		pkt_setup_dev(MKDEV(major, minor), NULL);

		module_put(THIS_MODULE);

		return count;
	}

	return -EINVAL;
}

static ssize_t class_pktcdvd_store_remove(struct class *c,
					struct class_attribute *attr,
					const char *buf,
					size_t count)
{
	unsigned int major, minor;

	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
		pkt_remove_dev(MKDEV(major, minor));
		return count;
	}
	return -EINVAL;
}

static struct class_attribute class_pktcdvd_attrs[] = {
	__ATTR(add, 0200, NULL, class_pktcdvd_store_add),
	__ATTR(remove, 0200, NULL, class_pktcdvd_store_remove),
	__ATTR(device_map, 0444, class_pktcdvd_show_map, NULL),
	__ATTR_NULL
};

static int pkt_sysfs_init(void)
{
	int ret = 0;

	/*
	 * create control files in sysfs
	 * /sys/class/pktcdvd/...
	 */
	class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
	if (!class_pktcdvd)
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_attrs = class_pktcdvd_attrs;
	ret = class_register(class_pktcdvd);
	if (ret) {
		kfree(class_pktcdvd);
		class_pktcdvd = NULL;
		printk(DRIVER_NAME": failed to create class pktcdvd\n");
		return ret;
	}
	return 0;
}

static void pkt_sysfs_cleanup(void)
{
	if (class_pktcdvd)
		class_destroy(class_pktcdvd);
	class_pktcdvd = NULL;
}

/********************************************************************
  entries in debugfs

  /sys/kernel/debug/pktcdvd[0-7]/
			info

 *******************************************************************/

static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
{
	return pkt_seq_show(m, p);
}

static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);
}

static const struct file_operations debug_fops = {
	.open		= pkt_debugfs_fops_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.owner		= THIS_MODULE,
};

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (IS_ERR(pd->dfs_d_root)) {
		pd->dfs_d_root = NULL;
		return;
	}
	pd->dfs_f_info = debugfs_create_file("info", S_IRUGO,
				pd->dfs_d_root, pd, &debug_fops);
	if (IS_ERR(pd->dfs_f_info)) {
		pd->dfs_f_info = NULL;
		return;
	}
}

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
{
	if (!pkt_debugfs_root)
		return;
	if (pd->dfs_f_info)
		debugfs_remove(pd->dfs_f_info);
	pd->dfs_f_info = NULL;
	if (pd->dfs_d_root)
		debugfs_remove(pd->dfs_d_root);
	pd->dfs_d_root = NULL;
}

static void pkt_debugfs_init(void)
{
	pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
	if (IS_ERR(pkt_debugfs_root)) {
		pkt_debugfs_root = NULL;
		return;
	}
}

static void pkt_debugfs_cleanup(void)
{
	if (!pkt_debugfs_root)
		return;
	debugfs_remove(pkt_debugfs_root);
	pkt_debugfs_root = NULL;
}

/* ----------------------------------------------------------*/


static void pkt_bio_finished(struct pktcdvd_device *pd)
{
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		VPRINTK(DRIVER_NAME": queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
	}
}

static void pkt_bio_destructor(struct bio *bio)
{
	kfree(bio->bi_io_vec);
	kfree(bio);
}

static struct bio *pkt_bio_alloc(int nr_iovecs)
{
	struct bio_vec *bvl = NULL;
	struct bio *bio;

	bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
	if (!bio)
		goto no_bio;
	bio_init(bio);

	bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvl)
		goto no_bvl;

	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	bio->bi_destructor = pkt_bio_destructor;

	return bio;

no_bvl:
	kfree(bio);
no_bio:
	return NULL;
}

/*
 * Allocate a packet_data struct
 */
static struct packet_data *pkt_alloc_packet_data(int frames)
{
	int i;
	struct packet_data *pkt;

	pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
	if (!pkt)
		goto no_pkt;

	pkt->frames = frames;
	pkt->w_bio = pkt_bio_alloc(frames);
	if (!pkt->w_bio)
		goto no_bio;

	for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
			goto no_page;
	}

	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);

	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt_bio_alloc(1);
		if (!bio)
			goto no_rd_bio;
		pkt->r_bios[i] = bio;
	}

	return pkt;

no_rd_bio:
	for (i = 0; i < frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}

no_page:
	for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
no_bio:
	kfree(pkt);
no_pkt:
	return NULL;
}

/*
 * Free a packet_data struct
 */
static void pkt_free_packet_data(struct packet_data *pkt)
{
	int i;

	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
		if (bio)
			bio_put(bio);
	}
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
	kfree(pkt);
}

static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));

	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
		pkt_free_packet_data(pkt);
	}
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
}

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
{
	struct packet_data *pkt;

	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));

	while (nr_packets > 0) {
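		/*
		 * settings.size is the packet length in 512-byte sectors;
		 * ">> 2" converts it to 2048-byte (CD_FRAMESIZE) frames.
		 */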
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		if (!pkt) {
			pkt_shrink_pktlist(pd);
			return 0;
		}
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
	}
	return 1;
}

static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
{
	struct rb_node *n = rb_next(&node->rb_node);
	if (!n)
		return NULL;
	return rb_entry(n, struct pkt_rb_node, rb_node);
}

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
}

/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
{
	struct rb_node *n = pd->bio_queue.rb_node;
	struct rb_node *next;
	struct pkt_rb_node *tmp;

	if (!n) {
		BUG_ON(pd->bio_queue_size > 0);
		return NULL;
	}

	for (;;) {
		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
		if (s <= tmp->bio->bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
		if (!next)
			break;
		n = next;
	}

	if (s > tmp->bio->bi_sector) {
		tmp = pkt_rbtree_next(tmp);
		if (!tmp)
			return NULL;
	}
	BUG_ON(s > tmp->bio->bi_sector);
	return tmp;
}

/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
{
	struct rb_node **p = &pd->bio_queue.rb_node;
	struct rb_node *parent = NULL;
	sector_t s = node->bio->bi_sector;
	struct pkt_rb_node *tmp;

	while (*p) {
		parent = *p;
		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
		if (s < tmp->bio->bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
}

/*
 * Send a packet_command to the underlying block device and
 * wait for completion.
 */
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	struct request_queue *q = bdev_get_queue(pd->bdev);
	struct request *rq;
	int ret = 0;

	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     WRITE : READ, __GFP_WAIT);

	if (cgc->buflen) {
		if (blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, __GFP_WAIT))
			goto out;
	}

	rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);

	rq->timeout = 60*HZ;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_HARDBARRIER;
	if (cgc->quiet)
		rq->cmd_flags |= REQ_QUIET;

	blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0);
	if (rq->errors)
		ret = -EIO;
out:
	blk_put_request(rq);
	return ret;
}

/*
 * A generic sense dump / resolve mechanism should be implemented across
 * all ATAPI + SCSI devices.
 */
static void pkt_dump_sense(struct packet_command *cgc)
{
	static char *info[9] = { "No sense", "Recovered error", "Not ready",
				 "Medium error", "Hardware error", "Illegal request",
				 "Unit attention", "Data protect", "Blank check" };
	int i;
	struct request_sense *sense = cgc->sense;

	printk(DRIVER_NAME":");
	for (i = 0; i < CDROM_PACKET_SIZE; i++)
		printk(" %02x", cgc->cmd[i]);
	printk(" - ");

	if (sense == NULL) {
		printk("no sense\n");
		return;
	}

	printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);

	if (sense->sense_key > 8) {
		printk(" (INVALID)\n");
		return;
	}

	printk(" (%s)\n", info[sense->sense_key]);
}

/*
 * flush the drive cache to media
 */
static int pkt_flush_cache(struct pktcdvd_device *pd)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
	cgc.quiet = 1;

	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
#if 0
	cgc.cmd[1] = 1 << 1;
#endif
	return pkt_generic_packet(pd, &cgc);
}

/*
 * speed is given as the normal factor, e.g. 4 for 4x
 */
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
				unsigned write_speed, unsigned read_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_SET_SPEED;
	cgc.cmd[2] = (read_speed >> 8) & 0xff;
	cgc.cmd[3] = read_speed & 0xff;
	cgc.cmd[4] = (write_speed >> 8) & 0xff;
	cgc.cmd[5] = write_speed & 0xff;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(&cgc);

	return ret;
}

/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
{
	spin_lock(&pd->iosched.lock);
	if (bio_data_dir(bio) == READ)
		bio_list_add(&pd->iosched.read_queue, bio);
	else
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
}

/*
 * Process the queued read/write requests. This function handles special
 * requirements for CDRW drives:
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency. This means that streaming
 *   writes will never be interrupted by a read, but if the drive has to seek
 *   before the next write, switch to reading instead if there are any pending
 *   read requests.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, it's best to use the highest possible read speed, but
 *   when switching often between reading and writing, it's better to have the
 *   same read and write speeds.
 */
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
{
	if (atomic_read(&pd->iosched.attention) == 0)
		return;
	atomic_set(&pd->iosched.attention, 0);

	for (;;) {
		struct bio *bio;
		int reads_queued, writes_queued;

		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_sector == pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK(DRIVER_NAME": write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK(DRIVER_NAME": read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads += bio->bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}

/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		printk(DRIVER_NAME": cdrom max_phys_segments too small\n");
		return -EIO;
	}
}
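
/*
 * Example (illustrative): with 32 KiB packets, settings.size == 64 and
 * (64 << 9) / CD_FRAMESIZE == 16, so a queue allowing at least 16 segments
 * can take one segment per frame.  Failing that, with 4 KiB pages,
 * 32768 / PAGE_SIZE == 8 segments suffice, because pkt_start_write() then
 * merges frames page-wise via pkt_make_local_copy() (see below).
 */
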
/*
 * Copy CD_FRAMESIZE bytes from src_bio into a destination page
 */
static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs)
{
	unsigned int copy_size = CD_FRAMESIZE;

	while (copy_size > 0) {
		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
		void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
			src_bvl->bv_offset + offs;
		void *vto = page_address(dst_page) + dst_offs;
		int len = min_t(int, copy_size, src_bvl->bv_len - offs);

		BUG_ON(len < 0);
		memcpy(vto, vfrom, len);
		kunmap_atomic(vfrom, KM_USER0);

		seg++;
		offs = 0;
		dst_offs += len;
		copy_size -= len;
	}
}

/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
{
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		if (bvec[f].bv_page != pkt->pages[p]) {
			void *vfrom = kmap_atomic(bvec[f].bv_page, KM_USER0) + bvec[f].bv_offset;
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom, KM_USER0);
			bvec[f].bv_page = pkt->pages[p];
			bvec[f].bv_offset = offs;
		} else {
			BUG_ON(bvec[f].bv_offset != offs);
		}
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			offs = 0;
			p++;
		}
	}
}

static void pkt_end_io_read(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
		(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);

	if (err)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);
}

static void pkt_end_io_packet_write(struct bio *bio, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
}

/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		VPRINTK("pkt_gather_data: zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		struct bio_vec *vec;
		int p, offset;

		if (written[f])
			continue;

		bio = pkt->r_bios[f];
		vec = bio->bi_io_vec;
		bio_init(bio);
		bio->bi_max_vecs = 1;
		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;
		bio->bi_io_vec = vec;
		bio->bi_destructor = pkt_bio_destructor;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}

/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}
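
/*
 * Note (added for clarity): the two functions above keep the free list
 * ordered so that packets with valid cached data sit at the head and
 * cache-less packets at the tail.  pkt_get_packet_data() falls back to
 * the tail entry when no packet matches the zone, so cached data that
 * might still be reused is sacrificed last.
 */
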
/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 */
static int pkt_start_recovery(struct packet_data *pkt)
{
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 */
	return 0;
#if 0
	struct request *rq = pkt->rq;
	struct pktcdvd_device *pd = rq->rq_disk->private_data;
	struct block_device *pkt_bdev;
	struct super_block *sb = NULL;
	unsigned long old_block, new_block;
	sector_t new_sector;

	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
	if (pkt_bdev) {
		sb = get_super(pkt_bdev);
		bdput(pkt_bdev);
	}

	if (!sb)
		return 0;

	if (!sb->s_op || !sb->s_op->relocate_blocks)
		goto out;

	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
		goto out;

	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	pkt->bio->bi_sector = new_sector;
	pkt->bio->bi_next = NULL;
	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
	pkt->bio->bi_idx = 0;

	BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
	BUG_ON(pkt->bio->bi_private != pkt);

	drop_super(sb);
	return 1;

out:
	drop_super(sb);
	return 0;
#endif
}

static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}

/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;
	int wakeup;

	VPRINTK("handle_queue\n");

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		VPRINTK("handle_queue: no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = ZONE(bio->bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		VPRINTK("handle_queue: no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		VPRINTK("pkt_handle_queue: found zone=%llx\n",
			(unsigned long long)ZONE(bio->bi_sector, pd));
		if (ZONE(bio->bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	/* check write congestion marks, and if bio_queue_size is
	   below, wake up any waiters */
	wakeup = (pd->write_congestion_on > 0
			&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	if (wakeup) {
		clear_bdi_congested(&pd->disk->queue->backing_dev_info,
					BLK_RW_ASYNC);
	}

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}

/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	struct bio *bio;
	int f;
	int frames_write;
	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

	for (f = 0; f < pkt->frames; f++) {
		bvec[f].bv_page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		bvec[f].bv_offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
	}

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	frames_write = 0;
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int segment = bio->bi_idx;
		int src_offs = 0;
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++) {
			struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);

			while (src_offs >= src_bvl->bv_len) {
				src_offs -= src_bvl->bv_len;
				segment++;
				BUG_ON(segment >= bio->bi_vcnt);
				src_bvl = bio_iovec_idx(bio, segment);
			}

			if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
				bvec[f].bv_page = src_bvl->bv_page;
				bvec[f].bv_offset = src_bvl->bv_offset + src_offs;
			} else {
				pkt_copy_bio_data(bio, segment, src_offs,
						  bvec[f].bv_page, bvec[f].bv_offset);
			}
			src_offs += CD_FRAMESIZE;
			frames_write++;
		}
	}
	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
		frames_write, (unsigned long long)pkt->sector);
	BUG_ON(frames_write != pkt->write_size);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
		pkt_make_local_copy(pkt, bvec);
		pkt->cache_valid = 1;
	} else {
		pkt->cache_valid = 0;
	}

	/* Start the write request */
	bio_init(pkt->w_bio);
	pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
	pkt->w_bio->bi_sector = pkt->sector;
	pkt->w_bio->bi_bdev = pd->bdev;
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;
	pkt->w_bio->bi_io_vec = bvec;
	pkt->w_bio->bi_destructor = pkt_bio_destructor;
	for (f = 0; f < pkt->frames; f++)
		if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
			BUG();
	VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);

	atomic_set(&pkt->io_wait, 1);
	pkt->w_bio->bi_rw = WRITE;
	pkt_queue_bio(pd, pkt->w_bio);
}

static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
{
	struct bio *bio;

	if (!uptodate)
		pkt->cache_valid = 0;

	/* Finish all bios corresponding to this packet */
	while ((bio = bio_list_pop(&pkt->orig_bios)))
		bio_endio(bio, uptodate ? 0 : -EIO);
}
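
/*
 * Packet state transitions, as implemented by the switch below (summary
 * added for clarity):
 *
 *   WAITING    --(packet full or timeout)-->  READ_WAIT
 *   READ_WAIT  --(reads done, no errors)-->   WRITE_WAIT
 *   READ_WAIT  --(read errors)-->             RECOVERY
 *   WRITE_WAIT --(write ok)-->                FINISHED
 *   WRITE_WAIT --(write failed)-->            RECOVERY
 *   RECOVERY   --(relocation worked)-->       WRITE_WAIT
 *   RECOVERY   --(not possible)-->            FINISHED
 *
 * pkt_handle_packets() later returns FINISHED packets to the free list
 * and marks them IDLE.
 */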
  1258. static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
  1259. {
  1260. int uptodate;
  1261. VPRINTK("run_state_machine: pkt %d\n", pkt->id);
  1262. for (;;) {
  1263. switch (pkt->state) {
  1264. case PACKET_WAITING_STATE:
  1265. if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
  1266. return;
  1267. pkt->sleep_time = 0;
  1268. pkt_gather_data(pd, pkt);
  1269. pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
  1270. break;
  1271. case PACKET_READ_WAIT_STATE:
  1272. if (atomic_read(&pkt->io_wait) > 0)
  1273. return;
  1274. if (atomic_read(&pkt->io_errors) > 0) {
  1275. pkt_set_state(pkt, PACKET_RECOVERY_STATE);
  1276. } else {
  1277. pkt_start_write(pd, pkt);
  1278. }
  1279. break;
  1280. case PACKET_WRITE_WAIT_STATE:
  1281. if (atomic_read(&pkt->io_wait) > 0)
  1282. return;
  1283. if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
  1284. pkt_set_state(pkt, PACKET_FINISHED_STATE);
  1285. } else {
  1286. pkt_set_state(pkt, PACKET_RECOVERY_STATE);
  1287. }
  1288. break;
  1289. case PACKET_RECOVERY_STATE:
  1290. if (pkt_start_recovery(pkt)) {
  1291. pkt_start_write(pd, pkt);
  1292. } else {
  1293. VPRINTK("No recovery possible\n");
  1294. pkt_set_state(pkt, PACKET_FINISHED_STATE);
  1295. }
  1296. break;
  1297. case PACKET_FINISHED_STATE:
  1298. uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
  1299. pkt_finish_packet(pkt, uptodate);
  1300. return;
  1301. default:
  1302. BUG();
  1303. break;
  1304. }
  1305. }
  1306. }
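
/*
 * For orientation, a summary of the transitions driven above, as read from
 * the code itself (this comment adds no behaviour):
 *
 *   WAITING    -> READ_WAIT   once the packet is full or its timer expired
 *   READ_WAIT  -> WRITE_WAIT  when all gather reads finished without error
 *   READ_WAIT  -> RECOVERY    when a gather read failed
 *   WRITE_WAIT -> FINISHED    when the write bio completed successfully
 *   WRITE_WAIT -> RECOVERY    when the write failed
 *   RECOVERY   -> WRITE_WAIT  if pkt_start_recovery() succeeds
 *   RECOVERY   -> FINISHED    otherwise
 *
 * FINISHED packets are reaped by pkt_handle_packets() below and go back to
 * PACKET_IDLE_STATE via the free list.
 */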
static void pkt_handle_packets(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *next;

	VPRINTK("pkt_handle_packets\n");

	/*
	 * Run state machine for active packets
	 */
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
		}
	}

	/*
	 * Move no longer active packets to the free list
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			pkt_set_state(pkt, PACKET_IDLE_STATE);
			atomic_set(&pd->scan_queue, 1);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
{
	struct packet_data *pkt;
	int i;

	for (i = 0; i < PACKET_NUM_STATES; i++)
		states[i] = 0;

	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	}
	spin_unlock(&pd->cdrw.active_list_lock);
}

/*
 * kcdrwd is woken up when writes have been queued for one of our
 * registered devices
 */
static int kcdrwd(void *foobar)
{
	struct pktcdvd_device *pd = foobar;
	struct packet_data *pkt;
	long min_sleep_time, residue;

	set_user_nice(current, -20);
	set_freezable();

	for (;;) {
		DECLARE_WAITQUEUE(wait, current);

		/*
		 * Wait until there is something to do
		 */
		add_wait_queue(&pd->wqueue, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);

			/* Check if we need to run pkt_handle_queue */
			if (atomic_read(&pd->scan_queue) > 0)
				goto work_to_do;

			/* Check if we need to run the state machine for some packet */
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
					goto work_to_do;
			}

			/* Check if we need to process the iosched queues */
			if (atomic_read(&pd->iosched.attention) != 0)
				goto work_to_do;

			/* Otherwise, go to sleep */
			if (PACKET_DEBUG > 1) {
				int states[PACKET_NUM_STATES];
				pkt_count_states(pd, states);
				VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
					states[0], states[1], states[2],
					states[3], states[4], states[5]);
			}

			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			}

			generic_unplug_device(bdev_get_queue(pd->bdev));

			VPRINTK("kcdrwd: sleeping\n");
			residue = schedule_timeout(min_sleep_time);
			VPRINTK("kcdrwd: wake up\n");

			/* make swsusp happy with our thread */
			try_to_freeze();

			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
					continue;
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
				}
			}

			if (kthread_should_stop())
				break;
		}
work_to_do:
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&pd->wqueue, &wait);

		if (kthread_should_stop())
			break;

		/*
		 * if pkt_handle_queue returns true, we can queue
		 * another request.
		 */
		while (pkt_handle_queue(pd))
			;

		/*
		 * Handle packet state machine
		 */
		pkt_handle_packets(pd);

		/*
		 * Handle iosched queues
		 */
		pkt_iosched_process_queue(pd);
	}

	return 0;
}

static void pkt_print_settings(struct pktcdvd_device *pd)
{
	printk(DRIVER_NAME": %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
	printk("%u blocks, ", pd->settings.size >> 2);
	printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
}

static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));

	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
}
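
/*
 * Illustrative only (guarded out, not part of the driver): how the callers
 * below typically use pkt_mode_sense().  A first probe with a small buffer
 * yields the mode data length in header bytes 0-1, after which the command
 * can be reissued with the exact size.  The page code and the 64-byte buffer
 * here are arbitrary examples, not values the driver depends on.
 */
#if 0
static int example_mode_page_size(struct pktcdvd_device *pd, int page_code)
{
	struct packet_command cgc;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	ret = pkt_mode_sense(pd, &cgc, page_code, 0);
	if (ret)
		return ret;

	/* bytes 0-1 of the mode parameter header hold the data length */
	return 2 + ((buf[0] << 8) | (buf[1] & 0xff));
}
#endif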
static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
{
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
}

static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
{
	struct packet_command cgc;
	int ret;

	/* set up command and get the disc info */
	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
	cgc.cmd[8] = cgc.buflen = 2;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	/* not all drives have the same disc_info length, so requeue
	 * packet with the length the drive tells us it can supply
	 */
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);

	if (cgc.buflen > sizeof(disc_information))
		cgc.buflen = sizeof(disc_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}
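
/*
 * Worked example for the requeue above (numbers illustrative): if the drive
 * reports disc_information_length == 32, the second command asks for
 * 32 + sizeof(__be16) = 34 bytes; the clamp to sizeof(disc_information)
 * ensures a drive advertising an oversized length cannot overrun the
 * caller's buffer.
 */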
static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
{
	struct packet_command cgc;
	int ret;

	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
	cgc.cmd[1] = type & 3;
	cgc.cmd[4] = (track & 0xff00) >> 8;
	cgc.cmd[5] = track & 0xff;
	cgc.cmd[8] = 8;
	cgc.quiet = 1;

	if ((ret = pkt_generic_packet(pd, &cgc)))
		return ret;

	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);

	if (cgc.buflen > sizeof(track_information))
		cgc.buflen = sizeof(track_information);

	cgc.cmd[8] = cgc.buflen;
	return pkt_generic_packet(pd, &cgc);
}

static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
						long *last_written)
{
	disc_information di;
	track_information ti;
	__u32 last_track;
	int ret = -1;

	if ((ret = pkt_get_disc_info(pd, &di)))
		return ret;

	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
		return ret;

	/* if this track is blank, try the previous. */
	if (ti.blank) {
		last_track--;
		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
			return ret;
	}

	/* if last recorded field is valid, return it. */
	if (ti.lra_v) {
		*last_written = be32_to_cpu(ti.last_rec_address);
	} else {
		/* make it up instead */
		*last_written = be32_to_cpu(ti.track_start) +
				be32_to_cpu(ti.track_size);
		if (ti.free_blocks)
			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
	}
	return 0;
}
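
/*
 * Worked example for the fallback above (illustrative values): with
 * track_start == 0, track_size == 10000 and free_blocks == 1000, the guessed
 * last written address is 10000 - (1000 + 7) = 8993.  The extra 7 blocks
 * presumably cover the per-packet link area (cf. the 7-sector link_loss set
 * in pkt_probe_settings() below).
 */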
/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	write_param_page *wp;
	char buffer[128];
	int ret, size;

	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
		return 0;

	memset(buffer, 0, sizeof(buffer));
	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	if (size > sizeof(buffer))
		size = sizeof(buffer);

	/*
	 * now get it all
	 */
	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
	cgc.sense = &sense;
	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	/*
	 * write page is offset header + block descriptor length
	 */
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;

#ifdef PACKET_USE_LS
	wp->link_size = 7;
	wp->ls_v = 1;
#endif

	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
#if 0
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
#endif
	} else {
		/*
		 * paranoia
		 */
		printk(DRIVER_NAME": write mode wrong %d\n", wp->data_block_type);
		return 1;
	}
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	cgc.buflen = cgc.cmd[8] = size;
	if ((ret = pkt_mode_select(pd, &cgc))) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	pkt_print_settings(pd);
	return 0;
}
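
/*
 * Buffer layout assumed by the pointer arithmetic above (a sketch inferred
 * from the code, not quoted from the MMC spec): the MODE SENSE reply is
 *
 *   [struct mode_page_header][block descriptors, pd->mode_offset bytes]
 *   [write_param_page]
 *
 * so with a typical mode_offset of 8 (one block descriptor), the write
 * parameters page starts at byte sizeof(struct mode_page_header) + 8 of
 * `buffer`.
 */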
/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
{
	switch (pd->mmc3_profile) {
	case 0x1a: /* DVD+RW */
	case 0x12: /* DVD-RAM */
		/* The track is always writable on DVD+RW/DVD-RAM */
		return 1;
	default:
		break;
	}

	if (!ti->packet || !ti->fp)
		return 0;

	/*
	 * "good" settings as per Mt Fuji.
	 */
	if (ti->rt == 0 && ti->blank == 0)
		return 1;

	if (ti->rt == 0 && ti->blank == 1)
		return 1;

	if (ti->rt == 1 && ti->blank == 0)
		return 1;

	printk(DRIVER_NAME": bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
	return 0;
}

/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
{
	switch (pd->mmc3_profile) {
	case 0x0a: /* CD-RW */
	case 0xffff: /* MMC3 not supported */
		break;
	case 0x1a: /* DVD+RW */
	case 0x13: /* DVD-RW */
	case 0x12: /* DVD-RAM */
		return 1;
	default:
		VPRINTK(DRIVER_NAME": Wrong disc profile (%x)\n", pd->mmc3_profile);
		return 0;
	}

	/*
	 * for disc type 0xff we should probably reserve a new track,
	 * but it's not clear that is our job; leaving it to user-space
	 * apps is probably the right call.
	 */
	if (di->disc_type == 0xff) {
		printk(DRIVER_NAME": Unknown disc. No track?\n");
		return 0;
	}

	if (di->disc_type != 0x20 && di->disc_type != 0) {
		printk(DRIVER_NAME": Wrong disc type (%x)\n", di->disc_type);
		return 0;
	}

	if (di->erasable == 0) {
		printk(DRIVER_NAME": Disc not erasable\n");
		return 0;
	}

	if (di->border_status == PACKET_SESSION_RESERVED) {
		printk(DRIVER_NAME": Can't write to last track (reserved)\n");
		return 0;
	}

	return 1;
}

static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	unsigned char buf[12];
	disc_information di;
	track_information ti;
	int ret, track;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
	cgc.cmd[8] = 8;
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];

	memset(&di, 0, sizeof(disc_information));
	memset(&ti, 0, sizeof(track_information));

	if ((ret = pkt_get_disc_info(pd, &di))) {
		printk(DRIVER_NAME": failed get_disc\n");
		return ret;
	}

	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;

	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
		printk(DRIVER_NAME": failed get_track\n");
		return ret;
	}

	if (!pkt_writable_track(pd, &ti)) {
		printk(DRIVER_NAME": can't write to this track\n");
		return -EROFS;
	}

	/*
	 * we keep packet size in 512 byte units, makes it easier to
	 * deal with request calculations.
	 */
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		printk(DRIVER_NAME": detected zero packet size!\n");
		return -ENXIO;
	}
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		printk(DRIVER_NAME": packet size is too big\n");
		return -EROFS;
	}
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);

	if (ti.nwa_v) {
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	}

	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	if (ti.lra_v) {
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	} else {
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	}

	/*
	 * fine for now
	 */
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;

	/*
	 * mode1 or mode2 disc
	 */
	switch (ti.data_mode) {
	case PACKET_MODE1:
		pd->settings.block_mode = PACKET_BLOCK_MODE1;
		break;
	case PACKET_MODE2:
		pd->settings.block_mode = PACKET_BLOCK_MODE2;
		break;
	default:
		printk(DRIVER_NAME": unknown data mode\n");
		return -EROFS;
	}
	return 0;
}
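
/*
 * Unit bookkeeping, by example (illustrative values): a drive reporting a
 * fixed packet size of 32 frames (32 * 2048 bytes) yields
 * pd->settings.size = 32 << 2 = 128 sectors of 512 bytes, i.e. a 64kB
 * packet, which is the unit all the request calculations below work in.
 */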
/*
 * enable/disable write caching on drive
 */
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
						int set)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	int ret;

	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.buflen = pd->mode_offset + 12;

	/*
	 * caching mode page might not be there, so quiet this command
	 */
	cgc.quiet = 1;

	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
		return ret;

	buf[pd->mode_offset + 10] |= (!!set << 2);

	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
	ret = pkt_mode_select(pd, &cgc);
	if (ret) {
		printk(DRIVER_NAME": write caching control failed\n");
		pkt_dump_sense(&cgc);
	} else if (!ret && set)
		printk(DRIVER_NAME": enabled write caching on %s\n", pd->name);
	return ret;
}

static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
{
	struct packet_command cgc;

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
	cgc.cmd[4] = lockflag ? 1 : 0;
	return pkt_generic_packet(pd, &cgc);
}

/*
 * Returns drive maximum write speed
 */
static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
						unsigned *write_speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[256+18];
	unsigned char *cap_buf;
	int ret, offset;

	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
	cgc.sense = &sense;

	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	if (ret) {
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		if (ret) {
			pkt_dump_sense(&cgc);
			return ret;
		}
	}

	offset = 20;			    /* Obsoleted field, used by older drives */
	if (cap_buf[1] >= 28)
		offset = 28;		    /* Current write speed selected */
	if (cap_buf[1] >= 30) {
		/* If the drive reports at least one "Logical Unit Write
		 * Speed Performance Descriptor Block", use the information
		 * in the first block. (contains the highest speed)
		 */
		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
		if (num_spdb > 0)
			offset = 34;
	}

	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
	return 0;
}

/* These tables from cdrecord - I don't have orange book */
/* standard speed CD-RW (1-4x) */
static char clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* high speed CD-RW (-10x) */
static char hs_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
/* ultra high speed CD-RW */
static char us_clv_to_speed[16] = {
	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
	   0, 2, 4, 8, 0, 0, 16, 0, 24, 32, 40, 48, 0, 0, 0, 0
};
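
/*
 * Example reading of the tables (values illustrative): an ultra high speed
 * CD-RW, i.e. disc sub-type 2, whose ATIP A1 speed nibble is 8 maps to
 * us_clv_to_speed[8] == 24, a 24x maximum media speed; a 0 entry means the
 * combination is unknown and pkt_media_speed() below reports an error.
 */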
/*
 * reads the maximum media speed from ATIP
 */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
						unsigned *speed)
{
	struct packet_command cgc;
	struct request_sense sense;
	unsigned char buf[64];
	unsigned int size, st, sp;
	int ret;

	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4; /* READ ATIP */
	cgc.cmd[8] = 2;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(&cgc);
		return ret;
	}
	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
	if (size > sizeof(buf))
		size = sizeof(buf);

	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
	cgc.sense = &sense;
	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
	cgc.cmd[1] = 2;
	cgc.cmd[2] = 4;
	cgc.cmd[8] = size;
	ret = pkt_generic_packet(pd, &cgc);
	if (ret) {
		pkt_dump_sense(&cgc);
		return ret;
	}

	if (!(buf[6] & 0x40)) {
		printk(DRIVER_NAME": Disc type is not CD-RW\n");
		return 1;
	}
	if (!(buf[6] & 0x4)) {
		printk(DRIVER_NAME": A1 values on media are not valid, maybe not CDRW?\n");
		return 1;
	}

	st = (buf[6] >> 3) & 0x7; /* disc sub-type */

	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */

	/* Info from cdrecord */
	switch (st) {
	case 0: /* standard speed */
		*speed = clv_to_speed[sp];
		break;
	case 1: /* high speed */
		*speed = hs_clv_to_speed[sp];
		break;
	case 2: /* ultra high speed */
		*speed = us_clv_to_speed[sp];
		break;
	default:
		printk(DRIVER_NAME": Unknown disc sub-type %d\n", st);
		return 1;
	}
	if (*speed) {
		printk(DRIVER_NAME": Max. media speed: %d\n", *speed);
		return 0;
	} else {
		printk(DRIVER_NAME": Unknown speed %d for sub-type %d\n", sp, st);
		return 1;
	}
}

static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
{
	struct packet_command cgc;
	struct request_sense sense;
	int ret;

	VPRINTK(DRIVER_NAME": Performing OPC\n");

	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
	cgc.sense = &sense;
	cgc.timeout = 60*HZ;
	cgc.cmd[0] = GPCMD_SEND_OPC;
	cgc.cmd[1] = 1;
	if ((ret = pkt_generic_packet(pd, &cgc)))
		pkt_dump_sense(&cgc);
	return ret;
}

static int pkt_open_write(struct pktcdvd_device *pd)
{
	int ret;
	unsigned int write_speed, media_write_speed, read_speed;

	if ((ret = pkt_probe_settings(pd))) {
		VPRINTK(DRIVER_NAME": %s failed probe\n", pd->name);
		return ret;
	}

	if ((ret = pkt_set_write_settings(pd))) {
		DPRINTK(DRIVER_NAME": %s failed saving write settings\n", pd->name);
		return -EIO;
	}

	pkt_write_caching(pd, USE_WCACHING);

	if ((ret = pkt_get_max_speed(pd, &write_speed)))
		write_speed = 16 * 177;
	switch (pd->mmc3_profile) {
	case 0x13: /* DVD-RW */
	case 0x1a: /* DVD+RW */
	case 0x12: /* DVD-RAM */
		DPRINTK(DRIVER_NAME": write speed %ukB/s\n", write_speed);
		break;
	default:
		if ((ret = pkt_media_speed(pd, &media_write_speed)))
			media_write_speed = 16;
		write_speed = min(write_speed, media_write_speed * 177);
		DPRINTK(DRIVER_NAME": write speed %ux\n", write_speed / 176);
		break;
	}
	read_speed = write_speed;

	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
		DPRINTK(DRIVER_NAME": %s couldn't set write speed\n", pd->name);
		return -EIO;
	}
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	if ((ret = pkt_perform_opc(pd))) {
		DPRINTK(DRIVER_NAME": %s Optimum Power Calibration failed\n", pd->name);
	}

	return 0;
}
/*
 * called at open time.
 */
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
{
	int ret;
	long lba;
	struct request_queue *q;

	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
	 * so bdget() can't fail.
	 */
	bdget(pd->bdev->bd_dev);
	if ((ret = blkdev_get(pd->bdev, FMODE_READ)))
		goto out;

	if ((ret = bd_claim(pd->bdev, pd)))
		goto out_putdev;

	if ((ret = pkt_get_last_written(pd, &lba))) {
		printk(DRIVER_NAME": pkt_get_last_written failed\n");
		goto out_unclaim;
	}

	set_capacity(pd->disk, lba << 2);
	set_capacity(pd->bdev->bd_disk, lba << 2);
	bd_set_size(pd->bdev, (loff_t)lba << 11);

	q = bdev_get_queue(pd->bdev);
	if (write) {
		if ((ret = pkt_open_write(pd)))
			goto out_unclaim;
		/*
		 * Some CDRW drives can not handle writes larger than one packet,
		 * even if the size is a multiple of the packet size.
		 */
		spin_lock_irq(q->queue_lock);
		blk_queue_max_hw_sectors(q, pd->settings.size);
		spin_unlock_irq(q->queue_lock);
		set_bit(PACKET_WRITABLE, &pd->flags);
	} else {
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	}

	if ((ret = pkt_set_segment_merging(pd, q)))
		goto out_unclaim;

	if (write) {
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			printk(DRIVER_NAME": not enough memory for buffers\n");
			ret = -ENOMEM;
			goto out_unclaim;
		}
		printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
	}

	return 0;

out_unclaim:
	bd_release(pd->bdev);
out_putdev:
	blkdev_put(pd->bdev, FMODE_READ);
out:
	return ret;
}

/*
 * called when the device is closed. makes sure that the device flushes
 * the internal cache before we close.
 */
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
{
	if (flush && pkt_flush_cache(pd))
		DPRINTK(DRIVER_NAME": %s not flushing cache\n", pd->name);

	pkt_lock_door(pd, 0);

	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	bd_release(pd->bdev);
	blkdev_put(pd->bdev, FMODE_READ);

	pkt_shrink_pktlist(pd);
}

static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
{
	if (dev_minor >= MAX_WRITERS)
		return NULL;
	return pkt_devs[dev_minor];
}

static int pkt_open(struct block_device *bdev, fmode_t mode)
{
	struct pktcdvd_device *pd = NULL;
	int ret;

	VPRINTK(DRIVER_NAME": entering open\n");

	mutex_lock(&ctl_mutex);
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
		goto out;
	}
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
			goto out_dec;
		}
	} else {
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
		if (ret)
			goto out_dec;
		/*
		 * needed here as well, since ext2 (among others) may change
		 * the blocksize at mount time
		 */
		set_blocksize(bdev, CD_FRAMESIZE);
	}

	mutex_unlock(&ctl_mutex);
	return 0;

out_dec:
	pd->refcnt--;
out:
	VPRINTK(DRIVER_NAME": failed open (%d)\n", ret);
	mutex_unlock(&ctl_mutex);
	return ret;
}

static int pkt_close(struct gendisk *disk, fmode_t mode)
{
	struct pktcdvd_device *pd = disk->private_data;
	int ret = 0;

	mutex_lock(&ctl_mutex);
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);
	}
	mutex_unlock(&ctl_mutex);
	return ret;
}

static void pkt_end_io_read_cloned(struct bio *bio, int err)
{
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	bio_put(bio);
	bio_endio(psd->bio, err);
	mempool_free(psd, psd_pool);
	pkt_bio_finished(pd);
}

static int pkt_make_request(struct request_queue *q, struct bio *bio)
{
	struct pktcdvd_device *pd;
	char b[BDEVNAME_SIZE];
	sector_t zone;
	struct packet_data *pkt;
	int was_empty, blocked_bio;
	struct pkt_rb_node *node;

	pd = q->queuedata;
	if (!pd) {
		printk(DRIVER_NAME": %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
		goto end_io;
	}

	/*
	 * Clone READ bios so we can have our own bi_end_io callback.
	 */
	if (bio_data_dir(bio) == READ) {
		struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
		struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);

		psd->pd = pd;
		psd->bio = bio;
		cloned_bio->bi_bdev = pd->bdev;
		cloned_bio->bi_private = psd;
		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
		pd->stats.secs_r += bio->bi_size >> 9;
		pkt_queue_bio(pd, cloned_bio);
		return 0;
	}

	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		printk(DRIVER_NAME": WRITE for ro device %s (%llu)\n",
			pd->name, (unsigned long long)bio->bi_sector);
		goto end_io;
	}

	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
		printk(DRIVER_NAME": wrong bio size\n");
		goto end_io;
	}

	blk_queue_bounce(q, &bio);

	zone = ZONE(bio->bi_sector, pd);
	VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_sector,
		(unsigned long long)(bio->bi_sector + bio_sectors(bio)));

	/* Check if we have to split the bio */
	{
		struct bio_pair *bp;
		sector_t last_zone;
		int first_sectors;

		last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
		if (last_zone != zone) {
			BUG_ON(last_zone != zone + pd->settings.size);
			first_sectors = last_zone - bio->bi_sector;
			bp = bio_split(bio, first_sectors);
			BUG_ON(!bp);
			pkt_make_request(q, &bp->bio1);
			pkt_make_request(q, &bp->bio2);
			bio_pair_release(bp);
			return 0;
		}
	}

	/*
	 * If we find a matching packet in state WAITING or READ_WAIT, we can
	 * just append this bio to that packet.
	 */
	spin_lock(&pd->cdrw.active_list_lock);
	blocked_bio = 0;
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				bio_list_add(&pkt->orig_bios, bio);
				pkt->write_size += bio->bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				}
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
				return 0;
			} else {
				blocked_bio = 1;
			}
			spin_unlock(&pkt->lock);
		}
	}
	spin_unlock(&pd->cdrw.active_list_lock);

	/*
	 * Test if there is enough room left in the bio work queue
	 * (queue size >= congestion on mark).
	 * If not, wait till the work queue size is below the congestion
	 * off mark.
	 */
	spin_lock(&pd->lock);
	if (pd->write_congestion_on > 0
	    && pd->bio_queue_size >= pd->write_congestion_on) {
		set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
		do {
			spin_unlock(&pd->lock);
			congestion_wait(BLK_RW_ASYNC, HZ);
			spin_lock(&pd->lock);
		} while (pd->bio_queue_size > pd->write_congestion_off);
	}
	spin_unlock(&pd->lock);

	/*
	 * No matching packet found. Store the bio in the work queue.
	 */
	node = mempool_alloc(pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);

	/*
	 * Wake up the worker thread.
	 */
	atomic_set(&pd->scan_queue, 1);
	if (was_empty) {
		/* This wake_up is required for correct operation */
		wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		/*
		 * This wake up is not required for correct operation,
		 * but improves performance in some cases.
		 */
		wake_up(&pd->wqueue);
	}
	return 0;
end_io:
	bio_io_error(bio);
	return 0;
}
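
/*
 * Worked example for the split above (illustrative numbers, 64kB packets so
 * pd->settings.size == 128 and ZONE() rounds a sector down to a multiple of
 * 128): a 16-sector write bio starting at sector 120 has its last sector in
 * zone 128 while it starts in zone 0, so it is split after
 * first_sectors = 128 - 120 = 8 sectors and each half is resubmitted to
 * pkt_make_request() on its own.
 */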
static int pkt_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct pktcdvd_device *pd = q->queuedata;
	sector_t zone = ZONE(bmd->bi_sector, pd);
	int used = ((bmd->bi_sector - zone) << 9) + bmd->bi_size;
	int remaining = (pd->settings.size << 9) - used;
	int remaining2;

	/*
	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
	 * boundary, pkt_make_request() will split the bio.
	 */
	remaining2 = PAGE_SIZE - bmd->bi_size;
	remaining = max(remaining, remaining2);

	BUG_ON(remaining < 0);
	return remaining;
}
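
/*
 * Example of the arithmetic above (illustrative numbers, 64kB packets): a
 * bio currently 2048 bytes long starting 8 sectors into its zone has
 * used = (8 << 9) + 2048 = 6144 bytes, so remaining = 65536 - 6144 = 59392
 * bytes may still be merged.  The PAGE_SIZE clause only changes the result
 * when the packet is nearly full; the boundary-crossing bio it permits is
 * later split by pkt_make_request().
 */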
static void pkt_init_queue(struct pktcdvd_device *pd)
{
	struct request_queue *q = pd->disk->queue;

	blk_queue_make_request(q, pkt_make_request);
	blk_queue_logical_block_size(q, CD_FRAMESIZE);
	blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
	blk_queue_merge_bvec(q, pkt_merge_bvec);
	q->queuedata = pd;
}

static int pkt_seq_show(struct seq_file *m, void *p)
{
	struct pktcdvd_device *pd = m->private;
	char *msg;
	char bdev_buf[BDEVNAME_SIZE];
	int states[PACKET_NUM_STATES];

	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
		   bdevname(pd->bdev, bdev_buf));

	seq_printf(m, "\nSettings:\n");
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);

	if (pd->settings.write_type == 0)
		msg = "Packet";
	else
		msg = "Unknown";
	seq_printf(m, "\twrite type:\t\t%s\n", msg);

	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);

	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);

	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
		msg = "Mode 1";
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
		msg = "Mode 2";
	else
		msg = "Unknown";
	seq_printf(m, "\tblock mode:\t\t%s\n", msg);

	seq_printf(m, "\nStatistics:\n");
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);

	seq_printf(m, "\nMisc:\n");
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);

	seq_printf(m, "\nQueue state:\n");
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);

	pkt_count_states(pd, states);
	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
		   states[0], states[1], states[2], states[3], states[4], states[5]);

	seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
		   pd->write_congestion_off,
		   pd->write_congestion_on);
	return 0;
}

static int pkt_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, pkt_seq_show, PDE(inode)->data);
}

static const struct file_operations pkt_proc_fops = {
	.open	 = pkt_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release
};

static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
{
	int i;
	int ret = 0;
	char b[BDEVNAME_SIZE];
	struct block_device *bdev;

	if (pd->pkt_dev == dev) {
		printk(DRIVER_NAME": Recursive setup not allowed\n");
		return -EBUSY;
	}
	for (i = 0; i < MAX_WRITERS; i++) {
		struct pktcdvd_device *pd2 = pkt_devs[i];
		if (!pd2)
			continue;
		if (pd2->bdev->bd_dev == dev) {
			printk(DRIVER_NAME": %s already setup\n", bdevname(pd2->bdev, b));
			return -EBUSY;
		}
		if (pd2->pkt_dev == dev) {
			printk(DRIVER_NAME": Can't chain pktcdvd devices\n");
			return -EBUSY;
		}
	}

	bdev = bdget(dev);
	if (!bdev)
		return -ENOMEM;
	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY);
	if (ret)
		return ret;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	pd->bdev = bdev;
	set_blocksize(bdev, CD_FRAMESIZE);

	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
	if (IS_ERR(pd->cdrw.thread)) {
		printk(DRIVER_NAME": can't start kernel thread\n");
		ret = -ENOMEM;
		goto out_mem;
	}

	proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
	DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
	return 0;

out_mem:
	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return ret;
}

static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;

	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd,
		MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));

	switch (cmd) {
	case CDROMEJECT:
		/*
		 * The door gets locked when the device is opened, so we
		 * have to unlock it or else the eject command fails.
		 */
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
		/* fallthru */
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	case CDROMMULTISESSION:
	case CDROMREADTOCENTRY:
	case CDROM_LAST_WRITTEN:
	case CDROM_SEND_PACKET:
	case SCSI_IOCTL_SEND_COMMAND:
		return __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);

	default:
		VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd);
		return -ENOTTY;
	}

	return 0;
}
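
/*
 * User-space view of the forwarding above, as a hedged sketch (guarded out;
 * ordinary CDROM ioctls are issued against the pktcdvd node and passed
 * through to the underlying drive; the device path is an example only):
 */
#if 0
	int fd = open("/dev/pktcdvd/pktcdvd0", O_RDONLY | O_NONBLOCK);
	if (fd >= 0) {
		/* the driver unlocks the door first when we are the only opener */
		ioctl(fd, CDROMEJECT, 0);
		close(fd);
	}
#endif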
static int pkt_media_changed(struct gendisk *disk)
{
	struct pktcdvd_device *pd = disk->private_data;
	struct gendisk *attached_disk;

	if (!pd)
		return 0;
	if (!pd->bdev)
		return 0;
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk)
		return 0;
	return attached_disk->fops->media_changed(attached_disk);
}

static const struct block_device_operations pktcdvd_ops = {
	.owner =		THIS_MODULE,
	.open =			pkt_open,
	.release =		pkt_close,
	.locked_ioctl =		pkt_ioctl,
	.media_changed =	pkt_media_changed,
};

static char *pktcdvd_devnode(struct gendisk *gd, mode_t *mode)
{
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
}

/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
{
	int idx;
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	struct gendisk *disk;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++)
		if (!pkt_devs[idx])
			break;
	if (idx == MAX_WRITERS) {
		printk(DRIVER_NAME": max %d writers supported\n", MAX_WRITERS);
		ret = -EBUSY;
		goto out_mutex;
	}

	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
		goto out_mutex;

	pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE,
						  sizeof(struct pkt_rb_node));
	if (!pd->rb_pool)
		goto out_mem;

	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	bio_list_init(&pd->iosched.read_queue);
	bio_list_init(&pd->iosched.write_queue);
	sprintf(pd->name, DRIVER_NAME"%d", idx);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on  = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;

	disk = alloc_disk(1);
	if (!disk)
		goto out_mem;
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE;
	strcpy(disk->disk_name, pd->name);
	disk->devnode = pktcdvd_devnode;
	disk->private_data = pd;
	disk->queue = blk_alloc_queue(GFP_KERNEL);
	if (!disk->queue)
		goto out_mem2;

	pd->pkt_dev = MKDEV(pktdev_major, idx);
	ret = pkt_new_dev(pd, dev);
	if (ret)
		goto out_new_dev;

	add_disk(disk);

	pkt_sysfs_dev_new(pd);
	pkt_debugfs_dev_new(pd);

	pkt_devs[idx] = pd;
	if (pkt_dev)
		*pkt_dev = pd->pkt_dev;

	mutex_unlock(&ctl_mutex);
	return 0;

out_new_dev:
	blk_cleanup_queue(disk->queue);
out_mem2:
	put_disk(disk);
out_mem:
	if (pd->rb_pool)
		mempool_destroy(pd->rb_pool);
	kfree(pd);
out_mutex:
	mutex_unlock(&ctl_mutex);
	printk(DRIVER_NAME": setup of pktcdvd device failed\n");
	return ret;
}

/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
static int pkt_remove_dev(dev_t pkt_dev)
{
	struct pktcdvd_device *pd;
	int idx;
	int ret = 0;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	for (idx = 0; idx < MAX_WRITERS; idx++) {
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
			break;
	}
	if (idx == MAX_WRITERS) {
		DPRINTK(DRIVER_NAME": dev not setup\n");
		ret = -ENXIO;
		goto out;
	}

	if (pd->refcnt > 0) {
		ret = -EBUSY;
		goto out;
	}
	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);

	pkt_devs[idx] = NULL;

	pkt_debugfs_dev_remove(pd);
	pkt_sysfs_dev_remove(pd);

	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);

	remove_proc_entry(pd->name, pkt_proc);
	DPRINTK(DRIVER_NAME": writer %s unmapped\n", pd->name);

	del_gendisk(pd->disk);
	blk_cleanup_queue(pd->disk->queue);
	put_disk(pd->disk);

	mempool_destroy(pd->rb_pool);
	kfree(pd);

	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);

out:
	mutex_unlock(&ctl_mutex);
	return ret;
}

static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
{
	struct pktcdvd_device *pd;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;

	mutex_unlock(&ctl_mutex);
}

static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct pkt_ctrl_command ctrl_cmd;
	int ret = 0;
	dev_t pkt_dev = 0;

	if (cmd != PACKET_CTRL_CMD)
		return -ENOTTY;

	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;

	switch (ctrl_cmd.command) {
	case PKT_CTRL_CMD_SETUP:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
		ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
		break;
	case PKT_CTRL_CMD_TEARDOWN:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
		break;
	case PKT_CTRL_CMD_STATUS:
		pkt_get_status(&ctrl_cmd);
		break;
	default:
		return -ENOTTY;
	}

	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
		return -EFAULT;
	return ret;
}

static const struct file_operations pkt_ctl_fops = {
	.ioctl	 = pkt_ctl_ioctl,
	.owner	 = THIS_MODULE,
};
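
/*
 * A hedged user-space sketch of driving the control device above (guarded
 * out; roughly what a setup tool such as pktsetup does, with error handling
 * omitted and paths illustrative; assumes st_rdev uses a layout that
 * new_decode_dev() on the kernel side understands):
 */
#if 0
	struct pkt_ctrl_command c;
	struct stat s;
	int fd = open("/dev/pktcdvd/control", O_RDONLY);

	stat("/dev/sr0", &s);		/* the CD-ROM device to wrap */
	memset(&c, 0, sizeof(c));
	c.command = PKT_CTRL_CMD_SETUP;
	c.dev = s.st_rdev;
	ioctl(fd, PACKET_CTRL_CMD, &c);
	/* on success, c.pkt_dev identifies the newly created pktcdvd device */
	close(fd);
#endif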
static struct miscdevice pkt_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= DRIVER_NAME,
	.nodename	= "pktcdvd/control",
	.fops		= &pkt_ctl_fops
};

static int __init pkt_init(void)
{
	int ret;

	mutex_init(&ctl_mutex);

	psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE,
					sizeof(struct packet_stacked_data));
	if (!psd_pool)
		return -ENOMEM;

	ret = register_blkdev(pktdev_major, DRIVER_NAME);
	if (ret < 0) {
		printk(DRIVER_NAME": Unable to register block device\n");
		goto out2;
	}
	if (!pktdev_major)
		pktdev_major = ret;

	ret = pkt_sysfs_init();
	if (ret)
		goto out;

	pkt_debugfs_init();

	ret = misc_register(&pkt_misc);
	if (ret) {
		printk(DRIVER_NAME": Unable to register misc device\n");
		goto out_misc;
	}

	pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);

	return 0;

out_misc:
	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();
out:
	unregister_blkdev(pktdev_major, DRIVER_NAME);
out2:
	mempool_destroy(psd_pool);
	return ret;
}

static void __exit pkt_exit(void)
{
	remove_proc_entry("driver/"DRIVER_NAME, NULL);
	misc_deregister(&pkt_misc);

	pkt_debugfs_cleanup();
	pkt_sysfs_cleanup();

	unregister_blkdev(pktdev_major, DRIVER_NAME);
	mempool_destroy(psd_pool);
}

MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
MODULE_LICENSE("GPL");

module_init(pkt_init);
module_exit(pkt_exit);