sg.c

  1. /*
  2. * History:
  3. * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
  4. * to allow user process control of SCSI devices.
  5. * Development Sponsored by Killy Corp. NY NY
  6. *
  7. * Original driver (sg.c):
  8. * Copyright (C) 1992 Lawrence Foard
  9. * Version 2 and 3 extensions to driver:
  10. * Copyright (C) 1998 - 2005 Douglas Gilbert
  11. *
  12. * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License as published by
  16. * the Free Software Foundation; either version 2, or (at your option)
  17. * any later version.
  18. *
  19. */
  20. static int sg_version_num = 30534; /* 2 digits for each component */
  21. #define SG_VERSION_STR "3.5.34"
  22. /*
  23. * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
  24. * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
  25. * the kernel/module needs to be built with CONFIG_SCSI_LOGGING
  26. * (otherwise the macros compile to empty statements).
  27. *
  28. */
  29. #include <linux/module.h>
  30. #include <linux/fs.h>
  31. #include <linux/kernel.h>
  32. #include <linux/sched.h>
  33. #include <linux/string.h>
  34. #include <linux/mm.h>
  35. #include <linux/errno.h>
  36. #include <linux/mtio.h>
  37. #include <linux/ioctl.h>
  38. #include <linux/slab.h>
  39. #include <linux/fcntl.h>
  40. #include <linux/init.h>
  41. #include <linux/poll.h>
  42. #include <linux/moduleparam.h>
  43. #include <linux/cdev.h>
  44. #include <linux/idr.h>
  45. #include <linux/seq_file.h>
  46. #include <linux/blkdev.h>
  47. #include <linux/delay.h>
  48. #include <linux/blktrace_api.h>
  49. #include <linux/mutex.h>
  50. #include <linux/ratelimit.h>
  51. #include "scsi.h"
  52. #include <scsi/scsi_dbg.h>
  53. #include <scsi/scsi_host.h>
  54. #include <scsi/scsi_driver.h>
  55. #include <scsi/scsi_ioctl.h>
  56. #include <scsi/sg.h>
  57. #include "scsi_logging.h"
  58. #ifdef CONFIG_SCSI_PROC_FS
  59. #include <linux/proc_fs.h>
  60. static char *sg_version_date = "20061027";
  61. static int sg_proc_init(void);
  62. static void sg_proc_cleanup(void);
  63. #endif
  64. #define SG_ALLOW_DIO_DEF 0
  65. #define SG_MAX_DEVS 32768
  66. /*
  67. * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
  68. * Then when using 32 bit integers x * m may overflow during the calculation.
  69. * Replacing it with muldiv(x,m,d)=((x % d) * m) / d + int(x / d) * m
  70. * calculates the same, but prevents the overflow when both m and d
  71. * are "small" numbers (like HZ and USER_HZ).
  72. * Of course an overflow is unavoidable if the result of muldiv doesn't fit
  73. * in 32 bits.
  74. */
  75. #define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
  76. #define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
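
/*
 * Editor's note: a worked example (values assumed for illustration) of why
 * MULDIV() is split the way it is.  With 32-bit ints, x * m can overflow
 * even when the final result fits; splitting x into (x / d) and (x % d)
 * keeps every intermediate product small as long as m and d are small
 * (HZ and USER_HZ are).  Assume HZ = 1000 and USER_HZ = 100:
 *
 *   x = 5000000 USER_HZ ticks (50000 s)
 *   naive:   x * m = 5000000 * 1000          = 5,000,000,000  -> overflows
 *   MULDIV:  ((x % d) * m) / d + (x / d) * m
 *          = (0 * 1000) / 100 + 50000 * 1000 = 50,000,000     -> fits
 */
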
  77. int sg_big_buff = SG_DEF_RESERVED_SIZE;
  78. /* N.B. This variable is readable and writeable via
  79. /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
  80. of this size (or less if there is not enough memory) will be reserved
  81. for use by this file descriptor. [Deprecated usage: this variable is also
  82. readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
  83. the kernel (i.e. it is not a module).] */
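
/*
 * Editor's sketch (user space, not part of the driver): reading the default
 * reserve size exposed above.  Writing a number back to the same file
 * changes the default used by subsequent sg_open() calls; a per-fd override
 * goes through the SG_SET_RESERVED_SIZE ioctl instead.
 */
#include <stdio.h>

static long read_def_reserved_size(void)
{
	FILE *f = fopen("/proc/scsi/sg/def_reserved_size", "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}
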
  84. static int def_reserved_size = -1; /* picks up init parameter */
  85. static int sg_allow_dio = SG_ALLOW_DIO_DEF;
  86. static int scatter_elem_sz = SG_SCATTER_SZ;
  87. static int scatter_elem_sz_prev = SG_SCATTER_SZ;
  88. #define SG_SECTOR_SZ 512
  89. static int sg_add(struct device *, struct class_interface *);
  90. static void sg_remove(struct device *, struct class_interface *);
  91. static DEFINE_MUTEX(sg_mutex);
  92. static DEFINE_IDR(sg_index_idr);
  93. static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
  94. file descriptor list for device */
  95. static struct class_interface sg_interface = {
  96. .add_dev = sg_add,
  97. .remove_dev = sg_remove,
  98. };
  99. typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
  100. unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
  101. unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */
  102. unsigned bufflen; /* Size of (aggregate) data buffer */
  103. struct page **pages;
  104. int page_order;
  105. char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
  106. unsigned char cmd_opcode; /* first byte of command */
  107. } Sg_scatter_hold;
  108. struct sg_device; /* forward declarations */
  109. struct sg_fd;
  110. typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
  111. struct sg_request *nextrp; /* NULL -> tail request (slist) */
  112. struct sg_fd *parentfp; /* NULL -> not in use */
  113. Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
  114. sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
  115. unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
  116. char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
  117. char orphan; /* 1 -> drop on sight, 0 -> normal */
  118. char sg_io_owned; /* 1 -> packet belongs to SG_IO */
  119. volatile char done; /* 0->before bh, 1->before read, 2->read */
  120. struct request *rq;
  121. struct bio *bio;
  122. struct execute_work ew;
  123. } Sg_request;
  124. typedef struct sg_fd { /* holds the state of a file descriptor */
  125. struct list_head sfd_siblings;
  126. struct sg_device *parentdp; /* owning device */
  127. wait_queue_head_t read_wait; /* queue read until command done */
  128. rwlock_t rq_list_lock; /* protect access to list in req_arr */
  129. int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
  130. int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
  131. Sg_scatter_hold reserve; /* buffer held for this file descriptor */
  132. unsigned save_scat_len; /* original length of trunc. scat. element */
  133. Sg_request *headrp; /* head of request slist, NULL->empty */
  134. struct fasync_struct *async_qp; /* used by asynchronous notification */
  135. Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
  136. char low_dma; /* as in parent but possibly overridden to 1 */
  137. char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
  138. volatile char closed; /* 1 -> fd closed but request(s) outstanding */
  139. char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
  140. char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
  141. char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
  142. char mmap_called; /* 0 -> mmap() never called on this fd */
  143. struct kref f_ref;
  144. struct execute_work ew;
  145. } Sg_fd;
  146. typedef struct sg_device { /* holds the state of each scsi generic device */
  147. struct scsi_device *device;
  148. wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
  149. int sg_tablesize; /* adapter's max scatter-gather table size */
  150. u32 index; /* device index number */
  151. struct list_head sfds;
  152. volatile char detached; /* 0->attached, 1->detached pending removal */
  153. volatile char exclude; /* opened for exclusive access */
  154. char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
  155. struct gendisk *disk;
  156. struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
  157. struct kref d_ref;
  158. } Sg_device;
  159. /* tasklet or soft irq callback */
  160. static void sg_rq_end_io(struct request *rq, int uptodate);
  161. static int sg_start_req(Sg_request *srp, unsigned char *cmd);
  162. static int sg_finish_rem_req(Sg_request * srp);
  163. static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
  164. static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
  165. Sg_request * srp);
  166. static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
  167. const char __user *buf, size_t count, int blocking,
  168. int read_only, int sg_io_owned, Sg_request **o_srp);
  169. static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
  170. unsigned char *cmnd, int timeout, int blocking);
  171. static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
  172. static void sg_remove_scat(Sg_scatter_hold * schp);
  173. static void sg_build_reserve(Sg_fd * sfp, int req_size);
  174. static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
  175. static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
  176. static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
  177. static void sg_remove_sfp(struct kref *);
  178. static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
  179. static Sg_request *sg_add_request(Sg_fd * sfp);
  180. static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
  181. static int sg_res_in_use(Sg_fd * sfp);
  182. static Sg_device *sg_get_dev(int dev);
  183. static void sg_put_dev(Sg_device *sdp);
  184. #define SZ_SG_HEADER sizeof(struct sg_header)
  185. #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
  186. #define SZ_SG_IOVEC sizeof(sg_iovec_t)
  187. #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
  188. static int sg_allow_access(struct file *filp, unsigned char *cmd)
  189. {
  190. struct sg_fd *sfp = filp->private_data;
  191. if (sfp->parentdp->device->type == TYPE_SCANNER)
  192. return 0;
  193. return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
  194. }
  195. static int
  196. sg_open(struct inode *inode, struct file *filp)
  197. {
  198. int dev = iminor(inode);
  199. int flags = filp->f_flags;
  200. struct request_queue *q;
  201. Sg_device *sdp;
  202. Sg_fd *sfp;
  203. int res;
  204. int retval;
  205. mutex_lock(&sg_mutex);
  206. nonseekable_open(inode, filp);
  207. SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
  208. sdp = sg_get_dev(dev);
  209. if (IS_ERR(sdp)) {
  210. retval = PTR_ERR(sdp);
  211. sdp = NULL;
  212. goto sg_put;
  213. }
  214. /* This driver's module count bumped by fops_get in <linux/fs.h> */
  215. /* Prevent the device driver from vanishing while we sleep */
  216. retval = scsi_device_get(sdp->device);
  217. if (retval)
  218. goto sg_put;
  219. retval = scsi_autopm_get_device(sdp->device);
  220. if (retval)
  221. goto sdp_put;
  222. if (!((flags & O_NONBLOCK) ||
  223. scsi_block_when_processing_errors(sdp->device))) {
  224. retval = -ENXIO;
  225. /* we are in error recovery for this device */
  226. goto error_out;
  227. }
  228. if (flags & O_EXCL) {
  229. if (O_RDONLY == (flags & O_ACCMODE)) {
  230. retval = -EPERM; /* Can't lock it with read only access */
  231. goto error_out;
  232. }
  233. if (!list_empty(&sdp->sfds) && (flags & O_NONBLOCK)) {
  234. retval = -EBUSY;
  235. goto error_out;
  236. }
  237. res = 0;
  238. __wait_event_interruptible(sdp->o_excl_wait,
  239. ((!list_empty(&sdp->sfds) || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
  240. if (res) {
  241. retval = res; /* -ERESTARTSYS because signal hit process */
  242. goto error_out;
  243. }
  244. } else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */
  245. if (flags & O_NONBLOCK) {
  246. retval = -EBUSY;
  247. goto error_out;
  248. }
  249. res = 0;
  250. __wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
  251. res);
  252. if (res) {
  253. retval = res; /* -ERESTARTSYS because signal hit process */
  254. goto error_out;
  255. }
  256. }
  257. if (sdp->detached) {
  258. retval = -ENODEV;
  259. goto error_out;
  260. }
  261. if (list_empty(&sdp->sfds)) { /* no existing opens on this device */
  262. sdp->sgdebug = 0;
  263. q = sdp->device->request_queue;
  264. sdp->sg_tablesize = queue_max_segments(q);
  265. }
  266. if ((sfp = sg_add_sfp(sdp, dev)))
  267. filp->private_data = sfp;
  268. else {
  269. if (flags & O_EXCL) {
  270. sdp->exclude = 0; /* undo if error */
  271. wake_up_interruptible(&sdp->o_excl_wait);
  272. }
  273. retval = -ENOMEM;
  274. goto error_out;
  275. }
  276. retval = 0;
  277. error_out:
  278. if (retval) {
  279. scsi_autopm_put_device(sdp->device);
  280. sdp_put:
  281. scsi_device_put(sdp->device);
  282. }
  283. sg_put:
  284. if (sdp)
  285. sg_put_dev(sdp);
  286. mutex_unlock(&sg_mutex);
  287. return retval;
  288. }
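
/*
 * Editor's sketch (user space, not part of the driver): a non-blocking
 * exclusive open as handled by sg_open() above.  With O_NONBLOCK, an
 * existing open (or an existing exclusive lock) returns EBUSY rather than
 * sleeping on o_excl_wait, and O_EXCL combined with O_RDONLY is rejected
 * with EPERM.  The device path is an assumption for illustration.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

static int open_sg_exclusive(const char *path)	/* e.g. "/dev/sg0" */
{
	int fd = open(path, O_RDWR | O_EXCL | O_NONBLOCK);

	if (fd < 0 && errno == EBUSY)
		fprintf(stderr, "%s: busy (already open or locked)\n", path);
	return fd;	/* caller close()s it when finished */
}
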
  289. /* Following function was formerly called 'sg_close' */
  290. static int
  291. sg_release(struct inode *inode, struct file *filp)
  292. {
  293. Sg_device *sdp;
  294. Sg_fd *sfp;
  295. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
  296. return -ENXIO;
  297. SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
  298. sfp->closed = 1;
  299. sdp->exclude = 0;
  300. wake_up_interruptible(&sdp->o_excl_wait);
  301. scsi_autopm_put_device(sdp->device);
  302. kref_put(&sfp->f_ref, sg_remove_sfp);
  303. return 0;
  304. }
  305. static ssize_t
  306. sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
  307. {
  308. Sg_device *sdp;
  309. Sg_fd *sfp;
  310. Sg_request *srp;
  311. int req_pack_id = -1;
  312. sg_io_hdr_t *hp;
  313. struct sg_header *old_hdr = NULL;
  314. int retval = 0;
  315. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
  316. return -ENXIO;
  317. SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
  318. sdp->disk->disk_name, (int) count));
  319. if (!access_ok(VERIFY_WRITE, buf, count))
  320. return -EFAULT;
  321. if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
  322. old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
  323. if (!old_hdr)
  324. return -ENOMEM;
  325. if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
  326. retval = -EFAULT;
  327. goto free_old_hdr;
  328. }
  329. if (old_hdr->reply_len < 0) {
  330. if (count >= SZ_SG_IO_HDR) {
  331. sg_io_hdr_t *new_hdr;
  332. new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
  333. if (!new_hdr) {
  334. retval = -ENOMEM;
  335. goto free_old_hdr;
  336. }
  337. retval = __copy_from_user
  338. (new_hdr, buf, SZ_SG_IO_HDR);
  339. req_pack_id = new_hdr->pack_id;
  340. kfree(new_hdr);
  341. if (retval) {
  342. retval = -EFAULT;
  343. goto free_old_hdr;
  344. }
  345. }
  346. } else
  347. req_pack_id = old_hdr->pack_id;
  348. }
  349. srp = sg_get_rq_mark(sfp, req_pack_id);
  350. if (!srp) { /* now wait on packet to arrive */
  351. if (sdp->detached) {
  352. retval = -ENODEV;
  353. goto free_old_hdr;
  354. }
  355. if (filp->f_flags & O_NONBLOCK) {
  356. retval = -EAGAIN;
  357. goto free_old_hdr;
  358. }
  359. while (1) {
  360. retval = 0; /* following macro beats race condition */
  361. __wait_event_interruptible(sfp->read_wait,
  362. (sdp->detached ||
  363. (srp = sg_get_rq_mark(sfp, req_pack_id))),
  364. retval);
  365. if (sdp->detached) {
  366. retval = -ENODEV;
  367. goto free_old_hdr;
  368. }
  369. if (0 == retval)
  370. break;
  371. /* -ERESTARTSYS as signal hit process */
  372. goto free_old_hdr;
  373. }
  374. }
  375. if (srp->header.interface_id != '\0') {
  376. retval = sg_new_read(sfp, buf, count, srp);
  377. goto free_old_hdr;
  378. }
  379. hp = &srp->header;
  380. if (old_hdr == NULL) {
  381. old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
  382. if (! old_hdr) {
  383. retval = -ENOMEM;
  384. goto free_old_hdr;
  385. }
  386. }
  387. memset(old_hdr, 0, SZ_SG_HEADER);
  388. old_hdr->reply_len = (int) hp->timeout;
  389. old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
  390. old_hdr->pack_id = hp->pack_id;
  391. old_hdr->twelve_byte =
  392. ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
  393. old_hdr->target_status = hp->masked_status;
  394. old_hdr->host_status = hp->host_status;
  395. old_hdr->driver_status = hp->driver_status;
  396. if ((CHECK_CONDITION & hp->masked_status) ||
  397. (DRIVER_SENSE & hp->driver_status))
  398. memcpy(old_hdr->sense_buffer, srp->sense_b,
  399. sizeof (old_hdr->sense_buffer));
  400. switch (hp->host_status) {
  401. /* This setup of 'result' is for backward compatibility and is best
  402. ignored by the user who should use target, host + driver status */
  403. case DID_OK:
  404. case DID_PASSTHROUGH:
  405. case DID_SOFT_ERROR:
  406. old_hdr->result = 0;
  407. break;
  408. case DID_NO_CONNECT:
  409. case DID_BUS_BUSY:
  410. case DID_TIME_OUT:
  411. old_hdr->result = EBUSY;
  412. break;
  413. case DID_BAD_TARGET:
  414. case DID_ABORT:
  415. case DID_PARITY:
  416. case DID_RESET:
  417. case DID_BAD_INTR:
  418. old_hdr->result = EIO;
  419. break;
  420. case DID_ERROR:
  421. old_hdr->result = (srp->sense_b[0] == 0 &&
  422. hp->masked_status == GOOD) ? 0 : EIO;
  423. break;
  424. default:
  425. old_hdr->result = EIO;
  426. break;
  427. }
  428. /* Now copy the result back to the user buffer. */
  429. if (count >= SZ_SG_HEADER) {
  430. if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
  431. retval = -EFAULT;
  432. goto free_old_hdr;
  433. }
  434. buf += SZ_SG_HEADER;
  435. if (count > old_hdr->reply_len)
  436. count = old_hdr->reply_len;
  437. if (count > SZ_SG_HEADER) {
  438. if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
  439. retval = -EFAULT;
  440. goto free_old_hdr;
  441. }
  442. }
  443. } else
  444. count = (old_hdr->result == 0) ? 0 : -EIO;
  445. sg_finish_rem_req(srp);
  446. retval = count;
  447. free_old_hdr:
  448. kfree(old_hdr);
  449. return retval;
  450. }
  451. static ssize_t
  452. sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
  453. {
  454. sg_io_hdr_t *hp = &srp->header;
  455. int err = 0;
  456. int len;
  457. if (count < SZ_SG_IO_HDR) {
  458. err = -EINVAL;
  459. goto err_out;
  460. }
  461. hp->sb_len_wr = 0;
  462. if ((hp->mx_sb_len > 0) && hp->sbp) {
  463. if ((CHECK_CONDITION & hp->masked_status) ||
  464. (DRIVER_SENSE & hp->driver_status)) {
  465. int sb_len = SCSI_SENSE_BUFFERSIZE;
  466. sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
  467. len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
  468. len = (len > sb_len) ? sb_len : len;
  469. if (copy_to_user(hp->sbp, srp->sense_b, len)) {
  470. err = -EFAULT;
  471. goto err_out;
  472. }
  473. hp->sb_len_wr = len;
  474. }
  475. }
  476. if (hp->masked_status || hp->host_status || hp->driver_status)
  477. hp->info |= SG_INFO_CHECK;
  478. if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
  479. err = -EFAULT;
  480. goto err_out;
  481. }
  482. err_out:
  483. err = sg_finish_rem_req(srp);
  484. return (0 == err) ? count : err;
  485. }
  486. static ssize_t
  487. sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
  488. {
  489. int mxsize, cmd_size, k;
  490. int input_size, blocking;
  491. unsigned char opcode;
  492. Sg_device *sdp;
  493. Sg_fd *sfp;
  494. Sg_request *srp;
  495. struct sg_header old_hdr;
  496. sg_io_hdr_t *hp;
  497. unsigned char cmnd[MAX_COMMAND_SIZE];
  498. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
  499. return -ENXIO;
  500. SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
  501. sdp->disk->disk_name, (int) count));
  502. if (sdp->detached)
  503. return -ENODEV;
  504. if (!((filp->f_flags & O_NONBLOCK) ||
  505. scsi_block_when_processing_errors(sdp->device)))
  506. return -ENXIO;
  507. if (!access_ok(VERIFY_READ, buf, count))
  508. return -EFAULT; /* protects following copy_from_user()s + get_user()s */
  509. if (count < SZ_SG_HEADER)
  510. return -EIO;
  511. if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
  512. return -EFAULT;
  513. blocking = !(filp->f_flags & O_NONBLOCK);
  514. if (old_hdr.reply_len < 0)
  515. return sg_new_write(sfp, filp, buf, count,
  516. blocking, 0, 0, NULL);
  517. if (count < (SZ_SG_HEADER + 6))
  518. return -EIO; /* The minimum scsi command length is 6 bytes. */
  519. if (!(srp = sg_add_request(sfp))) {
  520. SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
  521. return -EDOM;
  522. }
  523. buf += SZ_SG_HEADER;
  524. __get_user(opcode, buf);
  525. if (sfp->next_cmd_len > 0) {
  526. if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
  527. SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
  528. sfp->next_cmd_len = 0;
  529. sg_remove_request(sfp, srp);
  530. return -EIO;
  531. }
  532. cmd_size = sfp->next_cmd_len;
  533. sfp->next_cmd_len = 0; /* reset so only this write() is affected */
  534. } else {
  535. cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
  536. if ((opcode >= 0xc0) && old_hdr.twelve_byte)
  537. cmd_size = 12;
  538. }
  539. SCSI_LOG_TIMEOUT(4, printk(
  540. "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
  541. /* Determine buffer size. */
  542. input_size = count - cmd_size;
  543. mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
  544. mxsize -= SZ_SG_HEADER;
  545. input_size -= SZ_SG_HEADER;
  546. if (input_size < 0) {
  547. sg_remove_request(sfp, srp);
  548. return -EIO; /* User did not pass enough bytes for this command. */
  549. }
  550. hp = &srp->header;
  551. hp->interface_id = '\0'; /* indicator of old interface tunnelled */
  552. hp->cmd_len = (unsigned char) cmd_size;
  553. hp->iovec_count = 0;
  554. hp->mx_sb_len = 0;
  555. if (input_size > 0)
  556. hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
  557. SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
  558. else
  559. hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
  560. hp->dxfer_len = mxsize;
  561. if (hp->dxfer_direction == SG_DXFER_TO_DEV)
  562. hp->dxferp = (char __user *)buf + cmd_size;
  563. else
  564. hp->dxferp = NULL;
  565. hp->sbp = NULL;
  566. hp->timeout = old_hdr.reply_len; /* structure abuse ... */
  567. hp->flags = input_size; /* structure abuse ... */
  568. hp->pack_id = old_hdr.pack_id;
  569. hp->usr_ptr = NULL;
  570. if (__copy_from_user(cmnd, buf, cmd_size))
  571. return -EFAULT;
  572. /*
  573. * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
  574. * but it is possible that the app intended SG_DXFER_TO_DEV, because there
  575. * is a non-zero input_size, so emit a warning.
  576. */
  577. if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
  578. static char cmd[TASK_COMM_LEN];
  579. if (strcmp(current->comm, cmd)) {
  580. printk_ratelimited(KERN_WARNING
  581. "sg_write: data in/out %d/%d bytes "
  582. "for SCSI command 0x%x-- guessing "
  583. "data in;\n program %s not setting "
  584. "count and/or reply_len properly\n",
  585. old_hdr.reply_len - (int)SZ_SG_HEADER,
  586. input_size, (unsigned int) cmnd[0],
  587. current->comm);
  588. strcpy(cmd, current->comm);
  589. }
  590. }
  591. k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
  592. return (k < 0) ? k : count;
  593. }
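
/*
 * Editor's sketch (user space, not part of the driver): the legacy
 * "struct sg_header" path that sg_write()/sg_read() above implement.  The
 * write() buffer is a struct sg_header immediately followed by the CDB
 * (plus any data-out); reply_len tells the driver how many bytes the later
 * read() expects, including the header itself.  A zero-filled 6-byte
 * TEST UNIT READY with no data transfer is used purely as an example.
 */
#include <scsi/sg.h>
#include <string.h>
#include <unistd.h>

static int legacy_test_unit_ready(int sg_fd)
{
	unsigned char buf[sizeof(struct sg_header) + 6];
	struct sg_header *hdr = (struct sg_header *) buf;

	memset(buf, 0, sizeof(buf));		/* the 6 CDB bytes stay all zero */
	hdr->reply_len = sizeof(struct sg_header); /* only the header comes back */
	hdr->pack_id = 1;			/* carried through to the response */

	if (write(sg_fd, buf, sizeof(buf)) != (ssize_t) sizeof(buf))
		return -1;
	if (read(sg_fd, buf, sizeof(struct sg_header)) < 0)
		return -1;
	return hdr->result;	/* 0 on success, per the switch in sg_read() */
}
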
  594. static ssize_t
  595. sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
  596. size_t count, int blocking, int read_only, int sg_io_owned,
  597. Sg_request **o_srp)
  598. {
  599. int k;
  600. Sg_request *srp;
  601. sg_io_hdr_t *hp;
  602. unsigned char cmnd[MAX_COMMAND_SIZE];
  603. int timeout;
  604. unsigned long ul_timeout;
  605. if (count < SZ_SG_IO_HDR)
  606. return -EINVAL;
  607. if (!access_ok(VERIFY_READ, buf, count))
  608. return -EFAULT; /* protects following copy_from_user()s + get_user()s */
  609. sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
  610. if (!(srp = sg_add_request(sfp))) {
  611. SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
  612. return -EDOM;
  613. }
  614. srp->sg_io_owned = sg_io_owned;
  615. hp = &srp->header;
  616. if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
  617. sg_remove_request(sfp, srp);
  618. return -EFAULT;
  619. }
  620. if (hp->interface_id != 'S') {
  621. sg_remove_request(sfp, srp);
  622. return -ENOSYS;
  623. }
  624. if (hp->flags & SG_FLAG_MMAP_IO) {
  625. if (hp->dxfer_len > sfp->reserve.bufflen) {
  626. sg_remove_request(sfp, srp);
  627. return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
  628. }
  629. if (hp->flags & SG_FLAG_DIRECT_IO) {
  630. sg_remove_request(sfp, srp);
  631. return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
  632. }
  633. if (sg_res_in_use(sfp)) {
  634. sg_remove_request(sfp, srp);
  635. return -EBUSY; /* reserve buffer already being used */
  636. }
  637. }
  638. ul_timeout = msecs_to_jiffies(srp->header.timeout);
  639. timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
  640. if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
  641. sg_remove_request(sfp, srp);
  642. return -EMSGSIZE;
  643. }
  644. if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
  645. sg_remove_request(sfp, srp);
  646. return -EFAULT; /* protects following copy_from_user()s + get_user()s */
  647. }
  648. if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
  649. sg_remove_request(sfp, srp);
  650. return -EFAULT;
  651. }
  652. if (read_only && sg_allow_access(file, cmnd)) {
  653. sg_remove_request(sfp, srp);
  654. return -EPERM;
  655. }
  656. k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
  657. if (k < 0)
  658. return k;
  659. if (o_srp)
  660. *o_srp = srp;
  661. return count;
  662. }
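
/*
 * Editor's sketch (user space, not part of the driver): queuing a command
 * through the v3 sg_io_hdr interface with write() and collecting it later
 * with read(); interface_id 'S' is what sg_new_write() above keys on.  The
 * INQUIRY CDB, 20 s timeout and response length are illustrative only, and
 * resp/sense must stay valid until the read() completes.
 */
#include <scsi/sg.h>
#include <string.h>
#include <unistd.h>

static int queue_inquiry(int sg_fd, unsigned char *resp, unsigned char resp_len,
			 unsigned char *sense, unsigned char sense_len)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, resp_len, 0 };	/* INQUIRY */
	sg_io_hdr_t hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;			/* copied by the driver during write() */
	hdr.dxfer_len = resp_len;
	hdr.dxferp = resp;
	hdr.mx_sb_len = sense_len;
	hdr.sbp = sense;
	hdr.timeout = 20000;		/* milliseconds */
	hdr.pack_id = 1;

	if (write(sg_fd, &hdr, sizeof(hdr)) < 0)
		return -1;
	/* ... do other work; poll()/SG_GET_NUM_WAITING tell when it is done ... */
	if (read(sg_fd, &hdr, sizeof(hdr)) < 0)
		return -1;
	return hdr.masked_status;
}
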
  663. static int
  664. sg_common_write(Sg_fd * sfp, Sg_request * srp,
  665. unsigned char *cmnd, int timeout, int blocking)
  666. {
  667. int k, data_dir;
  668. Sg_device *sdp = sfp->parentdp;
  669. sg_io_hdr_t *hp = &srp->header;
  670. srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
  671. hp->status = 0;
  672. hp->masked_status = 0;
  673. hp->msg_status = 0;
  674. hp->info = 0;
  675. hp->host_status = 0;
  676. hp->driver_status = 0;
  677. hp->resid = 0;
  678. SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
  679. (int) cmnd[0], (int) hp->cmd_len));
  680. k = sg_start_req(srp, cmnd);
  681. if (k) {
  682. SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k));
  683. sg_finish_rem_req(srp);
  684. return k; /* probably out of space --> ENOMEM */
  685. }
  686. if (sdp->detached) {
  687. if (srp->bio)
  688. blk_end_request_all(srp->rq, -EIO);
  689. sg_finish_rem_req(srp);
  690. return -ENODEV;
  691. }
  692. switch (hp->dxfer_direction) {
  693. case SG_DXFER_TO_FROM_DEV:
  694. case SG_DXFER_FROM_DEV:
  695. data_dir = DMA_FROM_DEVICE;
  696. break;
  697. case SG_DXFER_TO_DEV:
  698. data_dir = DMA_TO_DEVICE;
  699. break;
  700. case SG_DXFER_UNKNOWN:
  701. data_dir = DMA_BIDIRECTIONAL;
  702. break;
  703. default:
  704. data_dir = DMA_NONE;
  705. break;
  706. }
  707. hp->duration = jiffies_to_msecs(jiffies);
  708. srp->rq->timeout = timeout;
  709. kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
  710. blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
  711. srp->rq, 1, sg_rq_end_io);
  712. return 0;
  713. }
  714. static int
  715. sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
  716. {
  717. void __user *p = (void __user *)arg;
  718. int __user *ip = p;
  719. int result, val, read_only;
  720. Sg_device *sdp;
  721. Sg_fd *sfp;
  722. Sg_request *srp;
  723. unsigned long iflags;
  724. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
  725. return -ENXIO;
  726. SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
  727. sdp->disk->disk_name, (int) cmd_in));
  728. read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
  729. switch (cmd_in) {
  730. case SG_IO:
  731. {
  732. int blocking = 1; /* ignore O_NONBLOCK flag */
  733. if (sdp->detached)
  734. return -ENODEV;
  735. if (!scsi_block_when_processing_errors(sdp->device))
  736. return -ENXIO;
  737. if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
  738. return -EFAULT;
  739. result =
  740. sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
  741. blocking, read_only, 1, &srp);
  742. if (result < 0)
  743. return result;
  744. while (1) {
  745. result = 0; /* following macro to beat race condition */
  746. __wait_event_interruptible(sfp->read_wait,
  747. (srp->done || sdp->detached),
  748. result);
  749. if (sdp->detached)
  750. return -ENODEV;
  751. write_lock_irq(&sfp->rq_list_lock);
  752. if (srp->done) {
  753. srp->done = 2;
  754. write_unlock_irq(&sfp->rq_list_lock);
  755. break;
  756. }
  757. srp->orphan = 1;
  758. write_unlock_irq(&sfp->rq_list_lock);
  759. return result; /* -ERESTARTSYS because signal hit process */
  760. }
  761. result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
  762. return (result < 0) ? result : 0;
  763. }
  764. case SG_SET_TIMEOUT:
  765. result = get_user(val, ip);
  766. if (result)
  767. return result;
  768. if (val < 0)
  769. return -EIO;
  770. if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
  771. val = MULDIV (INT_MAX, USER_HZ, HZ);
  772. sfp->timeout_user = val;
  773. sfp->timeout = MULDIV (val, HZ, USER_HZ);
  774. return 0;
  775. case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
  776. /* strange ..., for backward compatibility */
  777. return sfp->timeout_user;
  778. case SG_SET_FORCE_LOW_DMA:
  779. result = get_user(val, ip);
  780. if (result)
  781. return result;
  782. if (val) {
  783. sfp->low_dma = 1;
  784. if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
  785. val = (int) sfp->reserve.bufflen;
  786. sg_remove_scat(&sfp->reserve);
  787. sg_build_reserve(sfp, val);
  788. }
  789. } else {
  790. if (sdp->detached)
  791. return -ENODEV;
  792. sfp->low_dma = sdp->device->host->unchecked_isa_dma;
  793. }
  794. return 0;
  795. case SG_GET_LOW_DMA:
  796. return put_user((int) sfp->low_dma, ip);
  797. case SG_GET_SCSI_ID:
  798. if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
  799. return -EFAULT;
  800. else {
  801. sg_scsi_id_t __user *sg_idp = p;
  802. if (sdp->detached)
  803. return -ENODEV;
  804. __put_user((int) sdp->device->host->host_no,
  805. &sg_idp->host_no);
  806. __put_user((int) sdp->device->channel,
  807. &sg_idp->channel);
  808. __put_user((int) sdp->device->id, &sg_idp->scsi_id);
  809. __put_user((int) sdp->device->lun, &sg_idp->lun);
  810. __put_user((int) sdp->device->type, &sg_idp->scsi_type);
  811. __put_user((short) sdp->device->host->cmd_per_lun,
  812. &sg_idp->h_cmd_per_lun);
  813. __put_user((short) sdp->device->queue_depth,
  814. &sg_idp->d_queue_depth);
  815. __put_user(0, &sg_idp->unused[0]);
  816. __put_user(0, &sg_idp->unused[1]);
  817. return 0;
  818. }
  819. case SG_SET_FORCE_PACK_ID:
  820. result = get_user(val, ip);
  821. if (result)
  822. return result;
  823. sfp->force_packid = val ? 1 : 0;
  824. return 0;
  825. case SG_GET_PACK_ID:
  826. if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
  827. return -EFAULT;
  828. read_lock_irqsave(&sfp->rq_list_lock, iflags);
  829. for (srp = sfp->headrp; srp; srp = srp->nextrp) {
  830. if ((1 == srp->done) && (!srp->sg_io_owned)) {
  831. read_unlock_irqrestore(&sfp->rq_list_lock,
  832. iflags);
  833. __put_user(srp->header.pack_id, ip);
  834. return 0;
  835. }
  836. }
  837. read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  838. __put_user(-1, ip);
  839. return 0;
  840. case SG_GET_NUM_WAITING:
  841. read_lock_irqsave(&sfp->rq_list_lock, iflags);
  842. for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
  843. if ((1 == srp->done) && (!srp->sg_io_owned))
  844. ++val;
  845. }
  846. read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  847. return put_user(val, ip);
  848. case SG_GET_SG_TABLESIZE:
  849. return put_user(sdp->sg_tablesize, ip);
  850. case SG_SET_RESERVED_SIZE:
  851. result = get_user(val, ip);
  852. if (result)
  853. return result;
  854. if (val < 0)
  855. return -EINVAL;
  856. val = min_t(int, val,
  857. queue_max_sectors(sdp->device->request_queue) * 512);
  858. if (val != sfp->reserve.bufflen) {
  859. if (sg_res_in_use(sfp) || sfp->mmap_called)
  860. return -EBUSY;
  861. sg_remove_scat(&sfp->reserve);
  862. sg_build_reserve(sfp, val);
  863. }
  864. return 0;
  865. case SG_GET_RESERVED_SIZE:
  866. val = min_t(int, sfp->reserve.bufflen,
  867. queue_max_sectors(sdp->device->request_queue) * 512);
  868. return put_user(val, ip);
  869. case SG_SET_COMMAND_Q:
  870. result = get_user(val, ip);
  871. if (result)
  872. return result;
  873. sfp->cmd_q = val ? 1 : 0;
  874. return 0;
  875. case SG_GET_COMMAND_Q:
  876. return put_user((int) sfp->cmd_q, ip);
  877. case SG_SET_KEEP_ORPHAN:
  878. result = get_user(val, ip);
  879. if (result)
  880. return result;
  881. sfp->keep_orphan = val;
  882. return 0;
  883. case SG_GET_KEEP_ORPHAN:
  884. return put_user((int) sfp->keep_orphan, ip);
  885. case SG_NEXT_CMD_LEN:
  886. result = get_user(val, ip);
  887. if (result)
  888. return result;
  889. sfp->next_cmd_len = (val > 0) ? val : 0;
  890. return 0;
  891. case SG_GET_VERSION_NUM:
  892. return put_user(sg_version_num, ip);
  893. case SG_GET_ACCESS_COUNT:
  894. /* faked - we don't have a real access count anymore */
  895. val = (sdp->device ? 1 : 0);
  896. return put_user(val, ip);
  897. case SG_GET_REQUEST_TABLE:
  898. if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
  899. return -EFAULT;
  900. else {
  901. sg_req_info_t *rinfo;
  902. unsigned int ms;
  903. rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
  904. GFP_KERNEL);
  905. if (!rinfo)
  906. return -ENOMEM;
  907. read_lock_irqsave(&sfp->rq_list_lock, iflags);
  908. for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
  909. ++val, srp = srp ? srp->nextrp : srp) {
  910. memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
  911. if (srp) {
  912. rinfo[val].req_state = srp->done + 1;
  913. rinfo[val].problem =
  914. srp->header.masked_status &
  915. srp->header.host_status &
  916. srp->header.driver_status;
  917. if (srp->done)
  918. rinfo[val].duration =
  919. srp->header.duration;
  920. else {
  921. ms = jiffies_to_msecs(jiffies);
  922. rinfo[val].duration =
  923. (ms > srp->header.duration) ?
  924. (ms - srp->header.duration) : 0;
  925. }
  926. rinfo[val].orphan = srp->orphan;
  927. rinfo[val].sg_io_owned =
  928. srp->sg_io_owned;
  929. rinfo[val].pack_id =
  930. srp->header.pack_id;
  931. rinfo[val].usr_ptr =
  932. srp->header.usr_ptr;
  933. }
  934. }
  935. read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  936. result = __copy_to_user(p, rinfo,
  937. SZ_SG_REQ_INFO * SG_MAX_QUEUE);
  938. result = result ? -EFAULT : 0;
  939. kfree(rinfo);
  940. return result;
  941. }
  942. case SG_EMULATED_HOST:
  943. if (sdp->detached)
  944. return -ENODEV;
  945. return put_user(sdp->device->host->hostt->emulated, ip);
  946. case SG_SCSI_RESET:
  947. if (sdp->detached)
  948. return -ENODEV;
  949. if (filp->f_flags & O_NONBLOCK) {
  950. if (scsi_host_in_recovery(sdp->device->host))
  951. return -EBUSY;
  952. } else if (!scsi_block_when_processing_errors(sdp->device))
  953. return -EBUSY;
  954. result = get_user(val, ip);
  955. if (result)
  956. return result;
  957. if (SG_SCSI_RESET_NOTHING == val)
  958. return 0;
  959. switch (val) {
  960. case SG_SCSI_RESET_DEVICE:
  961. val = SCSI_TRY_RESET_DEVICE;
  962. break;
  963. case SG_SCSI_RESET_TARGET:
  964. val = SCSI_TRY_RESET_TARGET;
  965. break;
  966. case SG_SCSI_RESET_BUS:
  967. val = SCSI_TRY_RESET_BUS;
  968. break;
  969. case SG_SCSI_RESET_HOST:
  970. val = SCSI_TRY_RESET_HOST;
  971. break;
  972. default:
  973. return -EINVAL;
  974. }
  975. if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
  976. return -EACCES;
  977. return (scsi_reset_provider(sdp->device, val) ==
  978. SUCCESS) ? 0 : -EIO;
  979. case SCSI_IOCTL_SEND_COMMAND:
  980. if (sdp->detached)
  981. return -ENODEV;
  982. if (read_only) {
  983. unsigned char opcode = WRITE_6;
  984. Scsi_Ioctl_Command __user *siocp = p;
  985. if (copy_from_user(&opcode, siocp->data, 1))
  986. return -EFAULT;
  987. if (sg_allow_access(filp, &opcode))
  988. return -EPERM;
  989. }
  990. return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p);
  991. case SG_SET_DEBUG:
  992. result = get_user(val, ip);
  993. if (result)
  994. return result;
  995. sdp->sgdebug = (char) val;
  996. return 0;
  997. case SCSI_IOCTL_GET_IDLUN:
  998. case SCSI_IOCTL_GET_BUS_NUMBER:
  999. case SCSI_IOCTL_PROBE_HOST:
  1000. case SG_GET_TRANSFORM:
  1001. if (sdp->detached)
  1002. return -ENODEV;
  1003. return scsi_ioctl(sdp->device, cmd_in, p);
  1004. case BLKSECTGET:
  1005. return put_user(queue_max_sectors(sdp->device->request_queue) * 512,
  1006. ip);
  1007. case BLKTRACESETUP:
  1008. return blk_trace_setup(sdp->device->request_queue,
  1009. sdp->disk->disk_name,
  1010. MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
  1011. NULL,
  1012. (char *)arg);
  1013. case BLKTRACESTART:
  1014. return blk_trace_startstop(sdp->device->request_queue, 1);
  1015. case BLKTRACESTOP:
  1016. return blk_trace_startstop(sdp->device->request_queue, 0);
  1017. case BLKTRACETEARDOWN:
  1018. return blk_trace_remove(sdp->device->request_queue);
  1019. default:
  1020. if (read_only)
  1021. return -EPERM; /* don't know so take safe approach */
  1022. return scsi_ioctl(sdp->device, cmd_in, p);
  1023. }
  1024. }
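
/*
 * Editor's sketch (user space, not part of the driver): the same sg_io_hdr
 * driven synchronously through the SG_IO ioctl handled above; the call only
 * returns once the command has completed.  The CDB and 20 s timeout are
 * illustrative.
 */
#include <scsi/sg.h>
#include <string.h>
#include <sys/ioctl.h>

static int sg_io_test_unit_ready(int sg_fd)
{
	unsigned char cdb[6] = { 0 };		/* TEST UNIT READY */
	unsigned char sense[32];
	sg_io_hdr_t hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.dxfer_direction = SG_DXFER_NONE;
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.mx_sb_len = sizeof(sense);
	hdr.sbp = sense;
	hdr.timeout = 20000;			/* milliseconds */

	if (ioctl(sg_fd, SG_IO, &hdr) < 0)
		return -1;
	/* SG_INFO_CHECK is set above whenever any status byte was non-zero */
	return (hdr.info & SG_INFO_CHECK) ? -1 : 0;
}
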
  1025. static long
  1026. sg_unlocked_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
  1027. {
  1028. int ret;
  1029. mutex_lock(&sg_mutex);
  1030. ret = sg_ioctl(filp, cmd_in, arg);
  1031. mutex_unlock(&sg_mutex);
  1032. return ret;
  1033. }
  1034. #ifdef CONFIG_COMPAT
  1035. static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
  1036. {
  1037. Sg_device *sdp;
  1038. Sg_fd *sfp;
  1039. struct scsi_device *sdev;
  1040. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
  1041. return -ENXIO;
  1042. sdev = sdp->device;
  1043. if (sdev->host->hostt->compat_ioctl) {
  1044. int ret;
  1045. ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
  1046. return ret;
  1047. }
  1048. return -ENOIOCTLCMD;
  1049. }
  1050. #endif
  1051. static unsigned int
  1052. sg_poll(struct file *filp, poll_table * wait)
  1053. {
  1054. unsigned int res = 0;
  1055. Sg_device *sdp;
  1056. Sg_fd *sfp;
  1057. Sg_request *srp;
  1058. int count = 0;
  1059. unsigned long iflags;
  1060. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
  1061. || sfp->closed)
  1062. return POLLERR;
  1063. poll_wait(filp, &sfp->read_wait, wait);
  1064. read_lock_irqsave(&sfp->rq_list_lock, iflags);
  1065. for (srp = sfp->headrp; srp; srp = srp->nextrp) {
  1066. /* if any read waiting, flag it */
  1067. if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
  1068. res = POLLIN | POLLRDNORM;
  1069. ++count;
  1070. }
  1071. read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  1072. if (sdp->detached)
  1073. res |= POLLHUP;
  1074. else if (!sfp->cmd_q) {
  1075. if (0 == count)
  1076. res |= POLLOUT | POLLWRNORM;
  1077. } else if (count < SG_MAX_QUEUE)
  1078. res |= POLLOUT | POLLWRNORM;
  1079. SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
  1080. sdp->disk->disk_name, (int) res));
  1081. return res;
  1082. }
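
/*
 * Editor's sketch (user space, not part of the driver): waiting for a queued
 * command with poll().  Per sg_poll() above, POLLIN means at least one
 * response is ready for read(), POLLOUT means another command may be
 * written, and POLLHUP means the underlying device was detached.
 */
#include <poll.h>

static int wait_for_sg_response(int sg_fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = sg_fd, .events = POLLIN };

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -1;			/* error or timeout */
	if (pfd.revents & POLLHUP)
		return -1;			/* device detached */
	return (pfd.revents & POLLIN) ? 0 : -1;	/* 0 -> ready for read() */
}
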
  1083. static int
  1084. sg_fasync(int fd, struct file *filp, int mode)
  1085. {
  1086. Sg_device *sdp;
  1087. Sg_fd *sfp;
  1088. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
  1089. return -ENXIO;
  1090. SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
  1091. sdp->disk->disk_name, mode));
  1092. return fasync_helper(fd, filp, mode, &sfp->async_qp);
  1093. }
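
/*
 * Editor's sketch (user space, not part of the driver): requesting the
 * asynchronous notification wired up by sg_fasync()/kill_fasync() above,
 * so SIGIO arrives when a response becomes readable.  O_ASYNC may need
 * _GNU_SOURCE (or equivalent) on some C libraries.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static void on_sigio(int sig)
{
	(void) sig;		/* a response is ready; read() it elsewhere */
}

static int enable_sg_async(int sg_fd)
{
	int flags;

	signal(SIGIO, on_sigio);
	if (fcntl(sg_fd, F_SETOWN, getpid()) < 0)
		return -1;
	flags = fcntl(sg_fd, F_GETFL);
	if (flags < 0)
		return -1;
	return fcntl(sg_fd, F_SETFL, flags | O_ASYNC);
}
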
  1094. static int
  1095. sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  1096. {
  1097. Sg_fd *sfp;
  1098. unsigned long offset, len, sa;
  1099. Sg_scatter_hold *rsv_schp;
  1100. int k, length;
  1101. if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
  1102. return VM_FAULT_SIGBUS;
  1103. rsv_schp = &sfp->reserve;
  1104. offset = vmf->pgoff << PAGE_SHIFT;
  1105. if (offset >= rsv_schp->bufflen)
  1106. return VM_FAULT_SIGBUS;
  1107. SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
  1108. offset, rsv_schp->k_use_sg));
  1109. sa = vma->vm_start;
  1110. length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
  1111. for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
  1112. len = vma->vm_end - sa;
  1113. len = (len < length) ? len : length;
  1114. if (offset < len) {
  1115. struct page *page = nth_page(rsv_schp->pages[k],
  1116. offset >> PAGE_SHIFT);
  1117. get_page(page); /* increment page count */
  1118. vmf->page = page;
  1119. return 0; /* success */
  1120. }
  1121. sa += len;
  1122. offset -= len;
  1123. }
  1124. return VM_FAULT_SIGBUS;
  1125. }
  1126. static const struct vm_operations_struct sg_mmap_vm_ops = {
  1127. .fault = sg_vma_fault,
  1128. };
  1129. static int
  1130. sg_mmap(struct file *filp, struct vm_area_struct *vma)
  1131. {
  1132. Sg_fd *sfp;
  1133. unsigned long req_sz, len, sa;
  1134. Sg_scatter_hold *rsv_schp;
  1135. int k, length;
  1136. if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
  1137. return -ENXIO;
  1138. req_sz = vma->vm_end - vma->vm_start;
  1139. SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
  1140. (void *) vma->vm_start, (int) req_sz));
  1141. if (vma->vm_pgoff)
  1142. return -EINVAL; /* want no offset */
  1143. rsv_schp = &sfp->reserve;
  1144. if (req_sz > rsv_schp->bufflen)
  1145. return -ENOMEM; /* cannot map more than reserved buffer */
  1146. sa = vma->vm_start;
  1147. length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
  1148. for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
  1149. len = vma->vm_end - sa;
  1150. len = (len < length) ? len : length;
  1151. sa += len;
  1152. }
  1153. sfp->mmap_called = 1;
  1154. vma->vm_flags |= VM_RESERVED;
  1155. vma->vm_private_data = sfp;
  1156. vma->vm_ops = &sg_mmap_vm_ops;
  1157. return 0;
  1158. }
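
/*
 * Editor's sketch (user space, not part of the driver): mapping the fd's
 * reserve buffer and transferring data through it with SG_FLAG_MMAP_IO,
 * which sg_mmap() above and the SG_FLAG_MMAP_IO check in sg_new_write()
 * cooperate on.  sg_mmap() only accepts offset 0 and at most the reserve
 * buffer's size, so the mapping length is taken from SG_GET_RESERVED_SIZE.
 */
#include <scsi/sg.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stddef.h>

static void *map_sg_reserve(int sg_fd, int *len_out)
{
	int len = 0;
	void *p;

	if (ioctl(sg_fd, SG_GET_RESERVED_SIZE, &len) < 0 || len <= 0)
		return NULL;
	p = mmap(NULL, (size_t) len, PROT_READ | PROT_WRITE, MAP_SHARED,
		 sg_fd, 0);
	if (p == MAP_FAILED)
		return NULL;
	*len_out = len;
	return p;
}

/* When building the sg_io_hdr for the transfer, the flag replaces the usual
 * user-space data pointer: */
static void request_via_mmap(sg_io_hdr_t *hdr, unsigned int xfer_len)
{
	hdr->flags |= SG_FLAG_MMAP_IO;
	hdr->dxfer_len = xfer_len;	/* must not exceed the reserve size */
	hdr->dxferp = NULL;		/* data moves through the mapping */
}
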
  1159. static void sg_rq_end_io_usercontext(struct work_struct *work)
  1160. {
  1161. struct sg_request *srp = container_of(work, struct sg_request, ew.work);
  1162. struct sg_fd *sfp = srp->parentfp;
  1163. sg_finish_rem_req(srp);
  1164. kref_put(&sfp->f_ref, sg_remove_sfp);
  1165. }
  1166. /*
  1167. * This function is a "bottom half" handler that is called by the mid
  1168. * level when a command is completed (or has failed).
  1169. */
  1170. static void sg_rq_end_io(struct request *rq, int uptodate)
  1171. {
  1172. struct sg_request *srp = rq->end_io_data;
  1173. Sg_device *sdp;
  1174. Sg_fd *sfp;
  1175. unsigned long iflags;
  1176. unsigned int ms;
  1177. char *sense;
  1178. int result, resid, done = 1;
  1179. if (WARN_ON(srp->done != 0))
  1180. return;
  1181. sfp = srp->parentfp;
  1182. if (WARN_ON(sfp == NULL))
  1183. return;
  1184. sdp = sfp->parentdp;
  1185. if (unlikely(sdp->detached))
  1186. printk(KERN_INFO "sg_rq_end_io: device detached\n");
  1187. sense = rq->sense;
  1188. result = rq->errors;
  1189. resid = rq->resid_len;
  1190. SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
  1191. sdp->disk->disk_name, srp->header.pack_id, result));
  1192. srp->header.resid = resid;
  1193. ms = jiffies_to_msecs(jiffies);
  1194. srp->header.duration = (ms > srp->header.duration) ?
  1195. (ms - srp->header.duration) : 0;
  1196. if (0 != result) {
  1197. struct scsi_sense_hdr sshdr;
  1198. srp->header.status = 0xff & result;
  1199. srp->header.masked_status = status_byte(result);
  1200. srp->header.msg_status = msg_byte(result);
  1201. srp->header.host_status = host_byte(result);
  1202. srp->header.driver_status = driver_byte(result);
  1203. if ((sdp->sgdebug > 0) &&
  1204. ((CHECK_CONDITION == srp->header.masked_status) ||
  1205. (COMMAND_TERMINATED == srp->header.masked_status)))
  1206. __scsi_print_sense("sg_cmd_done", sense,
  1207. SCSI_SENSE_BUFFERSIZE);
  1208. /* Following if statement is a patch supplied by Eric Youngdale */
  1209. if (driver_byte(result) != 0
  1210. && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
  1211. && !scsi_sense_is_deferred(&sshdr)
  1212. && sshdr.sense_key == UNIT_ATTENTION
  1213. && sdp->device->removable) {
  1214. /* Detected possible disc change. Set the bit - this */
  1215. /* may be used if there are filesystems using this device */
  1216. sdp->device->changed = 1;
  1217. }
  1218. }
  1219. /* Rely on write phase to clean out srp status values, so no "else" */
  1220. write_lock_irqsave(&sfp->rq_list_lock, iflags);
  1221. if (unlikely(srp->orphan)) {
  1222. if (sfp->keep_orphan)
  1223. srp->sg_io_owned = 0;
  1224. else
  1225. done = 0;
  1226. }
  1227. srp->done = done;
  1228. write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  1229. if (likely(done)) {
  1230. /* Now wake up any sg_read() that is waiting for this
  1231. * packet.
  1232. */
  1233. wake_up_interruptible(&sfp->read_wait);
  1234. kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
  1235. kref_put(&sfp->f_ref, sg_remove_sfp);
  1236. } else {
  1237. INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
  1238. schedule_work(&srp->ew.work);
  1239. }
  1240. }
  1241. static const struct file_operations sg_fops = {
  1242. .owner = THIS_MODULE,
  1243. .read = sg_read,
  1244. .write = sg_write,
  1245. .poll = sg_poll,
  1246. .unlocked_ioctl = sg_unlocked_ioctl,
  1247. #ifdef CONFIG_COMPAT
  1248. .compat_ioctl = sg_compat_ioctl,
  1249. #endif
  1250. .open = sg_open,
  1251. .mmap = sg_mmap,
  1252. .release = sg_release,
  1253. .fasync = sg_fasync,
  1254. .llseek = no_llseek,
  1255. };
  1256. static struct class *sg_sysfs_class;
  1257. static int sg_sysfs_valid = 0;
static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
{
	struct request_queue *q = scsidp->request_queue;
	Sg_device *sdp;
	unsigned long iflags;
	int error;
	u32 k;

	sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
	if (!sdp) {
		printk(KERN_WARNING "kmalloc Sg_device failure\n");
		return ERR_PTR(-ENOMEM);
	}

	if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
		printk(KERN_WARNING "idr expansion Sg_device failure\n");
		error = -ENOMEM;
		goto out;
	}

	write_lock_irqsave(&sg_index_lock, iflags);

	error = idr_get_new(&sg_index_idr, sdp, &k);
	if (error) {
		write_unlock_irqrestore(&sg_index_lock, iflags);
		printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
		       error);
		goto out;
	}

	if (unlikely(k >= SG_MAX_DEVS))
		goto overflow;

	SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
	sprintf(disk->disk_name, "sg%d", k);
	disk->first_minor = k;
	sdp->disk = disk;
	sdp->device = scsidp;
	INIT_LIST_HEAD(&sdp->sfds);
	init_waitqueue_head(&sdp->o_excl_wait);
	sdp->sg_tablesize = queue_max_segments(q);
	sdp->index = k;
	kref_init(&sdp->d_ref);

	write_unlock_irqrestore(&sg_index_lock, iflags);

	error = 0;
out:
	if (error) {
		kfree(sdp);
		return ERR_PTR(error);
	}
	return sdp;

overflow:
	idr_remove(&sg_index_idr, k);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	sdev_printk(KERN_WARNING, scsidp,
		    "Unable to attach sg device type=%d, minor "
		    "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
	error = -ENODEV;
	goto out;
}
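
/*
 * class_interface add-device callback: called when a new SCSI device
 * appears.  Allocates the gendisk, the char device and the Sg_device,
 * registers the cdev under SCSI_GENERIC_MAJOR and creates the sysfs node
 * plus the "generic" symlink back from the SCSI device.
 */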
static int
sg_add(struct device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
	struct gendisk *disk;
	Sg_device *sdp = NULL;
	struct cdev *cdev = NULL;
	int error;
	unsigned long iflags;

	disk = alloc_disk(1);
	if (!disk) {
		printk(KERN_WARNING "alloc_disk failed\n");
		return -ENOMEM;
	}
	disk->major = SCSI_GENERIC_MAJOR;

	error = -ENOMEM;
	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_WARNING "cdev_alloc failed\n");
		goto out;
	}
	cdev->owner = THIS_MODULE;
	cdev->ops = &sg_fops;

	sdp = sg_alloc(disk, scsidp);
	if (IS_ERR(sdp)) {
		printk(KERN_WARNING "sg_alloc failed\n");
		error = PTR_ERR(sdp);
		goto out;
	}

	error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1);
	if (error)
		goto cdev_add_err;

	sdp->cdev = cdev;
	if (sg_sysfs_valid) {
		struct device *sg_class_member;

		sg_class_member = device_create(sg_sysfs_class, cl_dev->parent,
						MKDEV(SCSI_GENERIC_MAJOR,
						      sdp->index),
						sdp, "%s", disk->disk_name);
		if (IS_ERR(sg_class_member)) {
			printk(KERN_ERR "sg_add: "
			       "device_create failed\n");
			error = PTR_ERR(sg_class_member);
			goto cdev_add_err;
		}
		error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
					  &sg_class_member->kobj, "generic");
		if (error)
			printk(KERN_ERR "sg_add: unable to make symlink "
			       "'generic' back to sg%d\n", sdp->index);
	} else
		printk(KERN_WARNING "sg_add: sg_sys Invalid\n");

	sdev_printk(KERN_NOTICE, scsidp,
		    "Attached scsi generic sg%d type %d\n", sdp->index,
		    scsidp->type);

	dev_set_drvdata(cl_dev, sdp);

	return 0;

cdev_add_err:
	write_lock_irqsave(&sg_index_lock, iflags);
	idr_remove(&sg_index_idr, sdp->index);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	kfree(sdp);

out:
	put_disk(disk);
	if (cdev)
		cdev_del(cdev);
	return error;
}

static void sg_device_destroy(struct kref *kref)
{
	struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
	unsigned long flags;

	/* CAUTION! Note that the device can still be found via idr_find()
	 * even though the refcount is 0. Therefore, do idr_remove() BEFORE
	 * any other cleanup.
	 */

	write_lock_irqsave(&sg_index_lock, flags);
	idr_remove(&sg_index_idr, sdp->index);
	write_unlock_irqrestore(&sg_index_lock, flags);

	SCSI_LOG_TIMEOUT(3,
		printk("sg_device_destroy: %s\n",
			sdp->disk->disk_name));

	put_disk(sdp->disk);
	kfree(sdp);
}
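
/*
 * class_interface remove-device callback: marks the device detached, wakes
 * up and signals (POLL_HUP) any waiting file descriptors, then tears down
 * the sysfs link, the class device and the cdev before dropping its
 * reference to the Sg_device.
 */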
static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
	Sg_device *sdp = dev_get_drvdata(cl_dev);
	unsigned long iflags;
	Sg_fd *sfp;

	if (!sdp || sdp->detached)
		return;

	SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name));

	/* Need a write lock to set sdp->detached. */
	write_lock_irqsave(&sg_index_lock, iflags);
	sdp->detached = 1;
	list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
		wake_up_interruptible(&sfp->read_wait);
		kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
	}
	write_unlock_irqrestore(&sg_index_lock, iflags);

	sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
	device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
	cdev_del(sdp->cdev);
	sdp->cdev = NULL;

	sg_put_dev(sdp);
}

module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
module_param_named(def_reserved_size, def_reserved_size, int,
		   S_IRUGO | S_IWUSR);
module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);

MODULE_AUTHOR("Douglas Gilbert");
MODULE_DESCRIPTION("SCSI generic (sg) driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(SG_VERSION_STR);
MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);

MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
		"size (default: max(SG_SCATTER_SZ, PAGE_SIZE))");
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");

static int __init
init_sg(void)
{
	int rc;

	if (scatter_elem_sz < PAGE_SIZE) {
		scatter_elem_sz = PAGE_SIZE;
		scatter_elem_sz_prev = scatter_elem_sz;
	}
	if (def_reserved_size >= 0)
		sg_big_buff = def_reserved_size;
	else
		def_reserved_size = sg_big_buff;

	rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				    SG_MAX_DEVS, "sg");
	if (rc)
		return rc;
	sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
	if (IS_ERR(sg_sysfs_class)) {
		rc = PTR_ERR(sg_sysfs_class);
		goto err_out;
	}
	sg_sysfs_valid = 1;
	rc = scsi_register_interface(&sg_interface);
	if (0 == rc) {
#ifdef CONFIG_SCSI_PROC_FS
		sg_proc_init();
#endif	/* CONFIG_SCSI_PROC_FS */
		return 0;
	}
	class_destroy(sg_sysfs_class);
err_out:
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
	return rc;
}

static void __exit
exit_sg(void)
{
#ifdef CONFIG_SCSI_PROC_FS
	sg_proc_cleanup();
#endif	/* CONFIG_SCSI_PROC_FS */
	scsi_unregister_interface(&sg_interface);
	class_destroy(sg_sysfs_class);
	sg_sysfs_valid = 0;
	unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
				 SG_MAX_DEVS);
	idr_destroy(&sg_index_idr);
}
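
/*
 * Build the block-layer request for an sg command.  For the data phase the
 * transfer is mapped either directly from the caller's buffer (direct I/O,
 * when allow_dio is set and the buffer is suitably aligned) or through the
 * reserve/indirect page lists via rq_map_data.
 */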
static int sg_start_req(Sg_request *srp, unsigned char *cmd)
{
	int res;
	struct request *rq;
	Sg_fd *sfp = srp->parentfp;
	sg_io_hdr_t *hp = &srp->header;
	int dxfer_len = (int) hp->dxfer_len;
	int dxfer_dir = hp->dxfer_direction;
	unsigned int iov_count = hp->iovec_count;
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;
	struct request_queue *q = sfp->parentdp->device->request_queue;
	struct rq_map_data *md, map_data;
	int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? WRITE : READ;

	SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n",
				   dxfer_len));

	rq = blk_get_request(q, rw, GFP_ATOMIC);
	if (!rq)
		return -ENOMEM;

	memcpy(rq->cmd, cmd, hp->cmd_len);

	rq->cmd_len = hp->cmd_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	srp->rq = rq;
	rq->end_io_data = srp;
	rq->sense = srp->sense_b;
	rq->retries = SG_DEFAULT_RETRIES;

	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
		return 0;

	if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO &&
	    dxfer_dir != SG_DXFER_UNKNOWN && !iov_count &&
	    !sfp->parentdp->device->host->unchecked_isa_dma &&
	    blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
		md = NULL;
	else
		md = &map_data;

	if (md) {
		if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
			sg_link_reserve(sfp, srp, dxfer_len);
		else {
			res = sg_build_indirect(req_schp, sfp, dxfer_len);
			if (res)
				return res;
		}

		md->pages = req_schp->pages;
		md->page_order = req_schp->page_order;
		md->nr_entries = req_schp->k_use_sg;
		md->offset = 0;
		md->null_mapped = hp->dxferp ? 0 : 1;
		if (dxfer_dir == SG_DXFER_TO_FROM_DEV)
			md->from_user = 1;
		else
			md->from_user = 0;
	}

	if (iov_count) {
		int len, size = sizeof(struct sg_iovec) * iov_count;
		struct iovec *iov;

		iov = memdup_user(hp->dxferp, size);
		if (IS_ERR(iov))
			return PTR_ERR(iov);

		len = iov_length(iov, iov_count);
		if (hp->dxfer_len < len) {
			iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
			len = hp->dxfer_len;
		}

		res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
					  iov_count,
					  len, GFP_ATOMIC);
		kfree(iov);
	} else
		res = blk_rq_map_user(q, rq, md, hp->dxferp,
				      hp->dxfer_len, GFP_ATOMIC);

	if (!res) {
		srp->bio = rq->bio;

		if (!md) {
			req_schp->dio_in_use = 1;
			hp->info |= SG_INFO_DIRECT_IO;
		}
	}
	return res;
}

static int sg_finish_rem_req(Sg_request * srp)
{
	int ret = 0;

	Sg_fd *sfp = srp->parentfp;
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
	if (srp->rq) {
		if (srp->bio)
			ret = blk_rq_unmap_user(srp->bio);

		blk_put_request(srp->rq);
	}

	if (srp->res_used)
		sg_unlink_reserve(sfp, srp);
	else
		sg_remove_scat(req_schp);

	sg_remove_request(sfp, srp);

	return ret;
}

static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
	int sg_bufflen = tablesize * sizeof(struct page *);
	gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;

	schp->pages = kzalloc(sg_bufflen, gfp_flags);
	if (!schp->pages)
		return -ENOMEM;
	schp->sglist_len = sg_bufflen;
	return tablesize;	/* number of scat_gath elements allocated */
}
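
/*
 * Allocate an indirect scatter list of up to sg_tablesize elements, in
 * chunks of roughly scatter_elem_sz bytes, large enough to cover buff_size.
 * If a higher-order page allocation fails, the order is reduced and the
 * whole allocation is retried.
 */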
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
	int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
	int sg_tablesize = sfp->parentdp->sg_tablesize;
	int blk_size = buff_size, order;
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;

	if (blk_size < 0)
		return -EFAULT;
	if (0 == blk_size)
		++blk_size;	/* don't know why */
	/* round request up to next highest SG_SECTOR_SZ byte boundary */
	blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
	SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
				   buff_size, blk_size));

	/* N.B. ret_sz carried into this block ... */
	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
	if (mx_sc_elems < 0)
		return mx_sc_elems;	/* most likely -ENOMEM */

	num = scatter_elem_sz;
	if (unlikely(num != scatter_elem_sz_prev)) {
		if (num < PAGE_SIZE) {
			scatter_elem_sz = PAGE_SIZE;
			scatter_elem_sz_prev = PAGE_SIZE;
		} else
			scatter_elem_sz_prev = num;
	}

	if (sfp->low_dma)
		gfp_mask |= GFP_DMA;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		gfp_mask |= __GFP_ZERO;

	order = get_order(num);
retry:
	ret_sz = 1 << (PAGE_SHIFT + order);

	for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
	     k++, rem_sz -= ret_sz) {

		num = (rem_sz > scatter_elem_sz_prev) ?
			scatter_elem_sz_prev : rem_sz;

		schp->pages[k] = alloc_pages(gfp_mask, order);
		if (!schp->pages[k])
			goto out;

		if (num == scatter_elem_sz_prev) {
			if (unlikely(ret_sz > scatter_elem_sz_prev)) {
				scatter_elem_sz = ret_sz;
				scatter_elem_sz_prev = ret_sz;
			}
		}

		SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, "
				 "ret_sz=%d\n", k, num, ret_sz));
	}	/* end of for loop */

	schp->page_order = order;
	schp->k_use_sg = k;
	SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, "
			 "rem_sz=%d\n", k, rem_sz));

	schp->bufflen = blk_size;
	if (rem_sz > 0)	/* must have failed */
		return -ENOMEM;
	return 0;
out:
	for (i = 0; i < k; i++)
		__free_pages(schp->pages[i], order);

	if (--order >= 0)
		goto retry;

	return -ENOMEM;
}

static void
sg_remove_scat(Sg_scatter_hold * schp)
{
	SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
	if (schp->pages && schp->sglist_len > 0) {
		if (!schp->dio_in_use) {
			int k;

			for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
				SCSI_LOG_TIMEOUT(5, printk(
				    "sg_remove_scat: k=%d, pg=0x%p\n",
				    k, schp->pages[k]));
				__free_pages(schp->pages[k], schp->page_order);
			}

			kfree(schp->pages);
		}
	}
	memset(schp, 0, sizeof (*schp));
}
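
/*
 * Copy up to num_read_xfer bytes of reply data from the request's scatter
 * list back to the user buffer, one allocated chunk at a time.
 */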
static int
sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
{
	Sg_scatter_hold *schp = &srp->data;
	int k, num;

	SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
				   num_read_xfer));
	if ((!outp) || (num_read_xfer <= 0))
		return 0;

	num = 1 << (PAGE_SHIFT + schp->page_order);
	for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) {
		if (num > num_read_xfer) {
			if (__copy_to_user(outp, page_address(schp->pages[k]),
					   num_read_xfer))
				return -EFAULT;
			break;
		} else {
			if (__copy_to_user(outp, page_address(schp->pages[k]),
					   num))
				return -EFAULT;
			num_read_xfer -= num;
			if (num_read_xfer <= 0)
				break;
			outp += num;
		}
	}

	return 0;
}
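
/*
 * Build the per-fd reserve buffer.  If the requested size cannot be
 * allocated, keep halving it (down to PAGE_SIZE) until an allocation
 * succeeds.
 */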
static void
sg_build_reserve(Sg_fd * sfp, int req_size)
{
	Sg_scatter_hold *schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
	do {
		if (req_size < PAGE_SIZE)
			req_size = PAGE_SIZE;
		if (0 == sg_build_indirect(schp, sfp, req_size))
			return;
		else
			sg_remove_scat(schp);
		req_size >>= 1;	/* divide by 2 */
	} while (req_size > (PAGE_SIZE / 2));
}
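
/*
 * Point the request's scatter list at the fd's reserve buffer (no new pages
 * are allocated); sg_unlink_reserve() below undoes this when the request is
 * finished with the reserve buffer.
 */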
static void
sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;
	int k, num, rem;

	srp->res_used = 1;
	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
	rem = size;

	num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
	for (k = 0; k < rsv_schp->k_use_sg; k++) {
		if (rem <= num) {
			req_schp->k_use_sg = k + 1;
			req_schp->sglist_len = rsv_schp->sglist_len;
			req_schp->pages = rsv_schp->pages;

			req_schp->bufflen = size;
			req_schp->page_order = rsv_schp->page_order;
			break;
		} else
			rem -= num;
	}

	if (k >= rsv_schp->k_use_sg)
		SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
}

static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
				   (int) req_schp->k_use_sg));
	req_schp->k_use_sg = 0;
	req_schp->bufflen = 0;
	req_schp->pages = NULL;
	req_schp->page_order = 0;
	req_schp->sglist_len = 0;
	sfp->save_scat_len = 0;
	srp->res_used = 0;
}
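
/*
 * Find a completed request on this fd (matching pack_id, with -1 meaning
 * "any") that is not owned by a blocking SG_IO call, and mark it as being
 * read so other readers skip it.
 */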
static Sg_request *
sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
	Sg_request *resp;
	unsigned long iflags;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (resp = sfp->headrp; resp; resp = resp->nextrp) {
		/* look for requests that are ready + not SG_IO owned */
		if ((1 == resp->done) && (!resp->sg_io_owned) &&
		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
			resp->done = 2;	/* guard against other readers */
			break;
		}
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return resp;
}

/* always adds to end of list */
static Sg_request *
sg_add_request(Sg_fd * sfp)
{
	int k;
	unsigned long iflags;
	Sg_request *resp;
	Sg_request *rp = sfp->req_arr;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	resp = sfp->headrp;
	if (!resp) {
		memset(rp, 0, sizeof (Sg_request));
		rp->parentfp = sfp;
		resp = rp;
		sfp->headrp = resp;
	} else {
		if (0 == sfp->cmd_q)
			resp = NULL;	/* command queuing disallowed */
		else {
			for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
				if (!rp->parentfp)
					break;
			}
			if (k < SG_MAX_QUEUE) {
				memset(rp, 0, sizeof (Sg_request));
				rp->parentfp = sfp;
				while (resp->nextrp)
					resp = resp->nextrp;
				resp->nextrp = rp;
				resp = rp;
			} else
				resp = NULL;
		}
	}
	if (resp) {
		resp->nextrp = NULL;
		resp->header.duration = jiffies_to_msecs(jiffies);
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return resp;
}

/* Return of 1 for found; 0 for not found */
static int
sg_remove_request(Sg_fd * sfp, Sg_request * srp)
{
	Sg_request *prev_rp;
	Sg_request *rp;
	unsigned long iflags;
	int res = 0;

	if ((!sfp) || (!srp) || (!sfp->headrp))
		return res;
	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	prev_rp = sfp->headrp;
	if (srp == prev_rp) {
		sfp->headrp = prev_rp->nextrp;
		prev_rp->parentfp = NULL;
		res = 1;
	} else {
		while ((rp = prev_rp->nextrp)) {
			if (srp == rp) {
				prev_rp->nextrp = rp->nextrp;
				rp->parentfp = NULL;
				res = 1;
				break;
			}
			prev_rp = rp;
		}
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return res;
}
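
/*
 * Allocate and initialise the per-open Sg_fd, link it onto the owning
 * device's sfds list and build its reserve buffer (capped by both
 * def_reserved_size and the queue's max_sectors).
 */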
static Sg_fd *
sg_add_sfp(Sg_device * sdp, int dev)
{
	Sg_fd *sfp;
	unsigned long iflags;
	int bufflen;

	sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
	if (!sfp)
		return NULL;

	init_waitqueue_head(&sfp->read_wait);
	rwlock_init(&sfp->rq_list_lock);

	kref_init(&sfp->f_ref);
	sfp->timeout = SG_DEFAULT_TIMEOUT;
	sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
	sfp->force_packid = SG_DEF_FORCE_PACK_ID;
	sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
	    sdp->device->host->unchecked_isa_dma : 1;
	sfp->cmd_q = SG_DEF_COMMAND_Q;
	sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
	sfp->parentdp = sdp;
	write_lock_irqsave(&sg_index_lock, iflags);
	list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
	if (unlikely(sg_big_buff != def_reserved_size))
		sg_big_buff = def_reserved_size;

	bufflen = min_t(int, sg_big_buff,
			queue_max_sectors(sdp->device->request_queue) * 512);
	sg_build_reserve(sfp, bufflen);
	SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
				   sfp->reserve.bufflen, sfp->reserve.k_use_sg));

	kref_get(&sdp->d_ref);
	__module_get(THIS_MODULE);
	return sfp;
}
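
/*
 * Final teardown of an Sg_fd, deferred to a workqueue so it runs in process
 * context: finish any unread requests, free the reserve buffer and drop the
 * SCSI device, Sg_device and module references.
 */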
static void sg_remove_sfp_usercontext(struct work_struct *work)
{
	struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
	struct sg_device *sdp = sfp->parentdp;

	/* Cleanup any responses which were never read(). */
	while (sfp->headrp)
		sg_finish_rem_req(sfp->headrp);

	if (sfp->reserve.bufflen > 0) {
		SCSI_LOG_TIMEOUT(6,
			printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
				(int) sfp->reserve.bufflen,
				(int) sfp->reserve.k_use_sg));
		sg_remove_scat(&sfp->reserve);
	}

	SCSI_LOG_TIMEOUT(6,
		printk("sg_remove_sfp: %s, sfp=0x%p\n",
			sdp->disk->disk_name,
			sfp));
	kfree(sfp);

	scsi_device_put(sdp->device);
	sg_put_dev(sdp);
	module_put(THIS_MODULE);
}

static void sg_remove_sfp(struct kref *kref)
{
	struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
	struct sg_device *sdp = sfp->parentdp;
	unsigned long iflags;

	write_lock_irqsave(&sg_index_lock, iflags);
	list_del(&sfp->sfd_siblings);
	write_unlock_irqrestore(&sg_index_lock, iflags);
	wake_up_interruptible(&sdp->o_excl_wait);

	INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
	schedule_work(&sfp->ew.work);
}

static int
sg_res_in_use(Sg_fd * sfp)
{
	const Sg_request *srp;
	unsigned long iflags;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp)
		if (srp->res_used)
			break;
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return srp ? 1 : 0;
}

#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
{
	int *k = data;

	if (*k < id)
		*k = id;

	return 0;
}

static int
sg_last_dev(void)
{
	int k = -1;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return k + 1;	/* origin 1 */
}
#endif

/* must be called with sg_index_lock held */
static Sg_device *sg_lookup_dev(int dev)
{
	return idr_find(&sg_index_idr, dev);
}

static Sg_device *sg_get_dev(int dev)
{
	struct sg_device *sdp;
	unsigned long flags;

	read_lock_irqsave(&sg_index_lock, flags);
	sdp = sg_lookup_dev(dev);
	if (!sdp)
		sdp = ERR_PTR(-ENXIO);
	else if (sdp->detached) {
		/* If sdp->detached, then the refcount may already be 0, in
		 * which case it would be a bug to do kref_get().
		 */
		sdp = ERR_PTR(-ENODEV);
	} else
		kref_get(&sdp->d_ref);
	read_unlock_irqrestore(&sg_index_lock, flags);

	return sdp;
}

static void sg_put_dev(struct sg_device *sdp)
{
	kref_put(&sdp->d_ref, sg_device_destroy);
}
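
/*
 * The remainder of the file implements /proc/scsi/sg: "allow_dio" and
 * "def_reserved_size" are writable tunables, while "version", "device_hdr",
 * "devices", "device_strs" and "debug" are read-only views of the attached
 * devices and their outstanding requests.
 */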
#ifdef CONFIG_SCSI_PROC_FS

static struct proc_dir_entry *sg_proc_sgp = NULL;

static char sg_proc_sg_dirname[] = "scsi/sg";

static int sg_proc_seq_show_int(struct seq_file *s, void *v);

static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
				  size_t count, loff_t *off);
static const struct file_operations adio_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_adio,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = sg_proc_write_adio,
	.release = single_release,
};

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
		const char __user *buffer, size_t count, loff_t *off);
static const struct file_operations dressz_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_dressz,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = sg_proc_write_dressz,
	.release = single_release,
};

static int sg_proc_seq_show_version(struct seq_file *s, void *v);
static int sg_proc_single_open_version(struct inode *inode, struct file *file);
static const struct file_operations version_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_version,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
static const struct file_operations devhdr_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_devhdr,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
static int sg_proc_open_dev(struct inode *inode, struct file *file);
static void *dev_seq_start(struct seq_file *s, loff_t *pos);
static void *dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
static void dev_seq_stop(struct seq_file *s, void *v);
static const struct file_operations dev_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_dev,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations dev_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_dev,
};

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
static const struct file_operations devstrs_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_devstrs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations devstrs_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_devstrs,
};

static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
static int sg_proc_open_debug(struct inode *inode, struct file *file);
static const struct file_operations debug_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_open_debug,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
static const struct seq_operations debug_seq_ops = {
	.start = dev_seq_start,
	.next = dev_seq_next,
	.stop = dev_seq_stop,
	.show = sg_proc_seq_show_debug,
};

struct sg_proc_leaf {
	const char *name;
	const struct file_operations *fops;
};

static struct sg_proc_leaf sg_proc_leaf_arr[] = {
	{"allow_dio", &adio_fops},
	{"debug", &debug_fops},
	{"def_reserved_size", &dressz_fops},
	{"device_hdr", &devhdr_fops},
	{"devices", &dev_fops},
	{"device_strs", &devstrs_fops},
	{"version", &version_fops}
};

static int
sg_proc_init(void)
{
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
	int k;

	sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
	if (!sg_proc_sgp)
		return 1;
	for (k = 0; k < num_leaves; ++k) {
		struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
		umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
		proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
	}
	return 0;
}

static void
sg_proc_cleanup(void)
{
	int k;
	int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);

	if (!sg_proc_sgp)
		return;
	for (k = 0; k < num_leaves; ++k)
		remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
	remove_proc_entry(sg_proc_sg_dirname, NULL);
}

static int sg_proc_seq_show_int(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\n", *((int *)s->private));
	return 0;
}

static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}

static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
		   size_t count, loff_t *off)
{
	int err;
	unsigned long num;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	err = kstrtoul_from_user(buffer, count, 0, &num);
	if (err)
		return err;
	sg_allow_dio = num ? 1 : 0;
	return count;
}

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}

static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
		     size_t count, loff_t *off)
{
	int err;
	unsigned long k = ULONG_MAX;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	err = kstrtoul_from_user(buffer, count, 0, &k);
	if (err)
		return err;
	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
		sg_big_buff = k;
		return count;
	}
	return -ERANGE;
}

static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
		   sg_version_date);
	return 0;
}

static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_version, NULL);
}

static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
	seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
		   "online\n");
	return 0;
}

static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_devhdr, NULL);
}

struct sg_proc_deviter {
	loff_t	index;
	size_t	max;
};

static void *dev_seq_start(struct seq_file *s, loff_t *pos)
{
	struct sg_proc_deviter *it = kmalloc(sizeof(*it), GFP_KERNEL);

	s->private = it;
	if (!it)
		return NULL;

	it->index = *pos;
	it->max = sg_last_dev();
	if (it->index >= it->max)
		return NULL;
	return it;
}

static void *dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct sg_proc_deviter *it = s->private;

	*pos = ++it->index;
	return (it->index < it->max) ? it : NULL;
}

static void dev_seq_stop(struct seq_file *s, void *v)
{
	kfree(s->private);
}

static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}

static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
	struct sg_proc_deviter *it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
			   scsidp->host->host_no, scsidp->channel,
			   scsidp->id, scsidp->lun, (int) scsidp->type,
			   1,
			   (int) scsidp->queue_depth,
			   (int) scsidp->device_busy,
			   (int) scsi_device_online(scsidp));
	else
		seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}

static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
	return seq_open(file, &devstrs_seq_ops);
}

static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
	struct sg_proc_deviter *it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
			   scsidp->vendor, scsidp->model, scsidp->rev);
	else
		seq_printf(s, "<no active device>\n");
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}

/* must be called while holding sg_index_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
	int k, m, new_interface, blen, usg;
	Sg_request *srp;
	Sg_fd *fp;
	const sg_io_hdr_t *hp;
	const char *cp;
	unsigned int ms;

	k = 0;
	list_for_each_entry(fp, &sdp->sfds, sfd_siblings) {
		k++;
		read_lock(&fp->rq_list_lock); /* irqs already disabled */
		seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
			   "(res)sgat=%d low_dma=%d\n", k,
			   jiffies_to_msecs(fp->timeout),
			   fp->reserve.bufflen,
			   (int) fp->reserve.k_use_sg,
			   (int) fp->low_dma);
		seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
			   (int) fp->cmd_q, (int) fp->force_packid,
			   (int) fp->keep_orphan, (int) fp->closed);
		for (m = 0, srp = fp->headrp;
		     srp != NULL;
		     ++m, srp = srp->nextrp) {
			hp = &srp->header;
			new_interface = (hp->interface_id == '\0') ? 0 : 1;
			if (srp->res_used) {
				if (new_interface &&
				    (SG_FLAG_MMAP_IO & hp->flags))
					cp = " mmap>> ";
				else
					cp = " rb>> ";
			} else {
				if (SG_INFO_DIRECT_IO_MASK & hp->info)
					cp = " dio>> ";
				else
					cp = " ";
			}
			seq_printf(s, cp);
			blen = srp->data.bufflen;
			usg = srp->data.k_use_sg;
			seq_printf(s, srp->done ?
				   ((1 == srp->done) ? "rcv:" : "fin:")
				   : "act:");
			seq_printf(s, " id=%d blen=%d",
				   srp->header.pack_id, blen);
			if (srp->done)
				seq_printf(s, " dur=%d", hp->duration);
			else {
				ms = jiffies_to_msecs(jiffies);
				seq_printf(s, " t_o/elap=%d/%d",
					   (new_interface ? hp->timeout :
					    jiffies_to_msecs(fp->timeout)),
					   (ms > hp->duration ? ms - hp->duration : 0));
			}
			seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
				   (int) srp->data.cmd_opcode);
		}
		if (0 == m)
			seq_printf(s, " No requests active\n");
		read_unlock(&fp->rq_list_lock);
	}
}

static int sg_proc_open_debug(struct inode *inode, struct file *file)
{
	return seq_open(file, &debug_seq_ops);
}

static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
{
	struct sg_proc_deviter *it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	unsigned long iflags;

	if (it && (0 == it->index)) {
		seq_printf(s, "max_active_device=%d(origin 1)\n",
			   (int)it->max);
		seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
	}

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (sdp && !list_empty(&sdp->sfds)) {
		struct scsi_device *scsidp = sdp->device;

		seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
		if (sdp->detached)
			seq_printf(s, "detached pending close ");
		else
			seq_printf
			    (s, "scsi%d chan=%d id=%d lun=%d em=%d",
			     scsidp->host->host_no,
			     scsidp->channel, scsidp->id,
			     scsidp->lun,
			     scsidp->host->hostt->emulated);
		seq_printf(s, " sg_tablesize=%d excl=%d\n",
			   sdp->sg_tablesize, sdp->exclude);
		sg_proc_debug_helper(s, sdp);
	}
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}

#endif	/* CONFIG_SCSI_PROC_FS */

module_init(init_sg);
module_exit(exit_sg);