sg.c

  1. /*
  2. * History:
  3. * Started: Aug 9 by Lawrence Foard (entropy@world.std.com),
  4. * to allow user process control of SCSI devices.
  5. * Development Sponsored by Killy Corp. NY NY
  6. *
  7. * Original driver (sg.c):
  8. * Copyright (C) 1992 Lawrence Foard
  9. * Version 2 and 3 extensions to driver:
  10. * Copyright (C) 1998 - 2005 Douglas Gilbert
  11. *
  12. * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License as published by
  16. * the Free Software Foundation; either version 2, or (at your option)
  17. * any later version.
  18. *
  19. */
  20. static int sg_version_num = 30533; /* 2 digits for each component */
  21. #define SG_VERSION_STR "3.5.33"
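/* e.g. 30533 = 3*10000 + 5*100 + 33, matching SG_VERSION_STR "3.5.33" */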
  22. /*
  23. * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes:
  24. * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First
  25. * the kernel/module needs to be built with CONFIG_SCSI_LOGGING
  26. * (otherwise the macros compile to empty statements).
  27. *
  28. */
  29. #include <linux/config.h>
  30. #include <linux/module.h>
  31. #include <linux/fs.h>
  32. #include <linux/kernel.h>
  33. #include <linux/sched.h>
  34. #include <linux/string.h>
  35. #include <linux/mm.h>
  36. #include <linux/errno.h>
  37. #include <linux/mtio.h>
  38. #include <linux/ioctl.h>
  39. #include <linux/fcntl.h>
  40. #include <linux/init.h>
  41. #include <linux/poll.h>
  42. #include <linux/smp_lock.h>
  43. #include <linux/moduleparam.h>
  44. #include <linux/devfs_fs_kernel.h>
  45. #include <linux/cdev.h>
  46. #include <linux/seq_file.h>
  47. #include <linux/blkdev.h>
  48. #include <linux/delay.h>
  49. #include <linux/scatterlist.h>
  50. #include "scsi.h"
  51. #include <scsi/scsi_dbg.h>
  52. #include <scsi/scsi_host.h>
  53. #include <scsi/scsi_driver.h>
  54. #include <scsi/scsi_ioctl.h>
  55. #include <scsi/sg.h>
  56. #include "scsi_logging.h"
  57. #ifdef CONFIG_SCSI_PROC_FS
  58. #include <linux/proc_fs.h>
  59. static char *sg_version_date = "20050908";
  60. static int sg_proc_init(void);
  61. static void sg_proc_cleanup(void);
  62. #endif
  63. #define SG_ALLOW_DIO_DEF 0
  64. #define SG_ALLOW_DIO_CODE /* compile out by commenting this define */
  65. #define SG_MAX_DEVS 32768
  66. /*
  67. * Suppose you want to calculate the formula muldiv(x,m,d)=int(x * m / d)
  68. * Then when using 32 bit integers x * m may overflow during the calculation.
  69. * Replacing muldiv(x,m,d) by ((x % d) * m) / d + int(x / d) * m
  70. * calculates the same, but prevents the overflow when both m and d
  71. * are "small" numbers (like HZ and USER_HZ).
  72. * Of course an overflow is unavoidable if the result of muldiv doesn't fit
  73. * in 32 bits.
  74. */
  75. #define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL))
  76. #define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
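/*
 * Worked example of the rearranged formula (illustrative values only,
 * assuming HZ=250 and USER_HZ=100):
 *   MULDIV(60000, 250, 100)
 *     = ((60000 % 100) * 250) / 100 + (60000 / 100) * 250
 *     = 0 + 600 * 250 = 150000
 * which equals int(60000 * 250 / 100). The rearrangement matters when X is
 * large (e.g. the MULDIV(INT_MAX, USER_HZ, HZ) clamp in SG_SET_TIMEOUT),
 * where forming X * MUL directly would overflow 32 bits.
 */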
  77. int sg_big_buff = SG_DEF_RESERVED_SIZE;
  78. /* N.B. This variable is readable and writeable via
  79. /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
  80. of this size (or less if there is not enough memory) will be reserved
  81. for use by this file descriptor. [Deprecated usage: this variable is also
  82. readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into
  83. the kernel (i.e. it is not a module).] */
  84. static int def_reserved_size = -1; /* picks up init parameter */
  85. static int sg_allow_dio = SG_ALLOW_DIO_DEF;
  86. #define SG_SECTOR_SZ 512
  87. #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
  88. #define SG_DEV_ARR_LUMP 32 /* amount to over allocate sg_dev_arr by */
  89. static int sg_add(struct class_device *, struct class_interface *);
  90. static void sg_remove(struct class_device *, struct class_interface *);
  91. static DEFINE_RWLOCK(sg_dev_arr_lock); /* Also used to lock
  92. file descriptor list for device */
  93. static struct class_interface sg_interface = {
  94. .add = sg_add,
  95. .remove = sg_remove,
  96. };
  97. typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */
  98. unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */
  99. unsigned short sglist_len; /* size of malloc'd scatter-gather list ++ */
  100. unsigned bufflen; /* Size of (aggregate) data buffer */
  101. unsigned b_malloc_len; /* actual len malloc'ed in buffer */
  102. struct scatterlist *buffer;/* scatter list */
  103. char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */
  104. unsigned char cmd_opcode; /* first byte of command */
  105. } Sg_scatter_hold;
  106. struct sg_device; /* forward declarations */
  107. struct sg_fd;
  108. typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
  109. struct sg_request *nextrp; /* NULL -> tail request (slist) */
  110. struct sg_fd *parentfp; /* NULL -> not in use */
  111. Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
  112. sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
  113. unsigned char sense_b[SCSI_SENSE_BUFFERSIZE];
  114. char res_used; /* 1 -> using reserve buffer, 0 -> not ... */
  115. char orphan; /* 1 -> drop on sight, 0 -> normal */
  116. char sg_io_owned; /* 1 -> packet belongs to SG_IO */
  117. volatile char done; /* 0->before bh, 1->before read, 2->read */
  118. } Sg_request;
  119. typedef struct sg_fd { /* holds the state of a file descriptor */
  120. struct sg_fd *nextfp; /* NULL when last opened fd on this device */
  121. struct sg_device *parentdp; /* owning device */
  122. wait_queue_head_t read_wait; /* queue read until command done */
  123. rwlock_t rq_list_lock; /* protect access to list in req_arr */
  124. int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
  125. int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
  126. Sg_scatter_hold reserve; /* buffer held for this file descriptor */
  127. unsigned save_scat_len; /* original length of trunc. scat. element */
  128. Sg_request *headrp; /* head of request slist, NULL->empty */
  129. struct fasync_struct *async_qp; /* used by asynchronous notification */
  130. Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
  131. char low_dma; /* as in parent but possibly overridden to 1 */
  132. char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */
  133. volatile char closed; /* 1 -> fd closed but request(s) outstanding */
  134. char cmd_q; /* 1 -> allow command queuing, 0 -> don't */
  135. char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */
  136. char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
  137. char mmap_called; /* 0 -> mmap() never called on this fd */
  138. } Sg_fd;
  139. typedef struct sg_device { /* holds the state of each scsi generic device */
  140. struct scsi_device *device;
  141. wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
  142. int sg_tablesize; /* adapter's max scatter-gather table size */
  143. Sg_fd *headfp; /* first open fd belonging to this device */
  144. volatile char detached; /* 0->attached, 1->detached pending removal */
  145. volatile char exclude; /* opened for exclusive access */
  146. char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
  147. struct gendisk *disk;
  148. struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
  149. } Sg_device;
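/*
 * Rough ownership sketch (summarizing the three structures above): one
 * Sg_device per SCSI device node; its headfp points to a singly linked
 * list of Sg_fd, one per open file descriptor (nextfp); each Sg_fd embeds
 * up to SG_MAX_QUEUE Sg_request slots in req_arr, threaded through
 * headrp/nextrp while commands are outstanding.
 */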
  150. static int sg_fasync(int fd, struct file *filp, int mode);
  151. /* tasklet or soft irq callback */
  152. static void sg_cmd_done(void *data, char *sense, int result, int resid);
  153. static int sg_start_req(Sg_request * srp);
  154. static void sg_finish_rem_req(Sg_request * srp);
  155. static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size);
  156. static int sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp,
  157. int tablesize);
  158. static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count,
  159. Sg_request * srp);
  160. static ssize_t sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
  161. int blocking, int read_only, Sg_request ** o_srp);
  162. static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
  163. unsigned char *cmnd, int timeout, int blocking);
  164. static int sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
  165. int wr_xf, int *countp, unsigned char __user **up);
  166. static int sg_write_xfer(Sg_request * srp);
  167. static int sg_read_xfer(Sg_request * srp);
  168. static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
  169. static void sg_remove_scat(Sg_scatter_hold * schp);
  170. static void sg_build_reserve(Sg_fd * sfp, int req_size);
  171. static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
  172. static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
  173. static struct page *sg_page_malloc(int rqSz, int lowDma, int *retSzp);
  174. static void sg_page_free(struct page *page, int size);
  175. static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
  176. static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
  177. static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
  178. static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
  179. static Sg_request *sg_add_request(Sg_fd * sfp);
  180. static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
  181. static int sg_res_in_use(Sg_fd * sfp);
  182. static int sg_allow_access(unsigned char opcode, char dev_type);
  183. static int sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len);
  184. static Sg_device *sg_get_dev(int dev);
  185. #ifdef CONFIG_SCSI_PROC_FS
  186. static int sg_last_dev(void);
  187. #endif
  188. static Sg_device **sg_dev_arr = NULL;
  189. static int sg_dev_max;
  190. static int sg_nr_dev;
  191. #define SZ_SG_HEADER sizeof(struct sg_header)
  192. #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t)
  193. #define SZ_SG_IOVEC sizeof(sg_iovec_t)
  194. #define SZ_SG_REQ_INFO sizeof(sg_req_info_t)
  195. static int
  196. sg_open(struct inode *inode, struct file *filp)
  197. {
  198. int dev = iminor(inode);
  199. int flags = filp->f_flags;
  200. struct request_queue *q;
  201. Sg_device *sdp;
  202. Sg_fd *sfp;
  203. int res;
  204. int retval;
  205. nonseekable_open(inode, filp);
  206. SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
  207. sdp = sg_get_dev(dev);
  208. if ((!sdp) || (!sdp->device))
  209. return -ENXIO;
  210. if (sdp->detached)
  211. return -ENODEV;
  212. /* This driver's module count bumped by fops_get in <linux/fs.h> */
  213. /* Prevent the device driver from vanishing while we sleep */
  214. retval = scsi_device_get(sdp->device);
  215. if (retval)
  216. return retval;
  217. if (!((flags & O_NONBLOCK) ||
  218. scsi_block_when_processing_errors(sdp->device))) {
  219. retval = -ENXIO;
  220. /* we are in error recovery for this device */
  221. goto error_out;
  222. }
  223. if (flags & O_EXCL) {
  224. if (O_RDONLY == (flags & O_ACCMODE)) {
  225. retval = -EPERM; /* Can't lock it with read only access */
  226. goto error_out;
  227. }
  228. if (sdp->headfp && (flags & O_NONBLOCK)) {
  229. retval = -EBUSY;
  230. goto error_out;
  231. }
  232. res = 0;
  233. __wait_event_interruptible(sdp->o_excl_wait,
  234. ((sdp->headfp || sdp->exclude) ? 0 : (sdp->exclude = 1)), res);
  235. if (res) {
  236. retval = res; /* -ERESTARTSYS because signal hit process */
  237. goto error_out;
  238. }
  239. } else if (sdp->exclude) { /* some other fd has an exclusive lock on dev */
  240. if (flags & O_NONBLOCK) {
  241. retval = -EBUSY;
  242. goto error_out;
  243. }
  244. res = 0;
  245. __wait_event_interruptible(sdp->o_excl_wait, (!sdp->exclude),
  246. res);
  247. if (res) {
  248. retval = res; /* -ERESTARTSYS because signal hit process */
  249. goto error_out;
  250. }
  251. }
  252. if (sdp->detached) {
  253. retval = -ENODEV;
  254. goto error_out;
  255. }
  256. if (!sdp->headfp) { /* no existing opens on this device */
  257. sdp->sgdebug = 0;
  258. q = sdp->device->request_queue;
  259. sdp->sg_tablesize = min(q->max_hw_segments,
  260. q->max_phys_segments);
  261. }
  262. if ((sfp = sg_add_sfp(sdp, dev)))
  263. filp->private_data = sfp;
  264. else {
  265. if (flags & O_EXCL)
  266. sdp->exclude = 0; /* undo if error */
  267. retval = -ENOMEM;
  268. goto error_out;
  269. }
  270. return 0;
  271. error_out:
  272. scsi_device_put(sdp->device);
  273. return retval;
  274. }
  275. /* Following function was formerly called 'sg_close' */
  276. static int
  277. sg_release(struct inode *inode, struct file *filp)
  278. {
  279. Sg_device *sdp;
  280. Sg_fd *sfp;
  281. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
  282. return -ENXIO;
  283. SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
  284. sg_fasync(-1, filp, 0); /* remove filp from async notification list */
  285. if (0 == sg_remove_sfp(sdp, sfp)) { /* Returns 1 when sdp gone */
  286. if (!sdp->detached) {
  287. scsi_device_put(sdp->device);
  288. }
  289. sdp->exclude = 0;
  290. wake_up_interruptible(&sdp->o_excl_wait);
  291. }
  292. return 0;
  293. }
  294. static ssize_t
  295. sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
  296. {
  297. Sg_device *sdp;
  298. Sg_fd *sfp;
  299. Sg_request *srp;
  300. int req_pack_id = -1;
  301. sg_io_hdr_t *hp;
  302. struct sg_header *old_hdr = NULL;
  303. int retval = 0;
  304. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
  305. return -ENXIO;
  306. SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n",
  307. sdp->disk->disk_name, (int) count));
  308. if (!access_ok(VERIFY_WRITE, buf, count))
  309. return -EFAULT;
  310. if (sfp->force_packid && (count >= SZ_SG_HEADER)) {
  311. old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
  312. if (!old_hdr)
  313. return -ENOMEM;
  314. if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) {
  315. retval = -EFAULT;
  316. goto free_old_hdr;
  317. }
  318. if (old_hdr->reply_len < 0) {
  319. if (count >= SZ_SG_IO_HDR) {
  320. sg_io_hdr_t *new_hdr;
  321. new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL);
  322. if (!new_hdr) {
  323. retval = -ENOMEM;
  324. goto free_old_hdr;
  325. }
  326. retval =__copy_from_user
  327. (new_hdr, buf, SZ_SG_IO_HDR);
  328. req_pack_id = new_hdr->pack_id;
  329. kfree(new_hdr);
  330. if (retval) {
  331. retval = -EFAULT;
  332. goto free_old_hdr;
  333. }
  334. }
  335. } else
  336. req_pack_id = old_hdr->pack_id;
  337. }
  338. srp = sg_get_rq_mark(sfp, req_pack_id);
  339. if (!srp) { /* now wait on packet to arrive */
  340. if (sdp->detached) {
  341. retval = -ENODEV;
  342. goto free_old_hdr;
  343. }
  344. if (filp->f_flags & O_NONBLOCK) {
  345. retval = -EAGAIN;
  346. goto free_old_hdr;
  347. }
  348. while (1) {
  349. retval = 0; /* following macro beats race condition */
  350. __wait_event_interruptible(sfp->read_wait,
  351. (sdp->detached ||
  352. (srp = sg_get_rq_mark(sfp, req_pack_id))),
  353. retval);
  354. if (sdp->detached) {
  355. retval = -ENODEV;
  356. goto free_old_hdr;
  357. }
  358. if (0 == retval)
  359. break;
  360. /* -ERESTARTSYS as signal hit process */
  361. goto free_old_hdr;
  362. }
  363. }
  364. if (srp->header.interface_id != '\0') {
  365. retval = sg_new_read(sfp, buf, count, srp);
  366. goto free_old_hdr;
  367. }
  368. hp = &srp->header;
  369. if (old_hdr == NULL) {
  370. old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL);
  371. if (! old_hdr) {
  372. retval = -ENOMEM;
  373. goto free_old_hdr;
  374. }
  375. }
  376. memset(old_hdr, 0, SZ_SG_HEADER);
  377. old_hdr->reply_len = (int) hp->timeout;
  378. old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */
  379. old_hdr->pack_id = hp->pack_id;
  380. old_hdr->twelve_byte =
  381. ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0;
  382. old_hdr->target_status = hp->masked_status;
  383. old_hdr->host_status = hp->host_status;
  384. old_hdr->driver_status = hp->driver_status;
  385. if ((CHECK_CONDITION & hp->masked_status) ||
  386. (DRIVER_SENSE & hp->driver_status))
  387. memcpy(old_hdr->sense_buffer, srp->sense_b,
  388. sizeof (old_hdr->sense_buffer));
  389. switch (hp->host_status) {
  390. /* This setup of 'result' is for backward compatibility and is best
  391. ignored by the user who should use target, host + driver status */
  392. case DID_OK:
  393. case DID_PASSTHROUGH:
  394. case DID_SOFT_ERROR:
  395. old_hdr->result = 0;
  396. break;
  397. case DID_NO_CONNECT:
  398. case DID_BUS_BUSY:
  399. case DID_TIME_OUT:
  400. old_hdr->result = EBUSY;
  401. break;
  402. case DID_BAD_TARGET:
  403. case DID_ABORT:
  404. case DID_PARITY:
  405. case DID_RESET:
  406. case DID_BAD_INTR:
  407. old_hdr->result = EIO;
  408. break;
  409. case DID_ERROR:
  410. old_hdr->result = (srp->sense_b[0] == 0 &&
  411. hp->masked_status == GOOD) ? 0 : EIO;
  412. break;
  413. default:
  414. old_hdr->result = EIO;
  415. break;
  416. }
  417. /* Now copy the result back to the user buffer. */
  418. if (count >= SZ_SG_HEADER) {
  419. if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
  420. retval = -EFAULT;
  421. goto free_old_hdr;
  422. }
  423. buf += SZ_SG_HEADER;
  424. if (count > old_hdr->reply_len)
  425. count = old_hdr->reply_len;
  426. if (count > SZ_SG_HEADER) {
  427. if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
  428. retval = -EFAULT;
  429. goto free_old_hdr;
  430. }
  431. }
  432. } else
  433. count = (old_hdr->result == 0) ? 0 : -EIO;
  434. sg_finish_rem_req(srp);
  435. retval = count;
  436. free_old_hdr:
  437. kfree(old_hdr);
  438. return retval;
  439. }
  440. static ssize_t
  441. sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
  442. {
  443. sg_io_hdr_t *hp = &srp->header;
  444. int err = 0;
  445. int len;
  446. if (count < SZ_SG_IO_HDR) {
  447. err = -EINVAL;
  448. goto err_out;
  449. }
  450. hp->sb_len_wr = 0;
  451. if ((hp->mx_sb_len > 0) && hp->sbp) {
  452. if ((CHECK_CONDITION & hp->masked_status) ||
  453. (DRIVER_SENSE & hp->driver_status)) {
  454. int sb_len = SCSI_SENSE_BUFFERSIZE;
  455. sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len;
  456. len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */
  457. len = (len > sb_len) ? sb_len : len;
  458. if (copy_to_user(hp->sbp, srp->sense_b, len)) {
  459. err = -EFAULT;
  460. goto err_out;
  461. }
  462. hp->sb_len_wr = len;
  463. }
  464. }
  465. if (hp->masked_status || hp->host_status || hp->driver_status)
  466. hp->info |= SG_INFO_CHECK;
  467. if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) {
  468. err = -EFAULT;
  469. goto err_out;
  470. }
  471. err = sg_read_xfer(srp);
  472. err_out:
  473. sg_finish_rem_req(srp);
  474. return (0 == err) ? count : err;
  475. }
  476. static ssize_t
  477. sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
  478. {
  479. int mxsize, cmd_size, k;
  480. int input_size, blocking;
  481. unsigned char opcode;
  482. Sg_device *sdp;
  483. Sg_fd *sfp;
  484. Sg_request *srp;
  485. struct sg_header old_hdr;
  486. sg_io_hdr_t *hp;
  487. unsigned char cmnd[MAX_COMMAND_SIZE];
  488. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
  489. return -ENXIO;
  490. SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n",
  491. sdp->disk->disk_name, (int) count));
  492. if (sdp->detached)
  493. return -ENODEV;
  494. if (!((filp->f_flags & O_NONBLOCK) ||
  495. scsi_block_when_processing_errors(sdp->device)))
  496. return -ENXIO;
  497. if (!access_ok(VERIFY_READ, buf, count))
  498. return -EFAULT; /* protects following copy_from_user()s + get_user()s */
  499. if (count < SZ_SG_HEADER)
  500. return -EIO;
  501. if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER))
  502. return -EFAULT;
  503. blocking = !(filp->f_flags & O_NONBLOCK);
  504. if (old_hdr.reply_len < 0)
  505. return sg_new_write(sfp, buf, count, blocking, 0, NULL);
  506. if (count < (SZ_SG_HEADER + 6))
  507. return -EIO; /* The minimum scsi command length is 6 bytes. */
  508. if (!(srp = sg_add_request(sfp))) {
  509. SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n"));
  510. return -EDOM;
  511. }
  512. buf += SZ_SG_HEADER;
  513. __get_user(opcode, buf);
  514. if (sfp->next_cmd_len > 0) {
  515. if (sfp->next_cmd_len > MAX_COMMAND_SIZE) {
  516. SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n"));
  517. sfp->next_cmd_len = 0;
  518. sg_remove_request(sfp, srp);
  519. return -EIO;
  520. }
  521. cmd_size = sfp->next_cmd_len;
  522. sfp->next_cmd_len = 0; /* reset so only this write() effected */
  523. } else {
  524. cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */
  525. if ((opcode >= 0xc0) && old_hdr.twelve_byte)
  526. cmd_size = 12;
  527. }
  528. SCSI_LOG_TIMEOUT(4, printk(
  529. "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
  530. /* Determine buffer size. */
  531. input_size = count - cmd_size;
  532. mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len;
  533. mxsize -= SZ_SG_HEADER;
  534. input_size -= SZ_SG_HEADER;
  535. if (input_size < 0) {
  536. sg_remove_request(sfp, srp);
  537. return -EIO; /* User did not pass enough bytes for this command. */
  538. }
  539. hp = &srp->header;
  540. hp->interface_id = '\0'; /* indicator of old interface tunnelled */
  541. hp->cmd_len = (unsigned char) cmd_size;
  542. hp->iovec_count = 0;
  543. hp->mx_sb_len = 0;
  544. if (input_size > 0)
  545. hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ?
  546. SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV;
  547. else
  548. hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE;
  549. hp->dxfer_len = mxsize;
  550. hp->dxferp = (char __user *)buf + cmd_size;
  551. hp->sbp = NULL;
  552. hp->timeout = old_hdr.reply_len; /* structure abuse ... */
  553. hp->flags = input_size; /* structure abuse ... */
  554. hp->pack_id = old_hdr.pack_id;
  555. hp->usr_ptr = NULL;
  556. if (__copy_from_user(cmnd, buf, cmd_size))
  557. return -EFAULT;
  558. /*
  559. * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
  560. * but it is possible that the app intended SG_DXFER_TO_DEV, because there
  561. * is a non-zero input_size, so emit a warning.
  562. */
  563. if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)
  564. if (printk_ratelimit())
  565. printk(KERN_WARNING
  566. "sg_write: data in/out %d/%d bytes for SCSI command 0x%x--"
  567. "guessing data in;\n" KERN_WARNING " "
  568. "program %s not setting count and/or reply_len properly\n",
  569. old_hdr.reply_len - (int)SZ_SG_HEADER,
  570. input_size, (unsigned int) cmnd[0],
  571. current->comm);
  572. k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
  573. return (k < 0) ? k : count;
  574. }
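/*
 * Layout sketch of the old (pre-v3) write() interface parsed above:
 *   [struct sg_header][SCSI command bytes][optional data-out bytes]
 * A negative reply_len in the leading sg_header redirects the call to the
 * v3 sg_io_hdr path via sg_new_write() instead.
 */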
  575. static ssize_t
  576. sg_new_write(Sg_fd * sfp, const char __user *buf, size_t count,
  577. int blocking, int read_only, Sg_request ** o_srp)
  578. {
  579. int k;
  580. Sg_request *srp;
  581. sg_io_hdr_t *hp;
  582. unsigned char cmnd[MAX_COMMAND_SIZE];
  583. int timeout;
  584. unsigned long ul_timeout;
  585. if (count < SZ_SG_IO_HDR)
  586. return -EINVAL;
  587. if (!access_ok(VERIFY_READ, buf, count))
  588. return -EFAULT; /* protects following copy_from_user()s + get_user()s */
  589. sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */
  590. if (!(srp = sg_add_request(sfp))) {
  591. SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
  592. return -EDOM;
  593. }
  594. hp = &srp->header;
  595. if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
  596. sg_remove_request(sfp, srp);
  597. return -EFAULT;
  598. }
  599. if (hp->interface_id != 'S') {
  600. sg_remove_request(sfp, srp);
  601. return -ENOSYS;
  602. }
  603. if (hp->flags & SG_FLAG_MMAP_IO) {
  604. if (hp->dxfer_len > sfp->reserve.bufflen) {
  605. sg_remove_request(sfp, srp);
  606. return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */
  607. }
  608. if (hp->flags & SG_FLAG_DIRECT_IO) {
  609. sg_remove_request(sfp, srp);
  610. return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
  611. }
  612. if (sg_res_in_use(sfp)) {
  613. sg_remove_request(sfp, srp);
  614. return -EBUSY; /* reserve buffer already being used */
  615. }
  616. }
  617. ul_timeout = msecs_to_jiffies(srp->header.timeout);
  618. timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
  619. if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
  620. sg_remove_request(sfp, srp);
  621. return -EMSGSIZE;
  622. }
  623. if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) {
  624. sg_remove_request(sfp, srp);
  625. return -EFAULT; /* protects following copy_from_user()s + get_user()s */
  626. }
  627. if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
  628. sg_remove_request(sfp, srp);
  629. return -EFAULT;
  630. }
  631. if (read_only &&
  632. (!sg_allow_access(cmnd[0], sfp->parentdp->device->type))) {
  633. sg_remove_request(sfp, srp);
  634. return -EPERM;
  635. }
  636. k = sg_common_write(sfp, srp, cmnd, timeout, blocking);
  637. if (k < 0)
  638. return k;
  639. if (o_srp)
  640. *o_srp = srp;
  641. return count;
  642. }
  643. static int
  644. sg_common_write(Sg_fd * sfp, Sg_request * srp,
  645. unsigned char *cmnd, int timeout, int blocking)
  646. {
  647. int k, data_dir;
  648. Sg_device *sdp = sfp->parentdp;
  649. sg_io_hdr_t *hp = &srp->header;
  650. srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */
  651. hp->status = 0;
  652. hp->masked_status = 0;
  653. hp->msg_status = 0;
  654. hp->info = 0;
  655. hp->host_status = 0;
  656. hp->driver_status = 0;
  657. hp->resid = 0;
  658. SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
  659. (int) cmnd[0], (int) hp->cmd_len));
  660. if ((k = sg_start_req(srp))) {
  661. SCSI_LOG_TIMEOUT(1, printk("sg_write: start_req err=%d\n", k));
  662. sg_finish_rem_req(srp);
  663. return k; /* probably out of space --> ENOMEM */
  664. }
  665. if ((k = sg_write_xfer(srp))) {
  666. SCSI_LOG_TIMEOUT(1, printk("sg_write: write_xfer, bad address\n"));
  667. sg_finish_rem_req(srp);
  668. return k;
  669. }
  670. if (sdp->detached) {
  671. sg_finish_rem_req(srp);
  672. return -ENODEV;
  673. }
  674. switch (hp->dxfer_direction) {
  675. case SG_DXFER_TO_FROM_DEV:
  676. case SG_DXFER_FROM_DEV:
  677. data_dir = DMA_FROM_DEVICE;
  678. break;
  679. case SG_DXFER_TO_DEV:
  680. data_dir = DMA_TO_DEVICE;
  681. break;
  682. case SG_DXFER_UNKNOWN:
  683. data_dir = DMA_BIDIRECTIONAL;
  684. break;
  685. default:
  686. data_dir = DMA_NONE;
  687. break;
  688. }
  689. hp->duration = jiffies_to_msecs(jiffies);
  690. /* Now send everything off to the mid-level. The next time we hear about this
  691. packet is when sg_cmd_done() is called (i.e. a callback). */
  692. if (scsi_execute_async(sdp->device, cmnd, hp->cmd_len, data_dir, srp->data.buffer,
  693. hp->dxfer_len, srp->data.k_use_sg, timeout,
  694. SG_DEFAULT_RETRIES, srp, sg_cmd_done,
  695. GFP_ATOMIC)) {
  696. SCSI_LOG_TIMEOUT(1, printk("sg_write: scsi_execute_async failed\n"));
  697. /*
  698. * most likely out of mem, but could also be a bad map
  699. */
  700. return -ENOMEM;
  701. } else
  702. return 0;
  703. }
  704. static int
  705. sg_srp_done(Sg_request *srp, Sg_fd *sfp)
  706. {
  707. unsigned long iflags;
  708. int done;
  709. read_lock_irqsave(&sfp->rq_list_lock, iflags);
  710. done = srp->done;
  711. read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  712. return done;
  713. }
  714. static int
  715. sg_ioctl(struct inode *inode, struct file *filp,
  716. unsigned int cmd_in, unsigned long arg)
  717. {
  718. void __user *p = (void __user *)arg;
  719. int __user *ip = p;
  720. int result, val, read_only;
  721. Sg_device *sdp;
  722. Sg_fd *sfp;
  723. Sg_request *srp;
  724. unsigned long iflags;
  725. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
  726. return -ENXIO;
  727. SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n",
  728. sdp->disk->disk_name, (int) cmd_in));
  729. read_only = (O_RDWR != (filp->f_flags & O_ACCMODE));
  730. switch (cmd_in) {
  731. case SG_IO:
  732. {
  733. int blocking = 1; /* ignore O_NONBLOCK flag */
  734. if (sdp->detached)
  735. return -ENODEV;
  736. if (!scsi_block_when_processing_errors(sdp->device))
  737. return -ENXIO;
  738. if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
  739. return -EFAULT;
  740. result =
  741. sg_new_write(sfp, p, SZ_SG_IO_HDR,
  742. blocking, read_only, &srp);
  743. if (result < 0)
  744. return result;
  745. srp->sg_io_owned = 1;
  746. while (1) {
  747. result = 0; /* following macro to beat race condition */
  748. __wait_event_interruptible(sfp->read_wait,
  749. (sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
  750. result);
  751. if (sdp->detached)
  752. return -ENODEV;
  753. if (sfp->closed)
  754. return 0; /* request packet dropped already */
  755. if (0 == result)
  756. break;
  757. srp->orphan = 1;
  758. return result; /* -ERESTARTSYS because signal hit process */
  759. }
  760. write_lock_irqsave(&sfp->rq_list_lock, iflags);
  761. srp->done = 2;
  762. write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  763. result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
  764. return (result < 0) ? result : 0;
  765. }
  766. case SG_SET_TIMEOUT:
  767. result = get_user(val, ip);
  768. if (result)
  769. return result;
  770. if (val < 0)
  771. return -EIO;
  772. if (val >= MULDIV (INT_MAX, USER_HZ, HZ))
  773. val = MULDIV (INT_MAX, USER_HZ, HZ);
  774. sfp->timeout_user = val;
  775. sfp->timeout = MULDIV (val, HZ, USER_HZ);
  776. return 0;
  777. case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */
  778. /* strange ..., for backward compatibility */
  779. return sfp->timeout_user;
  780. case SG_SET_FORCE_LOW_DMA:
  781. result = get_user(val, ip);
  782. if (result)
  783. return result;
  784. if (val) {
  785. sfp->low_dma = 1;
  786. if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
  787. val = (int) sfp->reserve.bufflen;
  788. sg_remove_scat(&sfp->reserve);
  789. sg_build_reserve(sfp, val);
  790. }
  791. } else {
  792. if (sdp->detached)
  793. return -ENODEV;
  794. sfp->low_dma = sdp->device->host->unchecked_isa_dma;
  795. }
  796. return 0;
  797. case SG_GET_LOW_DMA:
  798. return put_user((int) sfp->low_dma, ip);
  799. case SG_GET_SCSI_ID:
  800. if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t)))
  801. return -EFAULT;
  802. else {
  803. sg_scsi_id_t __user *sg_idp = p;
  804. if (sdp->detached)
  805. return -ENODEV;
  806. __put_user((int) sdp->device->host->host_no,
  807. &sg_idp->host_no);
  808. __put_user((int) sdp->device->channel,
  809. &sg_idp->channel);
  810. __put_user((int) sdp->device->id, &sg_idp->scsi_id);
  811. __put_user((int) sdp->device->lun, &sg_idp->lun);
  812. __put_user((int) sdp->device->type, &sg_idp->scsi_type);
  813. __put_user((short) sdp->device->host->cmd_per_lun,
  814. &sg_idp->h_cmd_per_lun);
  815. __put_user((short) sdp->device->queue_depth,
  816. &sg_idp->d_queue_depth);
  817. __put_user(0, &sg_idp->unused[0]);
  818. __put_user(0, &sg_idp->unused[1]);
  819. return 0;
  820. }
  821. case SG_SET_FORCE_PACK_ID:
  822. result = get_user(val, ip);
  823. if (result)
  824. return result;
  825. sfp->force_packid = val ? 1 : 0;
  826. return 0;
  827. case SG_GET_PACK_ID:
  828. if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
  829. return -EFAULT;
  830. read_lock_irqsave(&sfp->rq_list_lock, iflags);
  831. for (srp = sfp->headrp; srp; srp = srp->nextrp) {
  832. if ((1 == srp->done) && (!srp->sg_io_owned)) {
  833. read_unlock_irqrestore(&sfp->rq_list_lock,
  834. iflags);
  835. __put_user(srp->header.pack_id, ip);
  836. return 0;
  837. }
  838. }
  839. read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  840. __put_user(-1, ip);
  841. return 0;
  842. case SG_GET_NUM_WAITING:
  843. read_lock_irqsave(&sfp->rq_list_lock, iflags);
  844. for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
  845. if ((1 == srp->done) && (!srp->sg_io_owned))
  846. ++val;
  847. }
  848. read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  849. return put_user(val, ip);
  850. case SG_GET_SG_TABLESIZE:
  851. return put_user(sdp->sg_tablesize, ip);
  852. case SG_SET_RESERVED_SIZE:
  853. result = get_user(val, ip);
  854. if (result)
  855. return result;
  856. if (val < 0)
  857. return -EINVAL;
  858. if (val != sfp->reserve.bufflen) {
  859. if (sg_res_in_use(sfp) || sfp->mmap_called)
  860. return -EBUSY;
  861. sg_remove_scat(&sfp->reserve);
  862. sg_build_reserve(sfp, val);
  863. }
  864. return 0;
  865. case SG_GET_RESERVED_SIZE:
  866. val = (int) sfp->reserve.bufflen;
  867. return put_user(val, ip);
  868. case SG_SET_COMMAND_Q:
  869. result = get_user(val, ip);
  870. if (result)
  871. return result;
  872. sfp->cmd_q = val ? 1 : 0;
  873. return 0;
  874. case SG_GET_COMMAND_Q:
  875. return put_user((int) sfp->cmd_q, ip);
  876. case SG_SET_KEEP_ORPHAN:
  877. result = get_user(val, ip);
  878. if (result)
  879. return result;
  880. sfp->keep_orphan = val;
  881. return 0;
  882. case SG_GET_KEEP_ORPHAN:
  883. return put_user((int) sfp->keep_orphan, ip);
  884. case SG_NEXT_CMD_LEN:
  885. result = get_user(val, ip);
  886. if (result)
  887. return result;
  888. sfp->next_cmd_len = (val > 0) ? val : 0;
  889. return 0;
  890. case SG_GET_VERSION_NUM:
  891. return put_user(sg_version_num, ip);
  892. case SG_GET_ACCESS_COUNT:
  893. /* faked - we don't have a real access count anymore */
  894. val = (sdp->device ? 1 : 0);
  895. return put_user(val, ip);
  896. case SG_GET_REQUEST_TABLE:
  897. if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE))
  898. return -EFAULT;
  899. else {
  900. sg_req_info_t *rinfo;
  901. unsigned int ms;
  902. rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
  903. GFP_KERNEL);
  904. if (!rinfo)
  905. return -ENOMEM;
  906. read_lock_irqsave(&sfp->rq_list_lock, iflags);
  907. for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
  908. ++val, srp = srp ? srp->nextrp : srp) {
  909. memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
  910. if (srp) {
  911. rinfo[val].req_state = srp->done + 1;
  912. rinfo[val].problem =
  913. srp->header.masked_status &
  914. srp->header.host_status &
  915. srp->header.driver_status;
  916. if (srp->done)
  917. rinfo[val].duration =
  918. srp->header.duration;
  919. else {
  920. ms = jiffies_to_msecs(jiffies);
  921. rinfo[val].duration =
  922. (ms > srp->header.duration) ?
  923. (ms - srp->header.duration) : 0;
  924. }
  925. rinfo[val].orphan = srp->orphan;
  926. rinfo[val].sg_io_owned =
  927. srp->sg_io_owned;
  928. rinfo[val].pack_id =
  929. srp->header.pack_id;
  930. rinfo[val].usr_ptr =
  931. srp->header.usr_ptr;
  932. }
  933. }
  934. read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  935. result = __copy_to_user(p, rinfo,
  936. SZ_SG_REQ_INFO * SG_MAX_QUEUE);
  937. result = result ? -EFAULT : 0;
  938. kfree(rinfo);
  939. return result;
  940. }
  941. case SG_EMULATED_HOST:
  942. if (sdp->detached)
  943. return -ENODEV;
  944. return put_user(sdp->device->host->hostt->emulated, ip);
  945. case SG_SCSI_RESET:
  946. if (sdp->detached)
  947. return -ENODEV;
  948. if (filp->f_flags & O_NONBLOCK) {
  949. if (scsi_host_in_recovery(sdp->device->host))
  950. return -EBUSY;
  951. } else if (!scsi_block_when_processing_errors(sdp->device))
  952. return -EBUSY;
  953. result = get_user(val, ip);
  954. if (result)
  955. return result;
  956. if (SG_SCSI_RESET_NOTHING == val)
  957. return 0;
  958. switch (val) {
  959. case SG_SCSI_RESET_DEVICE:
  960. val = SCSI_TRY_RESET_DEVICE;
  961. break;
  962. case SG_SCSI_RESET_BUS:
  963. val = SCSI_TRY_RESET_BUS;
  964. break;
  965. case SG_SCSI_RESET_HOST:
  966. val = SCSI_TRY_RESET_HOST;
  967. break;
  968. default:
  969. return -EINVAL;
  970. }
  971. if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
  972. return -EACCES;
  973. return (scsi_reset_provider(sdp->device, val) ==
  974. SUCCESS) ? 0 : -EIO;
  975. case SCSI_IOCTL_SEND_COMMAND:
  976. if (sdp->detached)
  977. return -ENODEV;
  978. if (read_only) {
  979. unsigned char opcode = WRITE_6;
  980. Scsi_Ioctl_Command __user *siocp = p;
  981. if (copy_from_user(&opcode, siocp->data, 1))
  982. return -EFAULT;
  983. if (!sg_allow_access(opcode, sdp->device->type))
  984. return -EPERM;
  985. }
  986. return scsi_ioctl_send_command(sdp->device, p);
  987. case SG_SET_DEBUG:
  988. result = get_user(val, ip);
  989. if (result)
  990. return result;
  991. sdp->sgdebug = (char) val;
  992. return 0;
  993. case SCSI_IOCTL_GET_IDLUN:
  994. case SCSI_IOCTL_GET_BUS_NUMBER:
  995. case SCSI_IOCTL_PROBE_HOST:
  996. case SG_GET_TRANSFORM:
  997. if (sdp->detached)
  998. return -ENODEV;
  999. return scsi_ioctl(sdp->device, cmd_in, p);
  1000. default:
  1001. if (read_only)
  1002. return -EPERM; /* don't know so take safe approach */
  1003. return scsi_ioctl(sdp->device, cmd_in, p);
  1004. }
  1005. }
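/*
 * Minimal user-space sketch of the SG_IO path handled above (illustrative
 * only, error handling omitted): issue a 6-byte INQUIRY through the v3
 * sg_io_hdr_t interface and wait synchronously for the reply.
 *
 *   int fd = open("/dev/sg0", O_RDWR);                // any sg node
 *   unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  // INQUIRY, 96-byte allocation
 *   unsigned char buf[96], sense[32];
 *   sg_io_hdr_t io;
 *   memset(&io, 0, sizeof(io));
 *   io.interface_id = 'S';              // mandatory, checked in sg_new_write()
 *   io.dxfer_direction = SG_DXFER_FROM_DEV;
 *   io.cmd_len = sizeof(cdb);    io.cmdp = cdb;
 *   io.dxfer_len = sizeof(buf);  io.dxferp = buf;
 *   io.mx_sb_len = sizeof(sense); io.sbp = sense;
 *   io.timeout = 5000;                  // milliseconds, see msecs_to_jiffies() above
 *   if (ioctl(fd, SG_IO, &io) == 0 && !(io.info & SG_INFO_CHECK))
 *           // buf now holds the INQUIRY data (short by io.resid bytes)
 */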
  1006. #ifdef CONFIG_COMPAT
  1007. static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
  1008. {
  1009. Sg_device *sdp;
  1010. Sg_fd *sfp;
  1011. struct scsi_device *sdev;
  1012. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
  1013. return -ENXIO;
  1014. sdev = sdp->device;
  1015. if (sdev->host->hostt->compat_ioctl) {
  1016. int ret;
  1017. ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
  1018. return ret;
  1019. }
  1020. return -ENOIOCTLCMD;
  1021. }
  1022. #endif
  1023. static unsigned int
  1024. sg_poll(struct file *filp, poll_table * wait)
  1025. {
  1026. unsigned int res = 0;
  1027. Sg_device *sdp;
  1028. Sg_fd *sfp;
  1029. Sg_request *srp;
  1030. int count = 0;
  1031. unsigned long iflags;
  1032. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))
  1033. || sfp->closed)
  1034. return POLLERR;
  1035. poll_wait(filp, &sfp->read_wait, wait);
  1036. read_lock_irqsave(&sfp->rq_list_lock, iflags);
  1037. for (srp = sfp->headrp; srp; srp = srp->nextrp) {
  1038. /* if any read waiting, flag it */
  1039. if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
  1040. res = POLLIN | POLLRDNORM;
  1041. ++count;
  1042. }
  1043. read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  1044. if (sdp->detached)
  1045. res |= POLLHUP;
  1046. else if (!sfp->cmd_q) {
  1047. if (0 == count)
  1048. res |= POLLOUT | POLLWRNORM;
  1049. } else if (count < SG_MAX_QUEUE)
  1050. res |= POLLOUT | POLLWRNORM;
  1051. SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
  1052. sdp->disk->disk_name, (int) res));
  1053. return res;
  1054. }
  1055. static int
  1056. sg_fasync(int fd, struct file *filp, int mode)
  1057. {
  1058. int retval;
  1059. Sg_device *sdp;
  1060. Sg_fd *sfp;
  1061. if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
  1062. return -ENXIO;
  1063. SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
  1064. sdp->disk->disk_name, mode));
  1065. retval = fasync_helper(fd, filp, mode, &sfp->async_qp);
  1066. return (retval < 0) ? retval : 0;
  1067. }
  1068. /* When startFinish==1 increments page counts for pages other than the
  1069. first of scatter gather elements obtained from alloc_pages().
  1070. When startFinish==0 decrements ... */
  1071. static void
  1072. sg_rb_correct4mmap(Sg_scatter_hold * rsv_schp, int startFinish)
  1073. {
  1074. struct scatterlist *sg = rsv_schp->buffer;
  1075. struct page *page;
  1076. int k, m;
  1077. SCSI_LOG_TIMEOUT(3, printk("sg_rb_correct4mmap: startFinish=%d, scatg=%d\n",
  1078. startFinish, rsv_schp->k_use_sg));
  1079. /* N.B. correction _not_ applied to base page of each allocation */
  1080. for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
  1081. for (m = PAGE_SIZE; m < sg->length; m += PAGE_SIZE) {
  1082. page = sg->page;
  1083. if (startFinish)
  1084. get_page(page);
  1085. else {
  1086. if (page_count(page) > 0)
  1087. __put_page(page);
  1088. }
  1089. }
  1090. }
  1091. }
  1092. static struct page *
  1093. sg_vma_nopage(struct vm_area_struct *vma, unsigned long addr, int *type)
  1094. {
  1095. Sg_fd *sfp;
  1096. struct page *page = NOPAGE_SIGBUS;
  1097. unsigned long offset, len, sa;
  1098. Sg_scatter_hold *rsv_schp;
  1099. struct scatterlist *sg;
  1100. int k;
  1101. if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
  1102. return page;
  1103. rsv_schp = &sfp->reserve;
  1104. offset = addr - vma->vm_start;
  1105. if (offset >= rsv_schp->bufflen)
  1106. return page;
  1107. SCSI_LOG_TIMEOUT(3, printk("sg_vma_nopage: offset=%lu, scatg=%d\n",
  1108. offset, rsv_schp->k_use_sg));
  1109. sg = rsv_schp->buffer;
  1110. sa = vma->vm_start;
  1111. for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
  1112. ++k, ++sg) {
  1113. len = vma->vm_end - sa;
  1114. len = (len < sg->length) ? len : sg->length;
  1115. if (offset < len) {
  1116. page = sg->page;
  1117. get_page(page); /* increment page count */
  1118. break;
  1119. }
  1120. sa += len;
  1121. offset -= len;
  1122. }
  1123. if (type)
  1124. *type = VM_FAULT_MINOR;
  1125. return page;
  1126. }
  1127. static struct vm_operations_struct sg_mmap_vm_ops = {
  1128. .nopage = sg_vma_nopage,
  1129. };
  1130. static int
  1131. sg_mmap(struct file *filp, struct vm_area_struct *vma)
  1132. {
  1133. Sg_fd *sfp;
  1134. unsigned long req_sz, len, sa;
  1135. Sg_scatter_hold *rsv_schp;
  1136. int k;
  1137. struct scatterlist *sg;
  1138. if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
  1139. return -ENXIO;
  1140. req_sz = vma->vm_end - vma->vm_start;
  1141. SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
  1142. (void *) vma->vm_start, (int) req_sz));
  1143. if (vma->vm_pgoff)
  1144. return -EINVAL; /* want no offset */
  1145. rsv_schp = &sfp->reserve;
  1146. if (req_sz > rsv_schp->bufflen)
  1147. return -ENOMEM; /* cannot map more than reserved buffer */
  1148. sa = vma->vm_start;
  1149. sg = rsv_schp->buffer;
  1150. for (k = 0; (k < rsv_schp->k_use_sg) && (sa < vma->vm_end);
  1151. ++k, ++sg) {
  1152. len = vma->vm_end - sa;
  1153. len = (len < sg->length) ? len : sg->length;
  1154. sa += len;
  1155. }
  1156. if (0 == sfp->mmap_called) {
  1157. sg_rb_correct4mmap(rsv_schp, 1); /* do only once per fd lifetime */
  1158. sfp->mmap_called = 1;
  1159. }
  1160. vma->vm_flags |= VM_RESERVED;
  1161. vma->vm_private_data = sfp;
  1162. vma->vm_ops = &sg_mmap_vm_ops;
  1163. return 0;
  1164. }
  1165. /* This function is a "bottom half" handler that is called by the
  1166. * mid level when a command is completed (or has failed). */
  1167. static void
  1168. sg_cmd_done(void *data, char *sense, int result, int resid)
  1169. {
  1170. Sg_request *srp = data;
  1171. Sg_device *sdp = NULL;
  1172. Sg_fd *sfp;
  1173. unsigned long iflags;
  1174. unsigned int ms;
  1175. if (NULL == srp) {
  1176. printk(KERN_ERR "sg_cmd_done: NULL request\n");
  1177. return;
  1178. }
  1179. sfp = srp->parentfp;
  1180. if (sfp)
  1181. sdp = sfp->parentdp;
  1182. if ((NULL == sdp) || sdp->detached) {
  1183. printk(KERN_INFO "sg_cmd_done: device detached\n");
  1184. return;
  1185. }
  1186. SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n",
  1187. sdp->disk->disk_name, srp->header.pack_id, result));
  1188. srp->header.resid = resid;
  1189. ms = jiffies_to_msecs(jiffies);
  1190. srp->header.duration = (ms > srp->header.duration) ?
  1191. (ms - srp->header.duration) : 0;
  1192. if (0 != result) {
  1193. struct scsi_sense_hdr sshdr;
  1194. memcpy(srp->sense_b, sense, sizeof (srp->sense_b));
  1195. srp->header.status = 0xff & result;
  1196. srp->header.masked_status = status_byte(result);
  1197. srp->header.msg_status = msg_byte(result);
  1198. srp->header.host_status = host_byte(result);
  1199. srp->header.driver_status = driver_byte(result);
  1200. if ((sdp->sgdebug > 0) &&
  1201. ((CHECK_CONDITION == srp->header.masked_status) ||
  1202. (COMMAND_TERMINATED == srp->header.masked_status)))
  1203. __scsi_print_sense("sg_cmd_done", sense,
  1204. SCSI_SENSE_BUFFERSIZE);
  1205. /* Following if statement is a patch supplied by Eric Youngdale */
  1206. if (driver_byte(result) != 0
  1207. && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
  1208. && !scsi_sense_is_deferred(&sshdr)
  1209. && sshdr.sense_key == UNIT_ATTENTION
  1210. && sdp->device->removable) {
  1211. /* Detected possible disc change. Set the bit - this */
  1212. /* may be used if there are filesystems using this device */
  1213. sdp->device->changed = 1;
  1214. }
  1215. }
  1216. /* Rely on write phase to clean out srp status values, so no "else" */
  1217. if (sfp->closed) { /* whoops this fd already released, cleanup */
  1218. SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
  1219. sg_finish_rem_req(srp);
  1220. srp = NULL;
  1221. if (NULL == sfp->headrp) {
  1222. SCSI_LOG_TIMEOUT(1, printk("sg...bh: already closed, final cleanup\n"));
  1223. if (0 == sg_remove_sfp(sdp, sfp)) { /* device still present */
  1224. scsi_device_put(sdp->device);
  1225. }
  1226. sfp = NULL;
  1227. }
  1228. } else if (srp && srp->orphan) {
  1229. if (sfp->keep_orphan)
  1230. srp->sg_io_owned = 0;
  1231. else {
  1232. sg_finish_rem_req(srp);
  1233. srp = NULL;
  1234. }
  1235. }
  1236. if (sfp && srp) {
  1237. /* Now wake up any sg_read() that is waiting for this packet. */
  1238. kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
  1239. write_lock_irqsave(&sfp->rq_list_lock, iflags);
  1240. srp->done = 1;
  1241. wake_up_interruptible(&sfp->read_wait);
  1242. write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  1243. }
  1244. }
static struct file_operations sg_fops = {
	.owner = THIS_MODULE,
	.read = sg_read,
	.write = sg_write,
	.poll = sg_poll,
	.ioctl = sg_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = sg_compat_ioctl,
#endif
	.open = sg_open,
	.mmap = sg_mmap,
	.release = sg_release,
	.fasync = sg_fasync,
};
  1259. static struct class *sg_sysfs_class;
  1260. static int sg_sysfs_valid = 0;
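/*
 * Allocate and register an Sg_device for a newly attached SCSI device:
 * grow sg_dev_arr by SG_DEV_ARR_LUMP entries when it is full, pick the
 * first free slot as the minor number and name the gendisk "sg<minor>".
 * Returns the minor number on success or a negative errno on failure.
 */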
  1261. static int sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
  1262. {
  1263. struct request_queue *q = scsidp->request_queue;
  1264. Sg_device *sdp;
  1265. unsigned long iflags;
  1266. void *old_sg_dev_arr = NULL;
  1267. int k, error;
  1268. sdp = kmalloc(sizeof(Sg_device), GFP_KERNEL);
  1269. if (!sdp) {
  1270. printk(KERN_WARNING "kmalloc Sg_device failure\n");
  1271. return -ENOMEM;
  1272. }
  1273. write_lock_irqsave(&sg_dev_arr_lock, iflags);
  1274. if (unlikely(sg_nr_dev >= sg_dev_max)) { /* try to resize */
  1275. Sg_device **tmp_da;
  1276. int tmp_dev_max = sg_nr_dev + SG_DEV_ARR_LUMP;
  1277. write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
  1278. tmp_da = kmalloc(tmp_dev_max * sizeof(Sg_device *), GFP_KERNEL);
  1279. if (unlikely(!tmp_da))
  1280. goto expand_failed;
  1281. write_lock_irqsave(&sg_dev_arr_lock, iflags);
  1282. memset(tmp_da, 0, tmp_dev_max * sizeof(Sg_device *));
  1283. memcpy(tmp_da, sg_dev_arr, sg_dev_max * sizeof(Sg_device *));
  1284. old_sg_dev_arr = sg_dev_arr;
  1285. sg_dev_arr = tmp_da;
  1286. sg_dev_max = tmp_dev_max;
  1287. }
  1288. for (k = 0; k < sg_dev_max; k++)
  1289. if (!sg_dev_arr[k])
  1290. break;
  1291. if (unlikely(k >= SG_MAX_DEVS))
  1292. goto overflow;
  1293. memset(sdp, 0, sizeof(*sdp));
  1294. SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k));
  1295. sprintf(disk->disk_name, "sg%d", k);
  1296. disk->first_minor = k;
  1297. sdp->disk = disk;
  1298. sdp->device = scsidp;
  1299. init_waitqueue_head(&sdp->o_excl_wait);
  1300. sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
  1301. sg_nr_dev++;
  1302. sg_dev_arr[k] = sdp;
  1303. write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
  1304. error = k;
  1305. out:
  1306. if (error < 0)
  1307. kfree(sdp);
  1308. kfree(old_sg_dev_arr);
  1309. return error;
  1310. expand_failed:
  1311. printk(KERN_WARNING "sg_alloc: device array cannot be resized\n");
  1312. error = -ENOMEM;
  1313. goto out;
  1314. overflow:
  1315. write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
  1316. sdev_printk(KERN_WARNING, scsidp,
  1317. "Unable to attach sg device type=%d, minor "
  1318. "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
  1319. error = -ENODEV;
  1320. goto out;
  1321. }
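/*
 * class_interface "add" callback, invoked for each SCSI device that
 * appears.  Allocates a gendisk and a cdev, reserves a minor through
 * sg_alloc(), then registers the char device plus the devfs node and the
 * sysfs class device / "generic" symlink for it.
 */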
  1322. static int
  1323. sg_add(struct class_device *cl_dev, struct class_interface *cl_intf)
  1324. {
  1325. struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
  1326. struct gendisk *disk;
  1327. Sg_device *sdp = NULL;
  1328. struct cdev * cdev = NULL;
  1329. int error, k;
  1330. disk = alloc_disk(1);
  1331. if (!disk) {
  1332. printk(KERN_WARNING "alloc_disk failed\n");
  1333. return -ENOMEM;
  1334. }
  1335. disk->major = SCSI_GENERIC_MAJOR;
  1336. error = -ENOMEM;
  1337. cdev = cdev_alloc();
  1338. if (!cdev) {
  1339. printk(KERN_WARNING "cdev_alloc failed\n");
  1340. goto out;
  1341. }
  1342. cdev->owner = THIS_MODULE;
  1343. cdev->ops = &sg_fops;
  1344. error = sg_alloc(disk, scsidp);
  1345. if (error < 0) {
  1346. printk(KERN_WARNING "sg_alloc failed\n");
  1347. goto out;
  1348. }
  1349. k = error;
  1350. sdp = sg_dev_arr[k];
  1351. devfs_mk_cdev(MKDEV(SCSI_GENERIC_MAJOR, k),
  1352. S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP,
  1353. "%s/generic", scsidp->devfs_name);
  1354. error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, k), 1);
  1355. if (error) {
  1356. devfs_remove("%s/generic", scsidp->devfs_name);
  1357. goto out;
  1358. }
  1359. sdp->cdev = cdev;
	if (sg_sysfs_valid) {
		struct class_device * sg_class_member;

		sg_class_member = class_device_create(sg_sysfs_class, NULL,
				MKDEV(SCSI_GENERIC_MAJOR, k),
				cl_dev->dev, "%s",
				disk->disk_name);
		if (IS_ERR(sg_class_member))
			printk(KERN_WARNING "sg_add: "
				"class_device_create failed\n");
		else {
			/* only touch the class device if it was created */
			class_set_devdata(sg_class_member, sdp);
			error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
					&sg_class_member->kobj, "generic");
			if (error)
				printk(KERN_ERR "sg_add: unable to make symlink "
					"'generic' back to sg%d\n", k);
		}
	} else
		printk(KERN_WARNING "sg_add: sg_sys INvalid\n");
  1377. sdev_printk(KERN_NOTICE, scsidp,
  1378. "Attached scsi generic sg%d type %d\n", k,scsidp->type);
  1379. return 0;
  1380. out:
  1381. put_disk(disk);
  1382. if (cdev)
  1383. cdev_del(cdev);
  1384. return error;
  1385. }
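/*
 * class_interface "remove" callback, invoked when a SCSI device goes away.
 * Marks the matching Sg_device as detached, finishes or signals any
 * outstanding requests on its open file descriptors, then tears down the
 * sysfs link, class device, cdev and devfs entry.
 */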
static void
sg_remove(struct class_device *cl_dev, struct class_interface *cl_intf)
{
	struct scsi_device *scsidp = to_scsi_device(cl_dev->dev);
	Sg_device *sdp = NULL;
	unsigned long iflags;
	Sg_fd *sfp;
	Sg_fd *tsfp;
	Sg_request *srp;
	Sg_request *tsrp;
	int k, delay;

	if (NULL == sg_dev_arr)
		return;
	delay = 0;
	write_lock_irqsave(&sg_dev_arr_lock, iflags);
	for (k = 0; k < sg_dev_max; k++) {
		sdp = sg_dev_arr[k];
		if ((NULL == sdp) || (sdp->device != scsidp))
			continue;	/* dirty but lowers nesting */
		if (sdp->headfp) {
			sdp->detached = 1;
			for (sfp = sdp->headfp; sfp; sfp = tsfp) {
				tsfp = sfp->nextfp;
				for (srp = sfp->headrp; srp; srp = tsrp) {
					tsrp = srp->nextrp;
					if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
						sg_finish_rem_req(srp);
				}
				if (sfp->closed) {
					scsi_device_put(sdp->device);
					__sg_remove_sfp(sdp, sfp);
				} else {
					delay = 1;
					wake_up_interruptible(&sfp->read_wait);
					kill_fasync(&sfp->async_qp, SIGPOLL,
						    POLL_HUP);
				}
			}
			SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d, dirty\n", k));
			if (NULL == sdp->headfp) {
				sg_dev_arr[k] = NULL;
			}
		} else {	/* nothing active, simple case */
			SCSI_LOG_TIMEOUT(3, printk("sg_detach: dev=%d\n", k));
			sg_dev_arr[k] = NULL;
		}
		sg_nr_dev--;
		break;
	}
	write_unlock_irqrestore(&sg_dev_arr_lock, iflags);

	if (sdp) {
		sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
		class_device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, k));
		cdev_del(sdp->cdev);
		sdp->cdev = NULL;
		devfs_remove("%s/generic", scsidp->devfs_name);
		put_disk(sdp->disk);
		sdp->disk = NULL;
		if (NULL == sdp->headfp)
			kfree((char *) sdp);
	}

	if (delay)
		msleep(10);	/* dirty detach so delay device destruction */
}
  1450. /* Set 'perm' (4th argument) to 0 to disable module_param's definition
  1451. * of sysfs parameters (which module_param doesn't yet support).
  1452. * Sysfs parameters defined explicitly below.
  1453. */
  1454. module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO);
  1455. module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR);
  1456. MODULE_AUTHOR("Douglas Gilbert");
  1457. MODULE_DESCRIPTION("SCSI generic (sg) driver");
  1458. MODULE_LICENSE("GPL");
  1459. MODULE_VERSION(SG_VERSION_STR);
  1460. MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
  1461. MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
  1462. static int __init
  1463. init_sg(void)
  1464. {
  1465. int rc;
  1466. if (def_reserved_size >= 0)
  1467. sg_big_buff = def_reserved_size;
  1468. rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
  1469. SG_MAX_DEVS, "sg");
  1470. if (rc)
  1471. return rc;
  1472. sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic");
  1473. if ( IS_ERR(sg_sysfs_class) ) {
  1474. rc = PTR_ERR(sg_sysfs_class);
  1475. goto err_out;
  1476. }
  1477. sg_sysfs_valid = 1;
  1478. rc = scsi_register_interface(&sg_interface);
  1479. if (0 == rc) {
  1480. #ifdef CONFIG_SCSI_PROC_FS
  1481. sg_proc_init();
  1482. #endif /* CONFIG_SCSI_PROC_FS */
  1483. return 0;
  1484. }
  1485. class_destroy(sg_sysfs_class);
  1486. err_out:
  1487. unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
  1488. return rc;
  1489. }
  1490. static void __exit
  1491. exit_sg(void)
  1492. {
  1493. #ifdef CONFIG_SCSI_PROC_FS
  1494. sg_proc_cleanup();
  1495. #endif /* CONFIG_SCSI_PROC_FS */
  1496. scsi_unregister_interface(&sg_interface);
  1497. class_destroy(sg_sysfs_class);
  1498. sg_sysfs_valid = 0;
  1499. unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
  1500. SG_MAX_DEVS);
  1501. kfree((char *)sg_dev_arr);
  1502. sg_dev_arr = NULL;
  1503. sg_dev_max = 0;
  1504. }
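/*
 * Attach a data buffer to a request before it is sent to the mid level.
 * Order of preference: user-space direct I/O (when allowed, aligned and
 * not an iovec), then the per-fd reserve buffer if it is free and large
 * enough, and finally a freshly built indirect scatter-gather buffer.
 */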
static int
sg_start_req(Sg_request * srp)
{
	int res;
	Sg_fd *sfp = srp->parentfp;
	sg_io_hdr_t *hp = &srp->header;
	int dxfer_len = (int) hp->dxfer_len;
	int dxfer_dir = hp->dxfer_direction;
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_start_req: dxfer_len=%d\n", dxfer_len));
	if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE))
		return 0;
	if (sg_allow_dio && (hp->flags & SG_FLAG_DIRECT_IO) &&
	    (dxfer_dir != SG_DXFER_UNKNOWN) && (0 == hp->iovec_count) &&
	    (!sfp->parentdp->device->host->unchecked_isa_dma)) {
		res = sg_build_direct(srp, sfp, dxfer_len);
		if (res <= 0)	/* -ve -> error, 0 -> done, 1 -> try indirect */
			return res;
	}
	if ((!sg_res_in_use(sfp)) && (dxfer_len <= rsv_schp->bufflen))
		sg_link_reserve(sfp, srp, dxfer_len);
	else {
		res = sg_build_indirect(req_schp, sfp, dxfer_len);
		if (res) {
			sg_remove_scat(req_schp);
			return res;
		}
	}
	return 0;
}
static void
sg_finish_rem_req(Sg_request * srp)
{
	Sg_fd *sfp = srp->parentfp;
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used));
	if (srp->res_used)
		sg_unlink_reserve(sfp, srp);
	else
		sg_remove_scat(req_schp);
	sg_remove_request(sfp, srp);
}
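/*
 * Allocate the scatterlist array itself (not the data pages) for up to
 * 'tablesize' entries.  Returns the number of entries provided for, or
 * -ENOMEM.
 */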
static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
	int sg_bufflen = tablesize * sizeof(struct scatterlist);
	unsigned int gfp_flags = GFP_ATOMIC | __GFP_NOWARN;

	/*
	 * TODO: test without low_dma, we should not need it since
	 * the block layer will bounce the buffer for us
	 *
	 * XXX(hch): we shouldn't need GFP_DMA for the actual S/G list.
	 */
	if (sfp->low_dma)
		gfp_flags |= GFP_DMA;
	schp->buffer = kzalloc(sg_bufflen, gfp_flags);
	if (!schp->buffer)
		return -ENOMEM;
	schp->sglist_len = sg_bufflen;
	return tablesize;	/* number of scat_gath elements allocated */
}
  1567. #ifdef SG_ALLOW_DIO_CODE
  1568. /* vvvvvvvv following code borrowed from st driver's direct IO vvvvvvvvv */
  1569. /* TODO: hopefully we can use the generic block layer code */
/* Pin down user pages and put them into a scatter gather list.
 * Returns <= 0 if not all of the pages could be mapped
 * (i.e., the call either succeeds completely or fails).
 */
  1574. static int
  1575. st_map_user_pages(struct scatterlist *sgl, const unsigned int max_pages,
  1576. unsigned long uaddr, size_t count, int rw)
  1577. {
  1578. unsigned long end = (uaddr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
  1579. unsigned long start = uaddr >> PAGE_SHIFT;
  1580. const int nr_pages = end - start;
  1581. int res, i, j;
  1582. struct page **pages;
  1583. /* User attempted Overflow! */
  1584. if ((uaddr + count) < uaddr)
  1585. return -EINVAL;
  1586. /* Too big */
  1587. if (nr_pages > max_pages)
  1588. return -ENOMEM;
  1589. /* Hmm? */
  1590. if (count == 0)
  1591. return 0;
  1592. if ((pages = kmalloc(max_pages * sizeof(*pages), GFP_ATOMIC)) == NULL)
  1593. return -ENOMEM;
  1594. /* Try to fault in all of the necessary pages */
  1595. down_read(&current->mm->mmap_sem);
  1596. /* rw==READ means read from drive, write into memory area */
  1597. res = get_user_pages(
  1598. current,
  1599. current->mm,
  1600. uaddr,
  1601. nr_pages,
  1602. rw == READ,
  1603. 0, /* don't force */
  1604. pages,
  1605. NULL);
  1606. up_read(&current->mm->mmap_sem);
  1607. /* Errors and no page mapped should return here */
  1608. if (res < nr_pages)
  1609. goto out_unmap;
  1610. for (i=0; i < nr_pages; i++) {
		/* FIXME: flush superfluous for rw==READ,
		 * probably wrong function for rw==WRITE
		 */
  1614. flush_dcache_page(pages[i]);
  1615. /* ?? Is locking needed? I don't think so */
  1616. /* if (TestSetPageLocked(pages[i]))
  1617. goto out_unlock; */
  1618. }
  1619. sgl[0].page = pages[0];
  1620. sgl[0].offset = uaddr & ~PAGE_MASK;
  1621. if (nr_pages > 1) {
  1622. sgl[0].length = PAGE_SIZE - sgl[0].offset;
  1623. count -= sgl[0].length;
  1624. for (i=1; i < nr_pages ; i++) {
  1625. sgl[i].page = pages[i];
  1626. sgl[i].length = count < PAGE_SIZE ? count : PAGE_SIZE;
  1627. count -= PAGE_SIZE;
  1628. }
  1629. }
  1630. else {
  1631. sgl[0].length = count;
  1632. }
  1633. kfree(pages);
  1634. return nr_pages;
  1635. out_unmap:
  1636. if (res > 0) {
  1637. for (j=0; j < res; j++)
  1638. page_cache_release(pages[j]);
  1639. res = 0;
  1640. }
  1641. kfree(pages);
  1642. return res;
  1643. }
  1644. /* And unmap them... */
  1645. static int
  1646. st_unmap_user_pages(struct scatterlist *sgl, const unsigned int nr_pages,
  1647. int dirtied)
  1648. {
  1649. int i;
  1650. for (i=0; i < nr_pages; i++) {
  1651. struct page *page = sgl[i].page;
  1652. if (dirtied)
  1653. SetPageDirty(page);
  1654. /* unlock_page(page); */
  1655. /* FIXME: cache flush missing for rw==READ
  1656. * FIXME: call the correct reference counting function
  1657. */
  1658. page_cache_release(page);
  1659. }
  1660. return 0;
  1661. }
  1662. /* ^^^^^^^^ above code borrowed from st driver's direct IO ^^^^^^^^^ */
  1663. #endif
/* Returns: -ve -> error, 0 -> done, 1 -> try indirect */
static int
sg_build_direct(Sg_request * srp, Sg_fd * sfp, int dxfer_len)
{
#ifdef SG_ALLOW_DIO_CODE
	sg_io_hdr_t *hp = &srp->header;
	Sg_scatter_hold *schp = &srp->data;
	int sg_tablesize = sfp->parentdp->sg_tablesize;
	int mx_sc_elems, res;
	struct scsi_device *sdev = sfp->parentdp->device;

	if (((unsigned long)hp->dxferp &
	     queue_dma_alignment(sdev->request_queue)) != 0)
		return 1;

	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
	if (mx_sc_elems <= 0) {
		return 1;
	}
	res = st_map_user_pages(schp->buffer, mx_sc_elems,
				(unsigned long)hp->dxferp, dxfer_len,
				(SG_DXFER_TO_DEV == hp->dxfer_direction) ? 1 : 0);
	if (res <= 0)
		return 1;
	schp->k_use_sg = res;
	schp->dio_in_use = 1;
	hp->info |= SG_INFO_DIRECT_IO;
	return 0;
#else
	return 1;
#endif
}
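/*
 * Build an indirect (kernel allocated) data buffer: round the request up
 * to a multiple of SG_SECTOR_SZ, then allocate pages in chunks of at most
 * SG_SCATTER_SZ bytes until the whole length is covered or the scatter
 * list is exhausted.
 */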
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
	struct scatterlist *sg;
	int ret_sz = 0, k, rem_sz, num, mx_sc_elems;
	int sg_tablesize = sfp->parentdp->sg_tablesize;
	int blk_size = buff_size;
	struct page *p = NULL;

	if ((blk_size < 0) || (!sfp))
		return -EFAULT;
	if (0 == blk_size)
		++blk_size;	/* don't know why */
	/* round request up to next highest SG_SECTOR_SZ byte boundary */
	blk_size = (blk_size + SG_SECTOR_MSK) & (~SG_SECTOR_MSK);
	SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
				   buff_size, blk_size));

	/* N.B. ret_sz carried into this block ... */
	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
	if (mx_sc_elems < 0)
		return mx_sc_elems;	/* most likely -ENOMEM */

	for (k = 0, sg = schp->buffer, rem_sz = blk_size;
	     (rem_sz > 0) && (k < mx_sc_elems);
	     ++k, rem_sz -= ret_sz, ++sg) {
		num = (rem_sz > SG_SCATTER_SZ) ? SG_SCATTER_SZ : rem_sz;
		p = sg_page_malloc(num, sfp->low_dma, &ret_sz);
		if (!p)
			return -ENOMEM;
		sg->page = p;
		sg->length = ret_sz;
		SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, a=0x%p, len=%d\n",
					   k, p, ret_sz));
	}			/* end of for loop */
	schp->k_use_sg = k;
	SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, rem_sz=%d\n", k, rem_sz));
	schp->bufflen = blk_size;
	if (rem_sz > 0)		/* must have failed */
		return -ENOMEM;
	return 0;
}
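/*
 * Copy the "data out" payload from user space (a flat buffer or an iovec)
 * into the request's scatter-gather buffer before the command is issued.
 * Nothing is copied for direct I/O, mmap I/O or SG_FLAG_NO_DXFER requests.
 */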
  1733. static int
  1734. sg_write_xfer(Sg_request * srp)
  1735. {
  1736. sg_io_hdr_t *hp = &srp->header;
  1737. Sg_scatter_hold *schp = &srp->data;
  1738. struct scatterlist *sg = schp->buffer;
  1739. int num_xfer = 0;
  1740. int j, k, onum, usglen, ksglen, res;
  1741. int iovec_count = (int) hp->iovec_count;
  1742. int dxfer_dir = hp->dxfer_direction;
  1743. unsigned char *p;
  1744. unsigned char __user *up;
  1745. int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
  1746. if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_TO_DEV == dxfer_dir) ||
  1747. (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
  1748. num_xfer = (int) (new_interface ? hp->dxfer_len : hp->flags);
  1749. if (schp->bufflen < num_xfer)
  1750. num_xfer = schp->bufflen;
  1751. }
  1752. if ((num_xfer <= 0) || (schp->dio_in_use) ||
  1753. (new_interface
  1754. && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
  1755. return 0;
  1756. SCSI_LOG_TIMEOUT(4, printk("sg_write_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
  1757. num_xfer, iovec_count, schp->k_use_sg));
  1758. if (iovec_count) {
  1759. onum = iovec_count;
  1760. if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
  1761. return -EFAULT;
  1762. } else
  1763. onum = 1;
  1764. ksglen = sg->length;
  1765. p = page_address(sg->page);
  1766. for (j = 0, k = 0; j < onum; ++j) {
  1767. res = sg_u_iovec(hp, iovec_count, j, 1, &usglen, &up);
  1768. if (res)
  1769. return res;
  1770. for (; p; ++sg, ksglen = sg->length,
  1771. p = page_address(sg->page)) {
  1772. if (usglen <= 0)
  1773. break;
  1774. if (ksglen > usglen) {
  1775. if (usglen >= num_xfer) {
  1776. if (__copy_from_user(p, up, num_xfer))
  1777. return -EFAULT;
  1778. return 0;
  1779. }
  1780. if (__copy_from_user(p, up, usglen))
  1781. return -EFAULT;
  1782. p += usglen;
  1783. ksglen -= usglen;
  1784. break;
  1785. } else {
  1786. if (ksglen >= num_xfer) {
  1787. if (__copy_from_user(p, up, num_xfer))
  1788. return -EFAULT;
  1789. return 0;
  1790. }
  1791. if (__copy_from_user(p, up, ksglen))
  1792. return -EFAULT;
  1793. up += ksglen;
  1794. usglen -= ksglen;
  1795. }
  1796. ++k;
  1797. if (k >= schp->k_use_sg)
  1798. return 0;
  1799. }
  1800. }
  1801. return 0;
  1802. }
  1803. static int
  1804. sg_u_iovec(sg_io_hdr_t * hp, int sg_num, int ind,
  1805. int wr_xf, int *countp, unsigned char __user **up)
  1806. {
  1807. int num_xfer = (int) hp->dxfer_len;
  1808. unsigned char __user *p = hp->dxferp;
  1809. int count;
  1810. if (0 == sg_num) {
  1811. if (wr_xf && ('\0' == hp->interface_id))
  1812. count = (int) hp->flags; /* holds "old" input_size */
  1813. else
  1814. count = num_xfer;
  1815. } else {
  1816. sg_iovec_t iovec;
  1817. if (__copy_from_user(&iovec, p + ind*SZ_SG_IOVEC, SZ_SG_IOVEC))
  1818. return -EFAULT;
  1819. p = iovec.iov_base;
  1820. count = (int) iovec.iov_len;
  1821. }
  1822. if (!access_ok(wr_xf ? VERIFY_READ : VERIFY_WRITE, p, count))
  1823. return -EFAULT;
  1824. if (up)
  1825. *up = p;
  1826. if (countp)
  1827. *countp = count;
  1828. return 0;
  1829. }
static void
sg_remove_scat(Sg_scatter_hold * schp)
{
	SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg));
	if (schp->buffer && (schp->sglist_len > 0)) {
		struct scatterlist *sg = schp->buffer;

		if (schp->dio_in_use) {
#ifdef SG_ALLOW_DIO_CODE
			st_unmap_user_pages(sg, schp->k_use_sg, TRUE);
#endif
		} else {
			int k;

			for (k = 0; (k < schp->k_use_sg) && sg->page;
			     ++k, ++sg) {
				SCSI_LOG_TIMEOUT(5, printk(
				    "sg_remove_scat: k=%d, a=0x%p, len=%d\n",
				    k, sg->page, sg->length));
				sg_page_free(sg->page, sg->length);
			}
		}
		kfree(schp->buffer);
	}
	memset(schp, 0, sizeof (*schp));
}
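/*
 * Counterpart of sg_write_xfer(): copy the "data in" payload from the
 * scatter-gather buffer back to user space (flat buffer or iovec) after
 * the command has completed.  Skipped for direct, mmap and NO_DXFER I/O.
 */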
  1854. static int
  1855. sg_read_xfer(Sg_request * srp)
  1856. {
  1857. sg_io_hdr_t *hp = &srp->header;
  1858. Sg_scatter_hold *schp = &srp->data;
  1859. struct scatterlist *sg = schp->buffer;
  1860. int num_xfer = 0;
  1861. int j, k, onum, usglen, ksglen, res;
  1862. int iovec_count = (int) hp->iovec_count;
  1863. int dxfer_dir = hp->dxfer_direction;
  1864. unsigned char *p;
  1865. unsigned char __user *up;
  1866. int new_interface = ('\0' == hp->interface_id) ? 0 : 1;
  1867. if ((SG_DXFER_UNKNOWN == dxfer_dir) || (SG_DXFER_FROM_DEV == dxfer_dir)
  1868. || (SG_DXFER_TO_FROM_DEV == dxfer_dir)) {
  1869. num_xfer = hp->dxfer_len;
  1870. if (schp->bufflen < num_xfer)
  1871. num_xfer = schp->bufflen;
  1872. }
  1873. if ((num_xfer <= 0) || (schp->dio_in_use) ||
  1874. (new_interface
  1875. && ((SG_FLAG_NO_DXFER | SG_FLAG_MMAP_IO) & hp->flags)))
  1876. return 0;
  1877. SCSI_LOG_TIMEOUT(4, printk("sg_read_xfer: num_xfer=%d, iovec_count=%d, k_use_sg=%d\n",
  1878. num_xfer, iovec_count, schp->k_use_sg));
  1879. if (iovec_count) {
  1880. onum = iovec_count;
  1881. if (!access_ok(VERIFY_READ, hp->dxferp, SZ_SG_IOVEC * onum))
  1882. return -EFAULT;
  1883. } else
  1884. onum = 1;
  1885. p = page_address(sg->page);
  1886. ksglen = sg->length;
  1887. for (j = 0, k = 0; j < onum; ++j) {
  1888. res = sg_u_iovec(hp, iovec_count, j, 0, &usglen, &up);
  1889. if (res)
  1890. return res;
  1891. for (; p; ++sg, ksglen = sg->length,
  1892. p = page_address(sg->page)) {
  1893. if (usglen <= 0)
  1894. break;
  1895. if (ksglen > usglen) {
  1896. if (usglen >= num_xfer) {
  1897. if (__copy_to_user(up, p, num_xfer))
  1898. return -EFAULT;
  1899. return 0;
  1900. }
  1901. if (__copy_to_user(up, p, usglen))
  1902. return -EFAULT;
  1903. p += usglen;
  1904. ksglen -= usglen;
  1905. break;
  1906. } else {
  1907. if (ksglen >= num_xfer) {
  1908. if (__copy_to_user(up, p, num_xfer))
  1909. return -EFAULT;
  1910. return 0;
  1911. }
  1912. if (__copy_to_user(up, p, ksglen))
  1913. return -EFAULT;
  1914. up += ksglen;
  1915. usglen -= ksglen;
  1916. }
  1917. ++k;
  1918. if (k >= schp->k_use_sg)
  1919. return 0;
  1920. }
  1921. }
  1922. return 0;
  1923. }
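/*
 * Copy up to num_read_xfer bytes of the request's data buffer straight to
 * user space; the "o" is for the original sg_header reply path, where the
 * data follows the header returned by read(2).
 */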
  1924. static int
  1925. sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer)
  1926. {
  1927. Sg_scatter_hold *schp = &srp->data;
  1928. struct scatterlist *sg = schp->buffer;
  1929. int k, num;
  1930. SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n",
  1931. num_read_xfer));
  1932. if ((!outp) || (num_read_xfer <= 0))
  1933. return 0;
  1934. for (k = 0; (k < schp->k_use_sg) && sg->page; ++k, ++sg) {
  1935. num = sg->length;
  1936. if (num > num_read_xfer) {
  1937. if (__copy_to_user(outp, page_address(sg->page),
  1938. num_read_xfer))
  1939. return -EFAULT;
  1940. break;
  1941. } else {
  1942. if (__copy_to_user(outp, page_address(sg->page),
  1943. num))
  1944. return -EFAULT;
  1945. num_read_xfer -= num;
  1946. if (num_read_xfer <= 0)
  1947. break;
  1948. outp += num;
  1949. }
  1950. }
  1951. return 0;
  1952. }
static void
sg_build_reserve(Sg_fd * sfp, int req_size)
{
	Sg_scatter_hold *schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size));
	do {
		if (req_size < PAGE_SIZE)
			req_size = PAGE_SIZE;
		if (0 == sg_build_indirect(schp, sfp, req_size))
			return;
		else
			sg_remove_scat(schp);
		req_size >>= 1;	/* divide by 2 */
	} while (req_size > (PAGE_SIZE / 2));
}
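/*
 * Attach (part of) the fd's reserve buffer to a request: walk the reserve
 * scatter list until 'size' bytes are covered, temporarily trim the last
 * entry used to fit, and remember its original length so that
 * sg_unlink_reserve() can restore it.
 */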
static void
sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;
	struct scatterlist *sg = rsv_schp->buffer;
	int k, num, rem;

	srp->res_used = 1;
	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
	rem = size = (size + 1) & (~1);	/* round to even for aha1542 */

	for (k = 0; k < rsv_schp->k_use_sg; ++k, ++sg) {
		num = sg->length;
		if (rem <= num) {
			sfp->save_scat_len = num;
			sg->length = rem;
			req_schp->k_use_sg = k + 1;
			req_schp->sglist_len = rsv_schp->sglist_len;
			req_schp->buffer = rsv_schp->buffer;
			req_schp->bufflen = size;
			req_schp->b_malloc_len = rsv_schp->b_malloc_len;
			break;
		} else
			rem -= num;
	}
	if (k >= rsv_schp->k_use_sg)
		SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
}
static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;

	SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
				   (int) req_schp->k_use_sg));
	if ((rsv_schp->k_use_sg > 0) && (req_schp->k_use_sg > 0)) {
		struct scatterlist *sg = rsv_schp->buffer;

		if (sfp->save_scat_len > 0)
			(sg + (req_schp->k_use_sg - 1))->length =
			    (unsigned) sfp->save_scat_len;
		else
			SCSI_LOG_TIMEOUT(1, printk("sg_unlink_reserve: BAD save_scat_len\n"));
	}
	req_schp->k_use_sg = 0;
	req_schp->bufflen = 0;
	req_schp->buffer = NULL;
	req_schp->sglist_len = 0;
	sfp->save_scat_len = 0;
	srp->res_used = 0;
}
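/*
 * Find a completed request on this fd (matching pack_id, or any request
 * when pack_id is -1) and mark it done=2 so that concurrent readers do
 * not pick up the same response.
 */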
static Sg_request *
sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
	Sg_request *resp;
	unsigned long iflags;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (resp = sfp->headrp; resp; resp = resp->nextrp) {
		/* look for requests that are ready + not SG_IO owned */
		if ((1 == resp->done) && (!resp->sg_io_owned) &&
		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
			resp->done = 2;	/* guard against other readers */
			break;
		}
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return resp;
}
  2034. #ifdef CONFIG_SCSI_PROC_FS
  2035. static Sg_request *
  2036. sg_get_nth_request(Sg_fd * sfp, int nth)
  2037. {
  2038. Sg_request *resp;
  2039. unsigned long iflags;
  2040. int k;
  2041. read_lock_irqsave(&sfp->rq_list_lock, iflags);
  2042. for (k = 0, resp = sfp->headrp; resp && (k < nth);
  2043. ++k, resp = resp->nextrp) ;
  2044. read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  2045. return resp;
  2046. }
  2047. #endif
  2048. /* always adds to end of list */
static Sg_request *
sg_add_request(Sg_fd * sfp)
{
	int k;
	unsigned long iflags;
	Sg_request *resp;
	Sg_request *rp = sfp->req_arr;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	resp = sfp->headrp;
	if (!resp) {
		memset(rp, 0, sizeof (Sg_request));
		rp->parentfp = sfp;
		resp = rp;
		sfp->headrp = resp;
	} else {
		if (0 == sfp->cmd_q)
			resp = NULL;	/* command queuing disallowed */
		else {
			for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
				if (!rp->parentfp)
					break;
			}
			if (k < SG_MAX_QUEUE) {
				memset(rp, 0, sizeof (Sg_request));
				rp->parentfp = sfp;
				while (resp->nextrp)
					resp = resp->nextrp;
				resp->nextrp = rp;
				resp = rp;
			} else
				resp = NULL;
		}
	}
	if (resp) {
		resp->nextrp = NULL;
		resp->header.duration = jiffies_to_msecs(jiffies);
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return resp;
}
  2089. /* Return of 1 for found; 0 for not found */
  2090. static int
  2091. sg_remove_request(Sg_fd * sfp, Sg_request * srp)
  2092. {
  2093. Sg_request *prev_rp;
  2094. Sg_request *rp;
  2095. unsigned long iflags;
  2096. int res = 0;
  2097. if ((!sfp) || (!srp) || (!sfp->headrp))
  2098. return res;
  2099. write_lock_irqsave(&sfp->rq_list_lock, iflags);
  2100. prev_rp = sfp->headrp;
  2101. if (srp == prev_rp) {
  2102. sfp->headrp = prev_rp->nextrp;
  2103. prev_rp->parentfp = NULL;
  2104. res = 1;
  2105. } else {
  2106. while ((rp = prev_rp->nextrp)) {
  2107. if (srp == rp) {
  2108. prev_rp->nextrp = rp->nextrp;
  2109. rp->parentfp = NULL;
  2110. res = 1;
  2111. break;
  2112. }
  2113. prev_rp = rp;
  2114. }
  2115. }
  2116. write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
  2117. return res;
  2118. }
  2119. #ifdef CONFIG_SCSI_PROC_FS
  2120. static Sg_fd *
  2121. sg_get_nth_sfp(Sg_device * sdp, int nth)
  2122. {
  2123. Sg_fd *resp;
  2124. unsigned long iflags;
  2125. int k;
  2126. read_lock_irqsave(&sg_dev_arr_lock, iflags);
  2127. for (k = 0, resp = sdp->headfp; resp && (k < nth);
  2128. ++k, resp = resp->nextfp) ;
  2129. read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
  2130. return resp;
  2131. }
  2132. #endif
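/*
 * Allocate and initialize a per-open Sg_fd: default timeouts and flags,
 * link it onto the tail of the owning Sg_device's fd list, and build its
 * reserve buffer of (up to) sg_big_buff bytes.
 */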
  2133. static Sg_fd *
  2134. sg_add_sfp(Sg_device * sdp, int dev)
  2135. {
  2136. Sg_fd *sfp;
  2137. unsigned long iflags;
  2138. sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
  2139. if (!sfp)
  2140. return NULL;
  2141. init_waitqueue_head(&sfp->read_wait);
  2142. rwlock_init(&sfp->rq_list_lock);
  2143. sfp->timeout = SG_DEFAULT_TIMEOUT;
  2144. sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
  2145. sfp->force_packid = SG_DEF_FORCE_PACK_ID;
  2146. sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ?
  2147. sdp->device->host->unchecked_isa_dma : 1;
  2148. sfp->cmd_q = SG_DEF_COMMAND_Q;
  2149. sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
  2150. sfp->parentdp = sdp;
  2151. write_lock_irqsave(&sg_dev_arr_lock, iflags);
  2152. if (!sdp->headfp)
  2153. sdp->headfp = sfp;
  2154. else { /* add to tail of existing list */
  2155. Sg_fd *pfp = sdp->headfp;
  2156. while (pfp->nextfp)
  2157. pfp = pfp->nextfp;
  2158. pfp->nextfp = sfp;
  2159. }
  2160. write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
  2161. SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
  2162. sg_build_reserve(sfp, sg_big_buff);
  2163. SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
  2164. sfp->reserve.bufflen, sfp->reserve.k_use_sg));
  2165. return sfp;
  2166. }
  2167. static void
  2168. __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
  2169. {
  2170. Sg_fd *fp;
  2171. Sg_fd *prev_fp;
  2172. prev_fp = sdp->headfp;
  2173. if (sfp == prev_fp)
  2174. sdp->headfp = prev_fp->nextfp;
  2175. else {
  2176. while ((fp = prev_fp->nextfp)) {
  2177. if (sfp == fp) {
  2178. prev_fp->nextfp = fp->nextfp;
  2179. break;
  2180. }
  2181. prev_fp = fp;
  2182. }
  2183. }
  2184. if (sfp->reserve.bufflen > 0) {
  2185. SCSI_LOG_TIMEOUT(6,
  2186. printk("__sg_remove_sfp: bufflen=%d, k_use_sg=%d\n",
  2187. (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
  2188. if (sfp->mmap_called)
  2189. sg_rb_correct4mmap(&sfp->reserve, 0); /* undo correction */
  2190. sg_remove_scat(&sfp->reserve);
  2191. }
  2192. sfp->parentdp = NULL;
  2193. SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp: sfp=0x%p\n", sfp));
  2194. kfree(sfp);
  2195. }
  2196. /* Returns 0 in normal case, 1 when detached and sdp object removed */
static int
sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
{
	Sg_request *srp;
	Sg_request *tsrp;
	int dirty = 0;
	int res = 0;

	for (srp = sfp->headrp; srp; srp = tsrp) {
		tsrp = srp->nextrp;
		if (sg_srp_done(srp, sfp))
			sg_finish_rem_req(srp);
		else
			++dirty;
	}
	if (0 == dirty) {
		unsigned long iflags;

		write_lock_irqsave(&sg_dev_arr_lock, iflags);
		__sg_remove_sfp(sdp, sfp);
		if (sdp->detached && (NULL == sdp->headfp)) {
			int k, maxd;

			maxd = sg_dev_max;
			for (k = 0; k < maxd; ++k) {
				if (sdp == sg_dev_arr[k])
					break;
			}
			if (k < maxd)
				sg_dev_arr[k] = NULL;
			kfree((char *) sdp);
			res = 1;
		}
		write_unlock_irqrestore(&sg_dev_arr_lock, iflags);
	} else {
		/* MOD_INC's to inhibit unloading sg and associated adapter driver */
		/* only bump the access_count if we actually succeeded in
		 * throwing another counter on the host module */
		scsi_device_get(sdp->device);	/* XXX: retval ignored? */

		sfp->closed = 1;	/* flag dirty state on this fd */
		SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
					   dirty));
	}
	return res;
}
static int
sg_res_in_use(Sg_fd * sfp)
{
	const Sg_request *srp;
	unsigned long iflags;

	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp)
		if (srp->res_used)
			break;
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return srp ? 1 : 0;
}
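/*
 * Allocate a physically contiguous, power-of-two-order block of pages for
 * the scatter list.  If the requested size cannot be obtained and retSzp
 * is non-NULL, progressively halve the order and report the size actually
 * obtained through *retSzp.
 */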
/* If retSzp==NULL want exact size or fail */
static struct page *
sg_page_malloc(int rqSz, int lowDma, int *retSzp)
{
	struct page *resp = NULL;
	gfp_t page_mask;
	int order, a_size;
	int resSz = rqSz;

	if (rqSz <= 0)
		return resp;

	if (lowDma)
		page_mask = GFP_ATOMIC | GFP_DMA | __GFP_NOWARN;
	else
		page_mask = GFP_ATOMIC | __GFP_NOWARN;

	for (order = 0, a_size = PAGE_SIZE; a_size < rqSz;
	     order++, a_size <<= 1) ;
	resp = alloc_pages(page_mask, order);
	while ((!resp) && order && retSzp) {
		--order;
		a_size >>= 1;	/* divide by 2, until PAGE_SIZE */
		resp = alloc_pages(page_mask, order);	/* try half */
		resSz = a_size;
	}
	if (resp) {
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			memset(page_address(resp), 0, resSz);
		if (retSzp)
			*retSzp = resSz;
	}
	return resp;
}
  2282. static void
  2283. sg_page_free(struct page *page, int size)
  2284. {
  2285. int order, a_size;
  2286. if (!page)
  2287. return;
  2288. for (order = 0, a_size = PAGE_SIZE; a_size < size;
  2289. order++, a_size <<= 1) ;
  2290. __free_pages(page, order);
  2291. }
#ifndef MAINTENANCE_IN_CMD
#define MAINTENANCE_IN_CMD 0xa3
#endif

static unsigned char allow_ops[] = { TEST_UNIT_READY, REQUEST_SENSE,
	INQUIRY, READ_CAPACITY, READ_BUFFER, READ_6, READ_10, READ_12,
	READ_16, MODE_SENSE, MODE_SENSE_10, LOG_SENSE, REPORT_LUNS,
	SERVICE_ACTION_IN, RECEIVE_DIAGNOSTIC, READ_LONG, MAINTENANCE_IN_CMD
};
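/*
 * sg_allow_access() permits a command only if its opcode appears in
 * allow_ops above; scanner-type devices are exempt from the check.
 */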
static int
sg_allow_access(unsigned char opcode, char dev_type)
{
	int k;

	if (TYPE_SCANNER == dev_type)	/* TYPE_ROM maybe burner */
		return 1;
	for (k = 0; k < sizeof (allow_ops); ++k) {
		if (opcode == allow_ops[k])
			return 1;
	}
	return 0;
}
  2312. #ifdef CONFIG_SCSI_PROC_FS
  2313. static int
  2314. sg_last_dev(void)
  2315. {
  2316. int k;
  2317. unsigned long iflags;
  2318. read_lock_irqsave(&sg_dev_arr_lock, iflags);
  2319. for (k = sg_dev_max - 1; k >= 0; --k)
  2320. if (sg_dev_arr[k] && sg_dev_arr[k]->device)
  2321. break;
  2322. read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
  2323. return k + 1; /* origin 1 */
  2324. }
  2325. #endif
  2326. static Sg_device *
  2327. sg_get_dev(int dev)
  2328. {
  2329. Sg_device *sdp = NULL;
  2330. unsigned long iflags;
  2331. if (sg_dev_arr && (dev >= 0)) {
  2332. read_lock_irqsave(&sg_dev_arr_lock, iflags);
  2333. if (dev < sg_dev_max)
  2334. sdp = sg_dev_arr[dev];
  2335. read_unlock_irqrestore(&sg_dev_arr_lock, iflags);
  2336. }
  2337. return sdp;
  2338. }
  2339. #ifdef CONFIG_SCSI_PROC_FS
  2340. static struct proc_dir_entry *sg_proc_sgp = NULL;
  2341. static char sg_proc_sg_dirname[] = "scsi/sg";
  2342. static int sg_proc_seq_show_int(struct seq_file *s, void *v);
  2343. static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
  2344. static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
  2345. size_t count, loff_t *off);
  2346. static struct file_operations adio_fops = {
  2347. /* .owner, .read and .llseek added in sg_proc_init() */
  2348. .open = sg_proc_single_open_adio,
  2349. .write = sg_proc_write_adio,
  2350. .release = single_release,
  2351. };
  2352. static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
  2353. static ssize_t sg_proc_write_dressz(struct file *filp,
  2354. const char __user *buffer, size_t count, loff_t *off);
  2355. static struct file_operations dressz_fops = {
  2356. .open = sg_proc_single_open_dressz,
  2357. .write = sg_proc_write_dressz,
  2358. .release = single_release,
  2359. };
  2360. static int sg_proc_seq_show_version(struct seq_file *s, void *v);
  2361. static int sg_proc_single_open_version(struct inode *inode, struct file *file);
  2362. static struct file_operations version_fops = {
  2363. .open = sg_proc_single_open_version,
  2364. .release = single_release,
  2365. };
  2366. static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
  2367. static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
  2368. static struct file_operations devhdr_fops = {
  2369. .open = sg_proc_single_open_devhdr,
  2370. .release = single_release,
  2371. };
  2372. static int sg_proc_seq_show_dev(struct seq_file *s, void *v);
  2373. static int sg_proc_open_dev(struct inode *inode, struct file *file);
  2374. static void * dev_seq_start(struct seq_file *s, loff_t *pos);
  2375. static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
  2376. static void dev_seq_stop(struct seq_file *s, void *v);
  2377. static struct file_operations dev_fops = {
  2378. .open = sg_proc_open_dev,
  2379. .release = seq_release,
  2380. };
  2381. static struct seq_operations dev_seq_ops = {
  2382. .start = dev_seq_start,
  2383. .next = dev_seq_next,
  2384. .stop = dev_seq_stop,
  2385. .show = sg_proc_seq_show_dev,
  2386. };
  2387. static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
  2388. static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
  2389. static struct file_operations devstrs_fops = {
  2390. .open = sg_proc_open_devstrs,
  2391. .release = seq_release,
  2392. };
  2393. static struct seq_operations devstrs_seq_ops = {
  2394. .start = dev_seq_start,
  2395. .next = dev_seq_next,
  2396. .stop = dev_seq_stop,
  2397. .show = sg_proc_seq_show_devstrs,
  2398. };
  2399. static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
  2400. static int sg_proc_open_debug(struct inode *inode, struct file *file);
  2401. static struct file_operations debug_fops = {
  2402. .open = sg_proc_open_debug,
  2403. .release = seq_release,
  2404. };
  2405. static struct seq_operations debug_seq_ops = {
  2406. .start = dev_seq_start,
  2407. .next = dev_seq_next,
  2408. .stop = dev_seq_stop,
  2409. .show = sg_proc_seq_show_debug,
  2410. };
  2411. struct sg_proc_leaf {
  2412. const char * name;
  2413. struct file_operations * fops;
  2414. };
  2415. static struct sg_proc_leaf sg_proc_leaf_arr[] = {
  2416. {"allow_dio", &adio_fops},
  2417. {"debug", &debug_fops},
  2418. {"def_reserved_size", &dressz_fops},
  2419. {"device_hdr", &devhdr_fops},
  2420. {"devices", &dev_fops},
  2421. {"device_strs", &devstrs_fops},
  2422. {"version", &version_fops}
  2423. };
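/*
 * Create the /proc/scsi/sg directory and one entry per element of
 * sg_proc_leaf_arr; entries whose fops provide a write method are also
 * made writable by root.  The common seq_file read/llseek methods are
 * filled in here rather than in the static initializers above.
 */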
  2424. static int
  2425. sg_proc_init(void)
  2426. {
  2427. int k, mask;
  2428. int num_leaves =
  2429. sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
  2430. struct proc_dir_entry *pdep;
  2431. struct sg_proc_leaf * leaf;
  2432. sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
  2433. if (!sg_proc_sgp)
  2434. return 1;
  2435. for (k = 0; k < num_leaves; ++k) {
  2436. leaf = &sg_proc_leaf_arr[k];
  2437. mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
  2438. pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp);
  2439. if (pdep) {
  2440. leaf->fops->owner = THIS_MODULE,
  2441. leaf->fops->read = seq_read,
  2442. leaf->fops->llseek = seq_lseek,
  2443. pdep->proc_fops = leaf->fops;
  2444. }
  2445. }
  2446. return 0;
  2447. }
  2448. static void
  2449. sg_proc_cleanup(void)
  2450. {
  2451. int k;
  2452. int num_leaves =
  2453. sizeof (sg_proc_leaf_arr) / sizeof (sg_proc_leaf_arr[0]);
  2454. if (!sg_proc_sgp)
  2455. return;
  2456. for (k = 0; k < num_leaves; ++k)
  2457. remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp);
  2458. remove_proc_entry(sg_proc_sg_dirname, NULL);
  2459. }
  2460. static int sg_proc_seq_show_int(struct seq_file *s, void *v)
  2461. {
  2462. seq_printf(s, "%d\n", *((int *)s->private));
  2463. return 0;
  2464. }
  2465. static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
  2466. {
  2467. return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
  2468. }
  2469. static ssize_t
  2470. sg_proc_write_adio(struct file *filp, const char __user *buffer,
  2471. size_t count, loff_t *off)
  2472. {
  2473. int num;
  2474. char buff[11];
  2475. if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
  2476. return -EACCES;
  2477. num = (count < 10) ? count : 10;
  2478. if (copy_from_user(buff, buffer, num))
  2479. return -EFAULT;
  2480. buff[num] = '\0';
  2481. sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
  2482. return count;
  2483. }
  2484. static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
  2485. {
  2486. return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
  2487. }
  2488. static ssize_t
  2489. sg_proc_write_dressz(struct file *filp, const char __user *buffer,
  2490. size_t count, loff_t *off)
  2491. {
  2492. int num;
  2493. unsigned long k = ULONG_MAX;
  2494. char buff[11];
  2495. if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
  2496. return -EACCES;
  2497. num = (count < 10) ? count : 10;
  2498. if (copy_from_user(buff, buffer, num))
  2499. return -EFAULT;
  2500. buff[num] = '\0';
  2501. k = simple_strtoul(buff, NULL, 10);
  2502. if (k <= 1048576) { /* limit "big buff" to 1 MB */
  2503. sg_big_buff = k;
  2504. return count;
  2505. }
  2506. return -ERANGE;
  2507. }
  2508. static int sg_proc_seq_show_version(struct seq_file *s, void *v)
  2509. {
  2510. seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
  2511. sg_version_date);
  2512. return 0;
  2513. }
  2514. static int sg_proc_single_open_version(struct inode *inode, struct file *file)
  2515. {
  2516. return single_open(file, sg_proc_seq_show_version, NULL);
  2517. }
  2518. static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
  2519. {
  2520. seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
  2521. "online\n");
  2522. return 0;
  2523. }
  2524. static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
  2525. {
  2526. return single_open(file, sg_proc_seq_show_devhdr, NULL);
  2527. }
  2528. struct sg_proc_deviter {
  2529. loff_t index;
  2530. size_t max;
  2531. };
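/*
 * seq_file iterator over the sg device array, shared by the "devices",
 * "device_strs" and "debug" entries under /proc/scsi/sg.
 */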
  2532. static void * dev_seq_start(struct seq_file *s, loff_t *pos)
  2533. {
  2534. struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
  2535. s->private = it;
  2536. if (! it)
  2537. return NULL;
  2538. if (NULL == sg_dev_arr)
  2539. return NULL;
  2540. it->index = *pos;
  2541. it->max = sg_last_dev();
  2542. if (it->index >= it->max)
  2543. return NULL;
  2544. return it;
  2545. }
  2546. static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
  2547. {
  2548. struct sg_proc_deviter * it = s->private;
  2549. *pos = ++it->index;
  2550. return (it->index < it->max) ? it : NULL;
  2551. }
  2552. static void dev_seq_stop(struct seq_file *s, void *v)
  2553. {
  2554. kfree(s->private);
  2555. }
  2556. static int sg_proc_open_dev(struct inode *inode, struct file *file)
  2557. {
  2558. return seq_open(file, &dev_seq_ops);
  2559. }
  2560. static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
  2561. {
  2562. struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
  2563. Sg_device *sdp;
  2564. struct scsi_device *scsidp;
  2565. sdp = it ? sg_get_dev(it->index) : NULL;
  2566. if (sdp && (scsidp = sdp->device) && (!sdp->detached))
  2567. seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
  2568. scsidp->host->host_no, scsidp->channel,
  2569. scsidp->id, scsidp->lun, (int) scsidp->type,
  2570. 1,
  2571. (int) scsidp->queue_depth,
  2572. (int) scsidp->device_busy,
  2573. (int) scsi_device_online(scsidp));
  2574. else
  2575. seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
  2576. return 0;
  2577. }
  2578. static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
  2579. {
  2580. return seq_open(file, &devstrs_seq_ops);
  2581. }
  2582. static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
  2583. {
  2584. struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
  2585. Sg_device *sdp;
  2586. struct scsi_device *scsidp;
  2587. sdp = it ? sg_get_dev(it->index) : NULL;
  2588. if (sdp && (scsidp = sdp->device) && (!sdp->detached))
  2589. seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
  2590. scsidp->vendor, scsidp->model, scsidp->rev);
  2591. else
  2592. seq_printf(s, "<no active device>\n");
  2593. return 0;
  2594. }
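/*
 * Emit one block per open fd and one line per queued request for the
 * "debug" /proc entry: reserve buffer size, fd flags, and for each
 * request its state (act/rcv/fin), pack_id, buffer length, timing and
 * opcode.
 */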
  2595. static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
  2596. {
  2597. int k, m, new_interface, blen, usg;
  2598. Sg_request *srp;
  2599. Sg_fd *fp;
  2600. const sg_io_hdr_t *hp;
  2601. const char * cp;
  2602. unsigned int ms;
  2603. for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
  2604. seq_printf(s, " FD(%d): timeout=%dms bufflen=%d "
  2605. "(res)sgat=%d low_dma=%d\n", k + 1,
  2606. jiffies_to_msecs(fp->timeout),
  2607. fp->reserve.bufflen,
  2608. (int) fp->reserve.k_use_sg,
  2609. (int) fp->low_dma);
  2610. seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
  2611. (int) fp->cmd_q, (int) fp->force_packid,
  2612. (int) fp->keep_orphan, (int) fp->closed);
  2613. for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
  2614. hp = &srp->header;
  2615. new_interface = (hp->interface_id == '\0') ? 0 : 1;
  2616. if (srp->res_used) {
  2617. if (new_interface &&
  2618. (SG_FLAG_MMAP_IO & hp->flags))
  2619. cp = " mmap>> ";
  2620. else
  2621. cp = " rb>> ";
  2622. } else {
  2623. if (SG_INFO_DIRECT_IO_MASK & hp->info)
  2624. cp = " dio>> ";
  2625. else
  2626. cp = " ";
  2627. }
  2628. seq_printf(s, cp);
  2629. blen = srp->data.bufflen;
  2630. usg = srp->data.k_use_sg;
  2631. seq_printf(s, srp->done ?
  2632. ((1 == srp->done) ? "rcv:" : "fin:")
  2633. : "act:");
  2634. seq_printf(s, " id=%d blen=%d",
  2635. srp->header.pack_id, blen);
  2636. if (srp->done)
  2637. seq_printf(s, " dur=%d", hp->duration);
  2638. else {
  2639. ms = jiffies_to_msecs(jiffies);
  2640. seq_printf(s, " t_o/elap=%d/%d",
  2641. (new_interface ? hp->timeout :
  2642. jiffies_to_msecs(fp->timeout)),
  2643. (ms > hp->duration ? ms - hp->duration : 0));
  2644. }
  2645. seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
  2646. (int) srp->data.cmd_opcode);
  2647. }
  2648. if (0 == m)
  2649. seq_printf(s, " No requests active\n");
  2650. }
  2651. }
  2652. static int sg_proc_open_debug(struct inode *inode, struct file *file)
  2653. {
  2654. return seq_open(file, &debug_seq_ops);
  2655. }
  2656. static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
  2657. {
  2658. struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
  2659. Sg_device *sdp;
  2660. if (it && (0 == it->index)) {
  2661. seq_printf(s, "dev_max(currently)=%d max_active_device=%d "
  2662. "(origin 1)\n", sg_dev_max, (int)it->max);
  2663. seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
  2664. }
  2665. sdp = it ? sg_get_dev(it->index) : NULL;
  2666. if (sdp) {
  2667. struct scsi_device *scsidp = sdp->device;
  2668. if (NULL == scsidp) {
  2669. seq_printf(s, "device %d detached ??\n",
  2670. (int)it->index);
  2671. return 0;
  2672. }
  2673. if (sg_get_nth_sfp(sdp, 0)) {
  2674. seq_printf(s, " >>> device=%s ",
  2675. sdp->disk->disk_name);
  2676. if (sdp->detached)
  2677. seq_printf(s, "detached pending close ");
  2678. else
  2679. seq_printf
  2680. (s, "scsi%d chan=%d id=%d lun=%d em=%d",
  2681. scsidp->host->host_no,
  2682. scsidp->channel, scsidp->id,
  2683. scsidp->lun,
  2684. scsidp->host->hostt->emulated);
  2685. seq_printf(s, " sg_tablesize=%d excl=%d\n",
  2686. sdp->sg_tablesize, sdp->exclude);
  2687. }
  2688. sg_proc_debug_helper(s, sdp);
  2689. }
  2690. return 0;
  2691. }
  2692. #endif /* CONFIG_SCSI_PROC_FS */
  2693. module_init(init_sg);
  2694. module_exit(exit_sg);
  2695. MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR);