file_storage.c

  1. /*
  2. * file_storage.c -- File-backed USB Storage Gadget, for USB development
  3. *
  4. * Copyright (C) 2003-2008 Alan Stern
  5. * All rights reserved.
  6. *
  7. * Redistribution and use in source and binary forms, with or without
  8. * modification, are permitted provided that the following conditions
  9. * are met:
  10. * 1. Redistributions of source code must retain the above copyright
  11. * notice, this list of conditions, and the following disclaimer,
  12. * without modification.
  13. * 2. Redistributions in binary form must reproduce the above copyright
  14. * notice, this list of conditions and the following disclaimer in the
  15. * documentation and/or other materials provided with the distribution.
  16. * 3. The names of the above-listed copyright holders may not be used
  17. * to endorse or promote products derived from this software without
  18. * specific prior written permission.
  19. *
  20. * ALTERNATIVELY, this software may be distributed under the terms of the
  21. * GNU General Public License ("GPL") as published by the Free Software
  22. * Foundation, either version 2 of that License or (at your option) any
  23. * later version.
  24. *
  25. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
  26. * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
  27. * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
  28. * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
  29. * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  30. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  31. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
  32. * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
  33. * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
  34. * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
  35. * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  36. */
  37. /*
  38. * The File-backed Storage Gadget acts as a USB Mass Storage device,
  39. * appearing to the host as a disk drive or as a CD-ROM drive. In addition
  40. * to providing an example of a genuinely useful gadget driver for a USB
  41. * device, it also illustrates a technique of double-buffering for increased
  42. * throughput. Last but not least, it gives an easy way to probe the
  43. * behavior of the Mass Storage drivers in a USB host.
  44. *
  45. * Backing storage is provided by a regular file or a block device, specified
  46. * by the "file" module parameter. Access can be limited to read-only by
  47. * setting the optional "ro" module parameter. (For CD-ROM emulation,
  48. * access is always read-only.) The gadget will indicate that it has
  49. * removable media if the optional "removable" module parameter is set.
  50. *
  51. * The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI),
  52. * and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected
  53. * by the optional "transport" module parameter. It also supports the
  54. * following protocols: RBC (0x01), ATAPI or SFF-8020i (0x02), QIC-157 (0x03),
  55. * UFI (0x04), SFF-8070i (0x05), and transparent SCSI (0x06), selected by
  56. * the optional "protocol" module parameter. In addition, the default
  57. * Vendor ID, Product ID, and release number can be overridden.
  58. *
  59. * There is support for multiple logical units (LUNs), each of which has
  60. * its own backing file. The number of LUNs can be set using the optional
  61. * "luns" module parameter (anywhere from 1 to 8), and the corresponding
  62. * files are specified using comma-separated lists for "file" and "ro".
  63. * The default number of LUNs is taken from the number of "file" elements;
  64. * it is 1 if "file" is not given. If "removable" is not set then a backing
  65. * file must be specified for each LUN. If it is set, then an unspecified
  66. * or empty backing filename means the LUN's medium is not loaded. Ideally
  67. * each LUN would be settable independently as a disk drive or a CD-ROM
  68. * drive, but currently all LUNs have to be the same type. The CD-ROM
  69. * emulation includes a single data track and no audio tracks; hence there
  70. * need be only one backing file per LUN. Note also that the CD-ROM block
  71. * length is set to 512 rather than the more common value 2048.
  72. *
  73. * Requirements are modest; only a bulk-in and a bulk-out endpoint are
  74. * needed (an interrupt-out endpoint is also needed for CBI). The memory
  75. * requirement amounts to two 16K buffers, size configurable by a parameter.
  76. * Support is included for both full-speed and high-speed operation.
  77. *
  78. * Note that the driver is slightly non-portable in that it assumes a
  79. * single memory/DMA buffer will be usable for bulk-in, bulk-out, and
  80. * interrupt-in endpoints. With most device controllers this isn't an
  81. * issue, but there may be some with hardware restrictions that prevent
  82. * a buffer from being used by more than one endpoint.
  83. *
  84. * Module options:
  85. *
  86. * file=filename[,filename...]
  87. *                     Required if "removable" is not set, names of
  88. *                     the files or block devices used for
  89. *                     backing storage
  90. * ro=b[,b...]         Default false, booleans for read-only access
  91. * removable           Default false, boolean for removable media
  92. * luns=N              Default N = number of filenames, number of
  93. *                     LUNs to support
  94. * stall               Default determined according to the type of
  95. *                     USB device controller (usually true),
  96. *                     boolean to permit the driver to halt
  97. *                     bulk endpoints
  98. * cdrom               Default false, boolean for whether to emulate
  99. *                     a CD-ROM drive
  100. * transport=XXX      Default BBB, transport name (CB, CBI, or BBB)
  101. * protocol=YYY       Default SCSI, protocol name (RBC, 8020 or
  102. *                    ATAPI, QIC, UFI, 8070, or SCSI;
  103. *                    also 1 - 6)
  104. * vendor=0xVVVV      Default 0x0525 (NetChip), USB Vendor ID
  105. * product=0xPPPP     Default 0xa4a5 (FSG), USB Product ID
  106. * release=0xRRRR     Override the USB release number (bcdDevice)
  107. * buflen=N           Default N=16384, buffer size used (will be
  108. *                    rounded down to a multiple of
  109. *                    PAGE_CACHE_SIZE)
  110. *
  111. * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "ro",
  112. * "removable", "luns", "stall", and "cdrom" options are available; default
  113. * values are used for everything else.
  114. *
  115. * The pathnames of the backing files and the ro settings are available in
  116. * the attribute files "file" and "ro" in the lun<n> subdirectory of the
  117. * gadget's sysfs directory. If the "removable" option is set, writing to
  118. * these files will simulate ejecting/loading the medium (writing an empty
  119. * line means eject) and adjusting a write-enable tab. Changes to the ro
  120. * setting are not allowed when the medium is loaded or if CD-ROM emulation
  121. * is being used.
  122. *
  123. * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
  124. * The driver's SCSI command interface was based on the "Information
  125. * technology - Small Computer System Interface - 2" document from
  126. * X3T9.2 Project 375D, Revision 10L, 7-SEP-93, available at
  127. * <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>. The single exception
  128. * is opcode 0x23 (READ FORMAT CAPACITIES), which was based on the
  129. * "Universal Serial Bus Mass Storage Class UFI Command Specification"
  130. * document, Revision 1.0, December 14, 1998, available at
  131. * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
  132. */
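/*
 * For example (the backing-file paths here are only hypothetical), a
 * two-LUN removable gadget whose second LUN is read-only might be set
 * up with something like:
 *
 *	modprobe g_file_storage luns=2 removable \
 *		file=/root/lun0.img,/root/lun1.img ro=0,1
 */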
  133. /*
  134. * Driver Design
  135. *
  136. * The FSG driver is fairly straightforward. There is a main kernel
  137. * thread that handles most of the work. Interrupt routines field
  138. * callbacks from the controller driver: bulk- and interrupt-request
  139. * completion notifications, endpoint-0 events, and disconnect events.
  140. * Completion events are passed to the main thread by wakeup calls. Many
  141. * ep0 requests are handled at interrupt time, but SetInterface,
  142. * SetConfiguration, and device reset requests are forwarded to the
  143. * thread in the form of "exceptions" using SIGUSR1 signals (since they
  144. * should interrupt any ongoing file I/O operations).
  145. *
  146. * The thread's main routine implements the standard command/data/status
  147. * parts of a SCSI interaction. It and its subroutines are full of tests
  148. * for pending signals/exceptions -- all this polling is necessary since
  149. * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
  150. * indication that the driver really wants to be running in userspace.)
  151. * An important point is that so long as the thread is alive it keeps an
  152. * open reference to the backing file. This will prevent unmounting
  153. * the backing file's underlying filesystem and could cause problems
  154. * during system shutdown, for example. To prevent such problems, the
  155. * thread catches INT, TERM, and KILL signals and converts them into
  156. * an EXIT exception.
  157. *
  158. * In normal operation the main thread is started during the gadget's
  159. * fsg_bind() callback and stopped during fsg_unbind(). But it can also
  160. * exit when it receives a signal, and there's no point leaving the
  161. * gadget running when the thread is dead. So just before the thread
  162. * exits, it deregisters the gadget driver. This makes things a little
  163. * tricky: The driver is deregistered at two places, and the exiting
  164. * thread can indirectly call fsg_unbind() which in turn can tell the
  165. * thread to exit. The first problem is resolved through the use of the
  166. * REGISTERED atomic bitflag; the driver will only be deregistered once.
  167. * The second problem is resolved by having fsg_unbind() check
  168. * fsg->state; it won't try to stop the thread if the state is already
  169. * FSG_STATE_TERMINATED.
  170. *
  171. * To provide maximum throughput, the driver uses a circular pipeline of
  172. * buffer heads (struct fsg_buffhd). In principle the pipeline can be
  173. * arbitrarily long; in practice the benefits don't justify having more
  174. * than 2 stages (i.e., double buffering). But it helps to think of the
  175. * pipeline as being a long one. Each buffer head contains a bulk-in and
  176. * a bulk-out request pointer (since the buffer can be used for both
  177. * output and input -- directions always are given from the host's
  178. * point of view) as well as a pointer to the buffer and various state
  179. * variables.
  180. *
  181. * Use of the pipeline follows a simple protocol. There is a variable
  182. * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
  183. * At any time that buffer head may still be in use from an earlier
  184. * request, so each buffer head has a state variable indicating whether
  185. * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
  186. * buffer head to be EMPTY, filling the buffer either by file I/O or by
  187. * USB I/O (during which the buffer head is BUSY), and marking the buffer
  188. * head FULL when the I/O is complete. Then the buffer will be emptied
  189. * (again possibly by USB I/O, during which it is marked BUSY) and
  190. * finally marked EMPTY again (possibly by a completion routine).
  191. *
  192. * A module parameter tells the driver to avoid stalling the bulk
  193. * endpoints wherever the transport specification allows. This is
  194. * necessary for some UDCs like the SuperH, which cannot reliably clear a
  195. * halt on a bulk endpoint. However, under certain circumstances the
  196. * Bulk-only specification requires a stall. In such cases the driver
  197. * will halt the endpoint and set a flag indicating that it should clear
  198. * the halt in software during the next device reset. Hopefully this
  199. * will permit everything to work correctly. Furthermore, although the
  200. * specification allows the bulk-out endpoint to halt when the host sends
  201. * too much data, implementing this would cause an unavoidable race.
  202. * The driver will always use the "no-stall" approach for OUT transfers.
  203. *
  204. * One subtle point concerns sending status-stage responses for ep0
  205. * requests. Some of these requests, such as device reset, can involve
  206. * interrupting an ongoing file I/O operation, which might take an
  207. * arbitrarily long time. During that delay the host might give up on
  208. * the original ep0 request and issue a new one. When that happens the
  209. * driver should not notify the host about completion of the original
  210. * request, as the host will no longer be waiting for it. So the driver
  211. * assigns to each ep0 request a unique tag, and it keeps track of the
  212. * tag value of the request associated with a long-running exception
  213. * (device-reset, interface-change, or configuration-change). When the
  214. * exception handler is finished, the status-stage response is submitted
  215. * only if the current ep0 request tag is equal to the exception request
  216. * tag. Thus only the most recently received ep0 request will get a
  217. * status-stage response.
  218. *
  219. * Warning: This driver source file is too long. It ought to be split up
  220. * into a header file plus about 3 separate .c files, to handle the details
  221. * of the Gadget, USB Mass Storage, and SCSI protocols.
  222. */
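/*
 * Rough summary of the buffer-head life cycle described above, using the
 * BUF_STATE_* names that appear throughout this file (one pipeline stage):
 *
 *	EMPTY --(start filling: file read or USB OUT)--> BUSY
 *	BUSY  --(fill I/O completes)-------------------> FULL
 *	FULL  --(start draining: file write or USB IN)-> BUSY
 *	BUSY  --(drain I/O completes)------------------> EMPTY
 */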
  223. /* #define VERBOSE_DEBUG */
  224. /* #define DUMP_MSGS */
  225. #include <linux/blkdev.h>
  226. #include <linux/completion.h>
  227. #include <linux/dcache.h>
  228. #include <linux/delay.h>
  229. #include <linux/device.h>
  230. #include <linux/fcntl.h>
  231. #include <linux/file.h>
  232. #include <linux/fs.h>
  233. #include <linux/kref.h>
  234. #include <linux/kthread.h>
  235. #include <linux/limits.h>
  236. #include <linux/rwsem.h>
  237. #include <linux/slab.h>
  238. #include <linux/spinlock.h>
  239. #include <linux/string.h>
  240. #include <linux/freezer.h>
  241. #include <linux/utsname.h>
  242. #include <linux/usb/ch9.h>
  243. #include <linux/usb/gadget.h>
  244. #include "gadget_chips.h"
  245. /*
  246. * Kbuild is not very cooperative with respect to linking separately
  247. * compiled library objects into one module. So for now we won't use
  248. * separate compilation ... ensuring init/exit sections work to shrink
  249. * the runtime footprint, and giving us at least some parts of what
  250. * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
  251. */
  252. #include "usbstring.c"
  253. #include "config.c"
  254. #include "epautoconf.c"
  255. /*-------------------------------------------------------------------------*/
  256. #define DRIVER_DESC "File-backed Storage Gadget"
  257. #define DRIVER_NAME "g_file_storage"
  258. #define DRIVER_VERSION "20 November 2008"
  259. static char fsg_string_manufacturer[64];
  260. static const char fsg_string_product[] = DRIVER_DESC;
  261. static char fsg_string_serial[13];
  262. static const char fsg_string_config[] = "Self-powered";
  263. static const char fsg_string_interface[] = "Mass Storage";
  264. #include "storage_common.c"
  265. MODULE_DESCRIPTION(DRIVER_DESC);
  266. MODULE_AUTHOR("Alan Stern");
  267. MODULE_LICENSE("Dual BSD/GPL");
  268. /*
  269. * This driver assumes self-powered hardware and has no way for users to
  270. * trigger remote wakeup. It uses autoconfiguration to select endpoints
  271. * and endpoint addresses.
  272. */
  273. /*-------------------------------------------------------------------------*/
  274. /* Encapsulate the module parameter settings */
  275. static struct {
  276. char *file[FSG_MAX_LUNS];
  277. int ro[FSG_MAX_LUNS];
  278. unsigned int num_filenames;
  279. unsigned int num_ros;
  280. unsigned int nluns;
  281. int removable;
  282. int can_stall;
  283. int cdrom;
  284. char *transport_parm;
  285. char *protocol_parm;
  286. unsigned short vendor;
  287. unsigned short product;
  288. unsigned short release;
  289. unsigned int buflen;
  290. int transport_type;
  291. char *transport_name;
  292. int protocol_type;
  293. char *protocol_name;
  294. } mod_data = { // Default values
  295. .transport_parm = "BBB",
  296. .protocol_parm = "SCSI",
  297. .removable = 0,
  298. .can_stall = 1,
  299. .cdrom = 0,
  300. .vendor = FSG_VENDOR_ID,
  301. .product = FSG_PRODUCT_ID,
  302. .release = 0xffff, // Use controller chip type
  303. .buflen = 16384,
  304. };
  305. module_param_array_named(file, mod_data.file, charp, &mod_data.num_filenames,
  306. S_IRUGO);
  307. MODULE_PARM_DESC(file, "names of backing files or devices");
  308. module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO);
  309. MODULE_PARM_DESC(ro, "true to force read-only");
  310. module_param_named(luns, mod_data.nluns, uint, S_IRUGO);
  311. MODULE_PARM_DESC(luns, "number of LUNs");
  312. module_param_named(removable, mod_data.removable, bool, S_IRUGO);
  313. MODULE_PARM_DESC(removable, "true to simulate removable media");
  314. module_param_named(stall, mod_data.can_stall, bool, S_IRUGO);
  315. MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
  316. module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO);
  317. MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk");
  318. /* In the non-TEST version, only the module parameters listed above
  319. * are available. */
  320. #ifdef CONFIG_USB_FILE_STORAGE_TEST
  321. module_param_named(transport, mod_data.transport_parm, charp, S_IRUGO);
  322. MODULE_PARM_DESC(transport, "type of transport (BBB, CBI, or CB)");
  323. module_param_named(protocol, mod_data.protocol_parm, charp, S_IRUGO);
  324. MODULE_PARM_DESC(protocol, "type of protocol (RBC, 8020, QIC, UFI, "
  325. "8070, or SCSI)");
  326. module_param_named(vendor, mod_data.vendor, ushort, S_IRUGO);
  327. MODULE_PARM_DESC(vendor, "USB Vendor ID");
  328. module_param_named(product, mod_data.product, ushort, S_IRUGO);
  329. MODULE_PARM_DESC(product, "USB Product ID");
  330. module_param_named(release, mod_data.release, ushort, S_IRUGO);
  331. MODULE_PARM_DESC(release, "USB release number");
  332. module_param_named(buflen, mod_data.buflen, uint, S_IRUGO);
  333. MODULE_PARM_DESC(buflen, "I/O buffer size");
  334. #endif /* CONFIG_USB_FILE_STORAGE_TEST */
  335. /*
  336. * These definitions will permit the compiler to avoid generating code for
  337. * parts of the driver that aren't used in the non-TEST version. Even gcc
  338. * can recognize when a test of a constant expression yields a dead code
  339. * path.
  340. */
  341. #ifdef CONFIG_USB_FILE_STORAGE_TEST
  342. #define transport_is_bbb() (mod_data.transport_type == USB_PR_BULK)
  343. #define transport_is_cbi() (mod_data.transport_type == USB_PR_CBI)
  344. #define protocol_is_scsi() (mod_data.protocol_type == USB_SC_SCSI)
  345. #else
  346. #define transport_is_bbb() 1
  347. #define transport_is_cbi() 0
  348. #define protocol_is_scsi() 1
  349. #endif /* CONFIG_USB_FILE_STORAGE_TEST */
  350. /*-------------------------------------------------------------------------*/
  351. struct fsg_dev {
  352. /* lock protects: state, all the req_busy's, and cbbuf_cmnd */
  353. spinlock_t lock;
  354. struct usb_gadget *gadget;
  355. /* filesem protects: backing files in use */
  356. struct rw_semaphore filesem;
  357. /* reference counting: wait until all LUNs are released */
  358. struct kref ref;
  359. struct usb_ep *ep0; // Handy copy of gadget->ep0
  360. struct usb_request *ep0req; // For control responses
  361. unsigned int ep0_req_tag;
  362. const char *ep0req_name;
  363. struct usb_request *intreq; // For interrupt responses
  364. int intreq_busy;
  365. struct fsg_buffhd *intr_buffhd;
  366. unsigned int bulk_out_maxpacket;
  367. enum fsg_state state; // For exception handling
  368. unsigned int exception_req_tag;
  369. u8 config, new_config;
  370. unsigned int running : 1;
  371. unsigned int bulk_in_enabled : 1;
  372. unsigned int bulk_out_enabled : 1;
  373. unsigned int intr_in_enabled : 1;
  374. unsigned int phase_error : 1;
  375. unsigned int short_packet_received : 1;
  376. unsigned int bad_lun_okay : 1;
  377. unsigned long atomic_bitflags;
  378. #define REGISTERED 0
  379. #define IGNORE_BULK_OUT 1
  380. #define SUSPENDED 2
  381. struct usb_ep *bulk_in;
  382. struct usb_ep *bulk_out;
  383. struct usb_ep *intr_in;
  384. struct fsg_buffhd *next_buffhd_to_fill;
  385. struct fsg_buffhd *next_buffhd_to_drain;
  386. struct fsg_buffhd buffhds[FSG_NUM_BUFFERS];
  387. int thread_wakeup_needed;
  388. struct completion thread_notifier;
  389. struct task_struct *thread_task;
  390. int cmnd_size;
  391. u8 cmnd[MAX_COMMAND_SIZE];
  392. enum data_direction data_dir;
  393. u32 data_size;
  394. u32 data_size_from_cmnd;
  395. u32 tag;
  396. unsigned int lun;
  397. u32 residue;
  398. u32 usb_amount_left;
  399. /* The CB protocol offers no way for a host to know when a command
  400. * has completed. As a result the next command may arrive early,
  401. * and we will still have to handle it. For that reason we need
  402. * a buffer to store new commands when using CB (or CBI, which
  403. * does not oblige a host to wait for command completion either). */
  404. int cbbuf_cmnd_size;
  405. u8 cbbuf_cmnd[MAX_COMMAND_SIZE];
  406. unsigned int nluns;
  407. struct fsg_lun *luns;
  408. struct fsg_lun *curlun;
  409. };
  410. typedef void (*fsg_routine_t)(struct fsg_dev *);
  411. static int exception_in_progress(struct fsg_dev *fsg)
  412. {
  413. return (fsg->state > FSG_STATE_IDLE);
  414. }
  415. /* Make bulk-out requests be divisible by the maxpacket size */
  416. static void set_bulk_out_req_length(struct fsg_dev *fsg,
  417. struct fsg_buffhd *bh, unsigned int length)
  418. {
  419. unsigned int rem;
  420. bh->bulk_out_intended_length = length;
  421. rem = length % fsg->bulk_out_maxpacket;
  422. if (rem > 0)
  423. length += fsg->bulk_out_maxpacket - rem;
  424. bh->outreq->length = length;
  425. }
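/*
 * For example, with bulk_out_maxpacket == 512 a request for 600 bytes is
 * rounded up to 1024 so that the length queued to the controller is always
 * a multiple of the packet size; the original 600 is saved in
 * bulk_out_intended_length for bulk_out_complete() to compare against.
 */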
  426. static struct fsg_dev *the_fsg;
  427. static struct usb_gadget_driver fsg_driver;
  428. /*-------------------------------------------------------------------------*/
  429. static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
  430. {
  431. const char *name;
  432. if (ep == fsg->bulk_in)
  433. name = "bulk-in";
  434. else if (ep == fsg->bulk_out)
  435. name = "bulk-out";
  436. else
  437. name = ep->name;
  438. DBG(fsg, "%s set halt\n", name);
  439. return usb_ep_set_halt(ep);
  440. }
  441. /*-------------------------------------------------------------------------*/
  442. /*
  443. * DESCRIPTORS ... most are static, but strings and (full) configuration
  444. * descriptors are built on demand. Also the (static) config and interface
  445. * descriptors are adjusted during fsg_bind().
  446. */
  447. /* There is only one configuration. */
  448. #define CONFIG_VALUE 1
  449. static struct usb_device_descriptor
  450. device_desc = {
  451. .bLength = sizeof device_desc,
  452. .bDescriptorType = USB_DT_DEVICE,
  453. .bcdUSB = cpu_to_le16(0x0200),
  454. .bDeviceClass = USB_CLASS_PER_INTERFACE,
  455. /* The next three values can be overridden by module parameters */
  456. .idVendor = cpu_to_le16(FSG_VENDOR_ID),
  457. .idProduct = cpu_to_le16(FSG_PRODUCT_ID),
  458. .bcdDevice = cpu_to_le16(0xffff),
  459. .iManufacturer = FSG_STRING_MANUFACTURER,
  460. .iProduct = FSG_STRING_PRODUCT,
  461. .iSerialNumber = FSG_STRING_SERIAL,
  462. .bNumConfigurations = 1,
  463. };
  464. static struct usb_config_descriptor
  465. config_desc = {
  466. .bLength = sizeof config_desc,
  467. .bDescriptorType = USB_DT_CONFIG,
  468. /* wTotalLength computed by usb_gadget_config_buf() */
  469. .bNumInterfaces = 1,
  470. .bConfigurationValue = CONFIG_VALUE,
  471. .iConfiguration = FSG_STRING_CONFIG,
  472. .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
  473. .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
  474. };
  475. static struct usb_qualifier_descriptor
  476. dev_qualifier = {
  477. .bLength = sizeof dev_qualifier,
  478. .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
  479. .bcdUSB = cpu_to_le16(0x0200),
  480. .bDeviceClass = USB_CLASS_PER_INTERFACE,
  481. .bNumConfigurations = 1,
  482. };
  483. /*
  484. * Config descriptors must agree with the code that sets configurations
  485. * and with code managing interfaces and their altsettings. They must
  486. * also handle different speeds and other-speed requests.
  487. */
  488. static int populate_config_buf(struct usb_gadget *gadget,
  489. u8 *buf, u8 type, unsigned index)
  490. {
  491. enum usb_device_speed speed = gadget->speed;
  492. int len;
  493. const struct usb_descriptor_header **function;
  494. if (index > 0)
  495. return -EINVAL;
  496. if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
  497. speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
  498. function = gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH
  499. ? (const struct usb_descriptor_header **)fsg_hs_function
  500. : (const struct usb_descriptor_header **)fsg_fs_function;
  501. /* for now, don't advertise srp-only devices */
  502. if (!gadget_is_otg(gadget))
  503. function++;
  504. len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
  505. ((struct usb_config_descriptor *) buf)->bDescriptorType = type;
  506. return len;
  507. }
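/*
 * Note: for an other-speed-config request the speed is flipped above;
 * (USB_SPEED_FULL + USB_SPEED_HIGH) - speed maps full speed to high speed
 * and vice versa, so the descriptors returned describe the speed the
 * device is *not* currently running at, as that request requires.
 */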
  508. /*-------------------------------------------------------------------------*/
  509. /* These routines may be called in process context or in_irq */
  510. /* Caller must hold fsg->lock */
  511. static void wakeup_thread(struct fsg_dev *fsg)
  512. {
  513. /* Tell the main thread that something has happened */
  514. fsg->thread_wakeup_needed = 1;
  515. if (fsg->thread_task)
  516. wake_up_process(fsg->thread_task);
  517. }
  518. static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
  519. {
  520. unsigned long flags;
  521. /* Do nothing if a higher-priority exception is already in progress.
  522. * If a lower-or-equal priority exception is in progress, preempt it
  523. * and notify the main thread by sending it a signal. */
  524. spin_lock_irqsave(&fsg->lock, flags);
  525. if (fsg->state <= new_state) {
  526. fsg->exception_req_tag = fsg->ep0_req_tag;
  527. fsg->state = new_state;
  528. if (fsg->thread_task)
  529. send_sig_info(SIGUSR1, SEND_SIG_FORCED,
  530. fsg->thread_task);
  531. }
  532. spin_unlock_irqrestore(&fsg->lock, flags);
  533. }
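/*
 * Because of the "fsg->state <= new_state" test above, exception priority
 * simply follows the numeric order of the fsg_state values: a pending
 * exception is preempted only by one of equal or higher value.
 */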
  534. /*-------------------------------------------------------------------------*/
  535. /* The disconnect callback and ep0 routines. These always run in_irq,
  536. * except that ep0_queue() is called in the main thread to acknowledge
  537. * completion of various requests: set config, set interface, and
  538. * Bulk-only device reset. */
  539. static void fsg_disconnect(struct usb_gadget *gadget)
  540. {
  541. struct fsg_dev *fsg = get_gadget_data(gadget);
  542. DBG(fsg, "disconnect or port reset\n");
  543. raise_exception(fsg, FSG_STATE_DISCONNECT);
  544. }
  545. static int ep0_queue(struct fsg_dev *fsg)
  546. {
  547. int rc;
  548. rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
  549. if (rc != 0 && rc != -ESHUTDOWN) {
  550. /* We can't do much more than wait for a reset */
  551. WARNING(fsg, "error in submission: %s --> %d\n",
  552. fsg->ep0->name, rc);
  553. }
  554. return rc;
  555. }
  556. static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
  557. {
  558. struct fsg_dev *fsg = ep->driver_data;
  559. if (req->actual > 0)
  560. dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
  561. if (req->status || req->actual != req->length)
  562. DBG(fsg, "%s --> %d, %u/%u\n", __func__,
  563. req->status, req->actual, req->length);
  564. if (req->status == -ECONNRESET) // Request was cancelled
  565. usb_ep_fifo_flush(ep);
  566. if (req->status == 0 && req->context)
  567. ((fsg_routine_t) (req->context))(fsg);
  568. }
  569. /*-------------------------------------------------------------------------*/
  570. /* Bulk and interrupt endpoint completion handlers.
  571. * These always run in_irq. */
  572. static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
  573. {
  574. struct fsg_dev *fsg = ep->driver_data;
  575. struct fsg_buffhd *bh = req->context;
  576. if (req->status || req->actual != req->length)
  577. DBG(fsg, "%s --> %d, %u/%u\n", __func__,
  578. req->status, req->actual, req->length);
  579. if (req->status == -ECONNRESET) // Request was cancelled
  580. usb_ep_fifo_flush(ep);
  581. /* Hold the lock while we update the request and buffer states */
  582. smp_wmb();
  583. spin_lock(&fsg->lock);
  584. bh->inreq_busy = 0;
  585. bh->state = BUF_STATE_EMPTY;
  586. wakeup_thread(fsg);
  587. spin_unlock(&fsg->lock);
  588. }
  589. static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
  590. {
  591. struct fsg_dev *fsg = ep->driver_data;
  592. struct fsg_buffhd *bh = req->context;
  593. dump_msg(fsg, "bulk-out", req->buf, req->actual);
  594. if (req->status || req->actual != bh->bulk_out_intended_length)
  595. DBG(fsg, "%s --> %d, %u/%u\n", __func__,
  596. req->status, req->actual,
  597. bh->bulk_out_intended_length);
  598. if (req->status == -ECONNRESET) // Request was cancelled
  599. usb_ep_fifo_flush(ep);
  600. /* Hold the lock while we update the request and buffer states */
  601. smp_wmb();
  602. spin_lock(&fsg->lock);
  603. bh->outreq_busy = 0;
  604. bh->state = BUF_STATE_FULL;
  605. wakeup_thread(fsg);
  606. spin_unlock(&fsg->lock);
  607. }
  608. #ifdef CONFIG_USB_FILE_STORAGE_TEST
  609. static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
  610. {
  611. struct fsg_dev *fsg = ep->driver_data;
  612. struct fsg_buffhd *bh = req->context;
  613. if (req->status || req->actual != req->length)
  614. DBG(fsg, "%s --> %d, %u/%u\n", __func__,
  615. req->status, req->actual, req->length);
  616. if (req->status == -ECONNRESET) // Request was cancelled
  617. usb_ep_fifo_flush(ep);
  618. /* Hold the lock while we update the request and buffer states */
  619. smp_wmb();
  620. spin_lock(&fsg->lock);
  621. fsg->intreq_busy = 0;
  622. bh->state = BUF_STATE_EMPTY;
  623. wakeup_thread(fsg);
  624. spin_unlock(&fsg->lock);
  625. }
  626. #else
  627. static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
  628. {}
  629. #endif /* CONFIG_USB_FILE_STORAGE_TEST */
  630. /*-------------------------------------------------------------------------*/
  631. /* Ep0 class-specific handlers. These always run in_irq. */
  632. #ifdef CONFIG_USB_FILE_STORAGE_TEST
  633. static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
  634. {
  635. struct usb_request *req = fsg->ep0req;
  636. static u8 cbi_reset_cmnd[6] = {
  637. SC_SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff};
  638. /* Error in command transfer? */
  639. if (req->status || req->length != req->actual ||
  640. req->actual < 6 || req->actual > MAX_COMMAND_SIZE) {
  641. /* Not all controllers allow a protocol stall after
  642. * receiving control-out data, but we'll try anyway. */
  643. fsg_set_halt(fsg, fsg->ep0);
  644. return; // Wait for reset
  645. }
  646. /* Is it the special reset command? */
  647. if (req->actual >= sizeof cbi_reset_cmnd &&
  648. memcmp(req->buf, cbi_reset_cmnd,
  649. sizeof cbi_reset_cmnd) == 0) {
  650. /* Raise an exception to stop the current operation
  651. * and reinitialize our state. */
  652. DBG(fsg, "cbi reset request\n");
  653. raise_exception(fsg, FSG_STATE_RESET);
  654. return;
  655. }
  656. VDBG(fsg, "CB[I] accept device-specific command\n");
  657. spin_lock(&fsg->lock);
  658. /* Save the command for later */
  659. if (fsg->cbbuf_cmnd_size)
  660. WARNING(fsg, "CB[I] overwriting previous command\n");
  661. fsg->cbbuf_cmnd_size = req->actual;
  662. memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size);
  663. wakeup_thread(fsg);
  664. spin_unlock(&fsg->lock);
  665. }
  666. #else
  667. static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
  668. {}
  669. #endif /* CONFIG_USB_FILE_STORAGE_TEST */
  670. static int class_setup_req(struct fsg_dev *fsg,
  671. const struct usb_ctrlrequest *ctrl)
  672. {
  673. struct usb_request *req = fsg->ep0req;
  674. int value = -EOPNOTSUPP;
  675. u16 w_index = le16_to_cpu(ctrl->wIndex);
  676. u16 w_value = le16_to_cpu(ctrl->wValue);
  677. u16 w_length = le16_to_cpu(ctrl->wLength);
  678. if (!fsg->config)
  679. return value;
  680. /* Handle Bulk-only class-specific requests */
  681. if (transport_is_bbb()) {
  682. switch (ctrl->bRequest) {
  683. case USB_BULK_RESET_REQUEST:
  684. if (ctrl->bRequestType != (USB_DIR_OUT |
  685. USB_TYPE_CLASS | USB_RECIP_INTERFACE))
  686. break;
  687. if (w_index != 0 || w_value != 0) {
  688. value = -EDOM;
  689. break;
  690. }
  691. /* Raise an exception to stop the current operation
  692. * and reinitialize our state. */
  693. DBG(fsg, "bulk reset request\n");
  694. raise_exception(fsg, FSG_STATE_RESET);
  695. value = DELAYED_STATUS;
  696. break;
  697. case USB_BULK_GET_MAX_LUN_REQUEST:
  698. if (ctrl->bRequestType != (USB_DIR_IN |
  699. USB_TYPE_CLASS | USB_RECIP_INTERFACE))
  700. break;
  701. if (w_index != 0 || w_value != 0) {
  702. value = -EDOM;
  703. break;
  704. }
  705. VDBG(fsg, "get max LUN\n");
  706. *(u8 *) req->buf = fsg->nluns - 1;
  707. value = 1;
  708. break;
  709. }
  710. }
  711. /* Handle CBI class-specific requests */
  712. else {
  713. switch (ctrl->bRequest) {
  714. case USB_CBI_ADSC_REQUEST:
  715. if (ctrl->bRequestType != (USB_DIR_OUT |
  716. USB_TYPE_CLASS | USB_RECIP_INTERFACE))
  717. break;
  718. if (w_index != 0 || w_value != 0) {
  719. value = -EDOM;
  720. break;
  721. }
  722. if (w_length > MAX_COMMAND_SIZE) {
  723. value = -EOVERFLOW;
  724. break;
  725. }
  726. value = w_length;
  727. fsg->ep0req->context = received_cbi_adsc;
  728. break;
  729. }
  730. }
  731. if (value == -EOPNOTSUPP)
  732. VDBG(fsg,
  733. "unknown class-specific control req "
  734. "%02x.%02x v%04x i%04x l%u\n",
  735. ctrl->bRequestType, ctrl->bRequest,
  736. le16_to_cpu(ctrl->wValue), w_index, w_length);
  737. return value;
  738. }
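/*
 * Note that the Get Max LUN reply above is the highest LUN number rather
 * than the number of LUNs: a gadget configured with nluns == 2 answers 1,
 * and the host then addresses LUN 0 and LUN 1.
 */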
  739. /*-------------------------------------------------------------------------*/
  740. /* Ep0 standard request handlers. These always run in_irq. */
  741. static int standard_setup_req(struct fsg_dev *fsg,
  742. const struct usb_ctrlrequest *ctrl)
  743. {
  744. struct usb_request *req = fsg->ep0req;
  745. int value = -EOPNOTSUPP;
  746. u16 w_index = le16_to_cpu(ctrl->wIndex);
  747. u16 w_value = le16_to_cpu(ctrl->wValue);
  748. /* Usually this just stores reply data in the pre-allocated ep0 buffer,
  749. * but config change events will also reconfigure hardware. */
  750. switch (ctrl->bRequest) {
  751. case USB_REQ_GET_DESCRIPTOR:
  752. if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
  753. USB_RECIP_DEVICE))
  754. break;
  755. switch (w_value >> 8) {
  756. case USB_DT_DEVICE:
  757. VDBG(fsg, "get device descriptor\n");
  758. value = sizeof device_desc;
  759. memcpy(req->buf, &device_desc, value);
  760. break;
  761. case USB_DT_DEVICE_QUALIFIER:
  762. VDBG(fsg, "get device qualifier\n");
  763. if (!gadget_is_dualspeed(fsg->gadget))
  764. break;
  765. value = sizeof dev_qualifier;
  766. memcpy(req->buf, &dev_qualifier, value);
  767. break;
  768. case USB_DT_OTHER_SPEED_CONFIG:
  769. VDBG(fsg, "get other-speed config descriptor\n");
  770. if (!gadget_is_dualspeed(fsg->gadget))
  771. break;
  772. goto get_config;
  773. case USB_DT_CONFIG:
  774. VDBG(fsg, "get configuration descriptor\n");
  775. get_config:
  776. value = populate_config_buf(fsg->gadget,
  777. req->buf,
  778. w_value >> 8,
  779. w_value & 0xff);
  780. break;
  781. case USB_DT_STRING:
  782. VDBG(fsg, "get string descriptor\n");
  783. /* wIndex == language code */
  784. value = usb_gadget_get_string(&fsg_stringtab,
  785. w_value & 0xff, req->buf);
  786. break;
  787. }
  788. break;
  789. /* One config, two speeds */
  790. case USB_REQ_SET_CONFIGURATION:
  791. if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
  792. USB_RECIP_DEVICE))
  793. break;
  794. VDBG(fsg, "set configuration\n");
  795. if (w_value == CONFIG_VALUE || w_value == 0) {
  796. fsg->new_config = w_value;
  797. /* Raise an exception to wipe out previous transaction
  798. * state (queued bufs, etc) and set the new config. */
  799. raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
  800. value = DELAYED_STATUS;
  801. }
  802. break;
  803. case USB_REQ_GET_CONFIGURATION:
  804. if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
  805. USB_RECIP_DEVICE))
  806. break;
  807. VDBG(fsg, "get configuration\n");
  808. *(u8 *) req->buf = fsg->config;
  809. value = 1;
  810. break;
  811. case USB_REQ_SET_INTERFACE:
  812. if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
  813. USB_RECIP_INTERFACE))
  814. break;
  815. if (fsg->config && w_index == 0) {
  816. /* Raise an exception to wipe out previous transaction
  817. * state (queued bufs, etc) and install the new
  818. * interface altsetting. */
  819. raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE);
  820. value = DELAYED_STATUS;
  821. }
  822. break;
  823. case USB_REQ_GET_INTERFACE:
  824. if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
  825. USB_RECIP_INTERFACE))
  826. break;
  827. if (!fsg->config)
  828. break;
  829. if (w_index != 0) {
  830. value = -EDOM;
  831. break;
  832. }
  833. VDBG(fsg, "get interface\n");
  834. *(u8 *) req->buf = 0;
  835. value = 1;
  836. break;
  837. default:
  838. VDBG(fsg,
  839. "unknown control req %02x.%02x v%04x i%04x l%u\n",
  840. ctrl->bRequestType, ctrl->bRequest,
  841. w_value, w_index, le16_to_cpu(ctrl->wLength));
  842. }
  843. return value;
  844. }
  845. static int fsg_setup(struct usb_gadget *gadget,
  846. const struct usb_ctrlrequest *ctrl)
  847. {
  848. struct fsg_dev *fsg = get_gadget_data(gadget);
  849. int rc;
  850. int w_length = le16_to_cpu(ctrl->wLength);
  851. ++fsg->ep0_req_tag; // Record arrival of a new request
  852. fsg->ep0req->context = NULL;
  853. fsg->ep0req->length = 0;
  854. dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
  855. if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
  856. rc = class_setup_req(fsg, ctrl);
  857. else
  858. rc = standard_setup_req(fsg, ctrl);
  859. /* Respond with data/status or defer until later? */
  860. if (rc >= 0 && rc != DELAYED_STATUS) {
  861. rc = min(rc, w_length);
  862. fsg->ep0req->length = rc;
  863. fsg->ep0req->zero = rc < w_length;
  864. fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ?
  865. "ep0-in" : "ep0-out");
  866. rc = ep0_queue(fsg);
  867. }
  868. /* Device either stalls (rc < 0) or reports success */
  869. return rc;
  870. }
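/*
 * The "zero" flag set above asks the UDC driver to terminate a data-in
 * stage that is shorter than wLength with a zero-length packet if the
 * reply happens to be an exact multiple of the maxpacket size; otherwise
 * the host could not tell that the (short) data stage has ended.
 */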
  871. /*-------------------------------------------------------------------------*/
  872. /* All the following routines run in process context */
  873. /* Use this for bulk or interrupt transfers, not ep0 */
  874. static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
  875. struct usb_request *req, int *pbusy,
  876. enum fsg_buffer_state *state)
  877. {
  878. int rc;
  879. if (ep == fsg->bulk_in)
  880. dump_msg(fsg, "bulk-in", req->buf, req->length);
  881. else if (ep == fsg->intr_in)
  882. dump_msg(fsg, "intr-in", req->buf, req->length);
  883. spin_lock_irq(&fsg->lock);
  884. *pbusy = 1;
  885. *state = BUF_STATE_BUSY;
  886. spin_unlock_irq(&fsg->lock);
  887. rc = usb_ep_queue(ep, req, GFP_KERNEL);
  888. if (rc != 0) {
  889. *pbusy = 0;
  890. *state = BUF_STATE_EMPTY;
  891. /* We can't do much more than wait for a reset */
  892. /* Note: currently the net2280 driver fails zero-length
  893. * submissions if DMA is enabled. */
  894. if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
  895. req->length == 0))
  896. WARNING(fsg, "error in submission: %s --> %d\n",
  897. ep->name, rc);
  898. }
  899. }
  900. static int sleep_thread(struct fsg_dev *fsg)
  901. {
  902. int rc = 0;
  903. /* Wait until a signal arrives or we are woken up */
  904. for (;;) {
  905. try_to_freeze();
  906. set_current_state(TASK_INTERRUPTIBLE);
  907. if (signal_pending(current)) {
  908. rc = -EINTR;
  909. break;
  910. }
  911. if (fsg->thread_wakeup_needed)
  912. break;
  913. schedule();
  914. }
  915. __set_current_state(TASK_RUNNING);
  916. fsg->thread_wakeup_needed = 0;
  917. return rc;
  918. }
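/*
 * Typical usage pattern (see do_read() below): callers re-test their wait
 * condition after every return, because sleep_thread() may also return
 * early with -EINTR when a signal (i.e., an exception) is pending:
 *
 *	while (bh->state != BUF_STATE_EMPTY) {
 *		rc = sleep_thread(fsg);
 *		if (rc)
 *			return rc;
 *	}
 */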
  919. /*-------------------------------------------------------------------------*/
  920. static int do_read(struct fsg_dev *fsg)
  921. {
  922. struct fsg_lun *curlun = fsg->curlun;
  923. u32 lba;
  924. struct fsg_buffhd *bh;
  925. int rc;
  926. u32 amount_left;
  927. loff_t file_offset, file_offset_tmp;
  928. unsigned int amount;
  929. unsigned int partial_page;
  930. ssize_t nread;
  931. /* Get the starting Logical Block Address and check that it's
  932. * not too big */
  933. if (fsg->cmnd[0] == SC_READ_6)
  934. lba = get_unaligned_be24(&fsg->cmnd[1]);
  935. else {
  936. lba = get_unaligned_be32(&fsg->cmnd[2]);
  937. /* We allow DPO (Disable Page Out = don't save data in the
  938. * cache) and FUA (Force Unit Access = don't read from the
  939. * cache), but we don't implement them. */
  940. if ((fsg->cmnd[1] & ~0x18) != 0) {
  941. curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
  942. return -EINVAL;
  943. }
  944. }
  945. if (lba >= curlun->num_sectors) {
  946. curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
  947. return -EINVAL;
  948. }
  949. file_offset = ((loff_t) lba) << 9;
  950. /* Carry out the file reads */
  951. amount_left = fsg->data_size_from_cmnd;
  952. if (unlikely(amount_left == 0))
  953. return -EIO; // No default reply
  954. for (;;) {
  955. /* Figure out how much we need to read:
  956. * Try to read the remaining amount.
  957. * But don't read more than the buffer size.
  958. * And don't try to read past the end of the file.
  959. * Finally, if we're not at a page boundary, don't read past
  960. * the next page.
  961. * If this means reading 0 then we were asked to read past
  962. * the end of file. */
  963. amount = min((unsigned int) amount_left, mod_data.buflen);
  964. amount = min((loff_t) amount,
  965. curlun->file_length - file_offset);
  966. partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
  967. if (partial_page > 0)
  968. amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
  969. partial_page);
  970. /* Wait for the next buffer to become available */
  971. bh = fsg->next_buffhd_to_fill;
  972. while (bh->state != BUF_STATE_EMPTY) {
  973. rc = sleep_thread(fsg);
  974. if (rc)
  975. return rc;
  976. }
  977. /* If we were asked to read past the end of file,
  978. * end with an empty buffer. */
  979. if (amount == 0) {
  980. curlun->sense_data =
  981. SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
  982. curlun->sense_data_info = file_offset >> 9;
  983. curlun->info_valid = 1;
  984. bh->inreq->length = 0;
  985. bh->state = BUF_STATE_FULL;
  986. break;
  987. }
  988. /* Perform the read */
  989. file_offset_tmp = file_offset;
  990. nread = vfs_read(curlun->filp,
  991. (char __user *) bh->buf,
  992. amount, &file_offset_tmp);
  993. VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
  994. (unsigned long long) file_offset,
  995. (int) nread);
  996. if (signal_pending(current))
  997. return -EINTR;
  998. if (nread < 0) {
  999. LDBG(curlun, "error in file read: %d\n",
  1000. (int) nread);
  1001. nread = 0;
  1002. } else if (nread < amount) {
  1003. LDBG(curlun, "partial file read: %d/%u\n",
  1004. (int) nread, amount);
  1005. nread -= (nread & 511); // Round down to a block
  1006. }
  1007. file_offset += nread;
  1008. amount_left -= nread;
  1009. fsg->residue -= nread;
  1010. bh->inreq->length = nread;
  1011. bh->state = BUF_STATE_FULL;
  1012. /* If an error occurred, report it and its position */
  1013. if (nread < amount) {
  1014. curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
  1015. curlun->sense_data_info = file_offset >> 9;
  1016. curlun->info_valid = 1;
  1017. break;
  1018. }
  1019. if (amount_left == 0)
  1020. break; // No more left to read
  1021. /* Send this buffer and go read some more */
  1022. bh->inreq->zero = 0;
  1023. start_transfer(fsg, fsg->bulk_in, bh->inreq,
  1024. &bh->inreq_busy, &bh->state);
  1025. fsg->next_buffhd_to_fill = bh->next;
  1026. }
  1027. return -EIO; // No default reply
  1028. }
  1029. /*-------------------------------------------------------------------------*/
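/*
 * Handle WRITE(6)/(10)/(12).  The loop below interleaves two activities:
 * queueing bulk-out requests to collect data from the host (in block-
 * aligned, page-bounded chunks) and draining filled buffers by writing
 * them to the backing file.  The FUA bit is honored by switching the
 * backing file to synchronous writes; DPO is accepted but ignored.
 * Write errors are rounded down to a block boundary and reported through
 * the sense data.
 */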
  1030. static int do_write(struct fsg_dev *fsg)
  1031. {
  1032. struct fsg_lun *curlun = fsg->curlun;
  1033. u32 lba;
  1034. struct fsg_buffhd *bh;
  1035. int get_some_more;
  1036. u32 amount_left_to_req, amount_left_to_write;
  1037. loff_t usb_offset, file_offset, file_offset_tmp;
  1038. unsigned int amount;
  1039. unsigned int partial_page;
  1040. ssize_t nwritten;
  1041. int rc;
  1042. if (curlun->ro) {
  1043. curlun->sense_data = SS_WRITE_PROTECTED;
  1044. return -EINVAL;
  1045. }
  1046. spin_lock(&curlun->filp->f_lock);
  1047. curlun->filp->f_flags &= ~O_SYNC; // Default is not to wait
  1048. spin_unlock(&curlun->filp->f_lock);
  1049. /* Get the starting Logical Block Address and check that it's
  1050. * not too big */
  1051. if (fsg->cmnd[0] == SC_WRITE_6)
  1052. lba = get_unaligned_be24(&fsg->cmnd[1]);
  1053. else {
  1054. lba = get_unaligned_be32(&fsg->cmnd[2]);
  1055. /* We allow DPO (Disable Page Out = don't save data in the
  1056. * cache) and FUA (Force Unit Access = write directly to the
  1057. * medium). We don't implement DPO; we implement FUA by
  1058. * performing synchronous output. */
  1059. if ((fsg->cmnd[1] & ~0x18) != 0) {
  1060. curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
  1061. return -EINVAL;
  1062. }
  1063. if (fsg->cmnd[1] & 0x08) { // FUA
  1064. spin_lock(&curlun->filp->f_lock);
  1065. curlun->filp->f_flags |= O_DSYNC;
  1066. spin_unlock(&curlun->filp->f_lock);
  1067. }
  1068. }
  1069. if (lba >= curlun->num_sectors) {
  1070. curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
  1071. return -EINVAL;
  1072. }
  1073. /* Carry out the file writes */
  1074. get_some_more = 1;
  1075. file_offset = usb_offset = ((loff_t) lba) << 9;
  1076. amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
  1077. while (amount_left_to_write > 0) {
  1078. /* Queue a request for more data from the host */
  1079. bh = fsg->next_buffhd_to_fill;
  1080. if (bh->state == BUF_STATE_EMPTY && get_some_more) {
  1081. /* Figure out how much we want to get:
  1082. * Try to get the remaining amount.
  1083. * But don't get more than the buffer size.
  1084. * And don't try to go past the end of the file.
  1085. * If we're not at a page boundary,
  1086. * don't go past the next page.
  1087. * If this means getting 0, then we were asked
  1088. * to write past the end of file.
  1089. * Finally, round down to a block boundary. */
  1090. amount = min(amount_left_to_req, mod_data.buflen);
  1091. amount = min((loff_t) amount, curlun->file_length -
  1092. usb_offset);
  1093. partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
  1094. if (partial_page > 0)
  1095. amount = min(amount,
  1096. (unsigned int) PAGE_CACHE_SIZE - partial_page);
  1097. if (amount == 0) {
  1098. get_some_more = 0;
  1099. curlun->sense_data =
  1100. SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
  1101. curlun->sense_data_info = usb_offset >> 9;
  1102. curlun->info_valid = 1;
  1103. continue;
  1104. }
			amount -= (amount & 511);
			if (amount == 0) {
				/* Why were we asked to transfer a
				 * partial block? */
				get_some_more = 0;
				continue;
			}
  1112. /* Get the next buffer */
  1113. usb_offset += amount;
  1114. fsg->usb_amount_left -= amount;
  1115. amount_left_to_req -= amount;
  1116. if (amount_left_to_req == 0)
  1117. get_some_more = 0;
  1118. /* amount is always divisible by 512, hence by
  1119. * the bulk-out maxpacket size */
  1120. bh->outreq->length = bh->bulk_out_intended_length =
  1121. amount;
  1122. bh->outreq->short_not_ok = 1;
  1123. start_transfer(fsg, fsg->bulk_out, bh->outreq,
  1124. &bh->outreq_busy, &bh->state);
  1125. fsg->next_buffhd_to_fill = bh->next;
  1126. continue;
  1127. }
  1128. /* Write the received data to the backing file */
  1129. bh = fsg->next_buffhd_to_drain;
  1130. if (bh->state == BUF_STATE_EMPTY && !get_some_more)
  1131. break; // We stopped early
  1132. if (bh->state == BUF_STATE_FULL) {
  1133. smp_rmb();
  1134. fsg->next_buffhd_to_drain = bh->next;
  1135. bh->state = BUF_STATE_EMPTY;
  1136. /* Did something go wrong with the transfer? */
  1137. if (bh->outreq->status != 0) {
  1138. curlun->sense_data = SS_COMMUNICATION_FAILURE;
  1139. curlun->sense_data_info = file_offset >> 9;
  1140. curlun->info_valid = 1;
  1141. break;
  1142. }
  1143. amount = bh->outreq->actual;
  1144. if (curlun->file_length - file_offset < amount) {
  1145. LERROR(curlun,
  1146. "write %u @ %llu beyond end %llu\n",
  1147. amount, (unsigned long long) file_offset,
  1148. (unsigned long long) curlun->file_length);
  1149. amount = curlun->file_length - file_offset;
  1150. }
  1151. /* Perform the write */
  1152. file_offset_tmp = file_offset;
  1153. nwritten = vfs_write(curlun->filp,
  1154. (char __user *) bh->buf,
  1155. amount, &file_offset_tmp);
  1156. VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
  1157. (unsigned long long) file_offset,
  1158. (int) nwritten);
  1159. if (signal_pending(current))
  1160. return -EINTR; // Interrupted!
  1161. if (nwritten < 0) {
  1162. LDBG(curlun, "error in file write: %d\n",
  1163. (int) nwritten);
  1164. nwritten = 0;
  1165. } else if (nwritten < amount) {
  1166. LDBG(curlun, "partial file write: %d/%u\n",
  1167. (int) nwritten, amount);
  1168. nwritten -= (nwritten & 511);
  1169. // Round down to a block
  1170. }
  1171. file_offset += nwritten;
  1172. amount_left_to_write -= nwritten;
  1173. fsg->residue -= nwritten;
  1174. /* If an error occurred, report it and its position */
  1175. if (nwritten < amount) {
  1176. curlun->sense_data = SS_WRITE_ERROR;
  1177. curlun->sense_data_info = file_offset >> 9;
  1178. curlun->info_valid = 1;
  1179. break;
  1180. }
  1181. /* Did the host decide to stop early? */
  1182. if (bh->outreq->actual != bh->outreq->length) {
  1183. fsg->short_packet_received = 1;
  1184. break;
  1185. }
  1186. continue;
  1187. }
  1188. /* Wait for something to happen */
  1189. rc = sleep_thread(fsg);
  1190. if (rc)
  1191. return rc;
  1192. }
  1193. return -EIO; // No default reply
  1194. }
  1195. /*-------------------------------------------------------------------------*/
static int do_synchronize_cache(struct fsg_dev *fsg)
{
	struct fsg_lun	*curlun = fsg->curlun;
	int		rc;

	/* We ignore the requested LBA range and simply write out all of
	 * the backing file's dirty data buffers. */
	rc = fsg_lun_fsync_sub(curlun);
	if (rc)
		curlun->sense_data = SS_WRITE_ERROR;
	return 0;
}
  1207. /*-------------------------------------------------------------------------*/
static void invalidate_sub(struct fsg_lun *curlun)
{
	struct file	*filp = curlun->filp;
	struct inode	*inode = filp->f_path.dentry->d_inode;
	unsigned long	rc;

	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
}
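/*
 * Handle VERIFY(10) with BytChk = 0.  There is no separate medium to
 * compare against, so the command is implemented by flushing the backing
 * file, invalidating its cached pages, and then reading the requested
 * blocks back in; a failed read is reported as an unrecovered read error
 * in the sense data.  The data itself is discarded.
 */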
  1216. static int do_verify(struct fsg_dev *fsg)
  1217. {
  1218. struct fsg_lun *curlun = fsg->curlun;
  1219. u32 lba;
  1220. u32 verification_length;
  1221. struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
  1222. loff_t file_offset, file_offset_tmp;
  1223. u32 amount_left;
  1224. unsigned int amount;
  1225. ssize_t nread;
  1226. /* Get the starting Logical Block Address and check that it's
  1227. * not too big */
  1228. lba = get_unaligned_be32(&fsg->cmnd[2]);
  1229. if (lba >= curlun->num_sectors) {
  1230. curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
  1231. return -EINVAL;
  1232. }
  1233. /* We allow DPO (Disable Page Out = don't save data in the
  1234. * cache) but we don't implement it. */
  1235. if ((fsg->cmnd[1] & ~0x10) != 0) {
  1236. curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
  1237. return -EINVAL;
  1238. }
  1239. verification_length = get_unaligned_be16(&fsg->cmnd[7]);
  1240. if (unlikely(verification_length == 0))
  1241. return -EIO; // No default reply
  1242. /* Prepare to carry out the file verify */
  1243. amount_left = verification_length << 9;
  1244. file_offset = ((loff_t) lba) << 9;
  1245. /* Write out all the dirty buffers before invalidating them */
  1246. fsg_lun_fsync_sub(curlun);
  1247. if (signal_pending(current))
  1248. return -EINTR;
  1249. invalidate_sub(curlun);
  1250. if (signal_pending(current))
  1251. return -EINTR;
  1252. /* Just try to read the requested blocks */
  1253. while (amount_left > 0) {
  1254. /* Figure out how much we need to read:
  1255. * Try to read the remaining amount, but not more than
  1256. * the buffer size.
  1257. * And don't try to read past the end of the file.
  1258. * If this means reading 0 then we were asked to read
  1259. * past the end of file. */
  1260. amount = min((unsigned int) amount_left, mod_data.buflen);
  1261. amount = min((loff_t) amount,
  1262. curlun->file_length - file_offset);
  1263. if (amount == 0) {
  1264. curlun->sense_data =
  1265. SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
  1266. curlun->sense_data_info = file_offset >> 9;
  1267. curlun->info_valid = 1;
  1268. break;
  1269. }
  1270. /* Perform the read */
  1271. file_offset_tmp = file_offset;
  1272. nread = vfs_read(curlun->filp,
  1273. (char __user *) bh->buf,
  1274. amount, &file_offset_tmp);
  1275. VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
  1276. (unsigned long long) file_offset,
  1277. (int) nread);
  1278. if (signal_pending(current))
  1279. return -EINTR;
  1280. if (nread < 0) {
  1281. LDBG(curlun, "error in file verify: %d\n",
  1282. (int) nread);
  1283. nread = 0;
  1284. } else if (nread < amount) {
  1285. LDBG(curlun, "partial file verify: %d/%u\n",
  1286. (int) nread, amount);
  1287. nread -= (nread & 511); // Round down to a sector
  1288. }
  1289. if (nread == 0) {
  1290. curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
  1291. curlun->sense_data_info = file_offset >> 9;
  1292. curlun->info_valid = 1;
  1293. break;
  1294. }
  1295. file_offset += nread;
  1296. amount_left -= nread;
  1297. }
  1298. return 0;
  1299. }
  1300. /*-------------------------------------------------------------------------*/
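/*
 * Handle INQUIRY.  A fixed 36-byte standard INQUIRY response is built:
 * the peripheral device type (disk or CD-ROM, depending on the cdrom
 * module parameter), the removable-media bit, SCSI-2 level and response
 * format, and 8-byte vendor, 16-byte product, and 4-hex-digit release
 * strings.  For an unsupported LUN a "no device type" (0x7f) response is
 * returned instead, with bad_lun_okay set so the command still succeeds.
 */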
  1301. static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
  1302. {
  1303. u8 *buf = (u8 *) bh->buf;
  1304. static char vendor_id[] = "Linux ";
  1305. static char product_disk_id[] = "File-Stor Gadget";
  1306. static char product_cdrom_id[] = "File-CD Gadget ";
  1307. if (!fsg->curlun) { // Unsupported LUNs are okay
  1308. fsg->bad_lun_okay = 1;
  1309. memset(buf, 0, 36);
  1310. buf[0] = 0x7f; // Unsupported, no device-type
  1311. buf[4] = 31; // Additional length
  1312. return 36;
  1313. }
  1314. memset(buf, 0, 8);
  1315. buf[0] = (mod_data.cdrom ? TYPE_CDROM : TYPE_DISK);
  1316. if (mod_data.removable)
  1317. buf[1] = 0x80;
  1318. buf[2] = 2; // ANSI SCSI level 2
  1319. buf[3] = 2; // SCSI-2 INQUIRY data format
  1320. buf[4] = 31; // Additional length
  1321. // No special options
  1322. sprintf(buf + 8, "%-8s%-16s%04x", vendor_id,
  1323. (mod_data.cdrom ? product_cdrom_id :
  1324. product_disk_id),
  1325. mod_data.release);
  1326. return 36;
  1327. }
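/*
 * Handle REQUEST SENSE.  Returns 18 bytes of fixed-format sense data
 * built from the LUN's current sense_data and sense_data_info, then
 * clears them; only a single sense value per LUN is kept.  An
 * unsupported LUN reports LOGICAL UNIT NOT SUPPORTED, again with
 * bad_lun_okay set so the command itself is not failed.
 */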
  1328. static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
  1329. {
  1330. struct fsg_lun *curlun = fsg->curlun;
  1331. u8 *buf = (u8 *) bh->buf;
  1332. u32 sd, sdinfo;
  1333. int valid;
  1334. /*
  1335. * From the SCSI-2 spec., section 7.9 (Unit attention condition):
  1336. *
  1337. * If a REQUEST SENSE command is received from an initiator
  1338. * with a pending unit attention condition (before the target
  1339. * generates the contingent allegiance condition), then the
  1340. * target shall either:
  1341. * a) report any pending sense data and preserve the unit
  1342. * attention condition on the logical unit, or,
  1343. * b) report the unit attention condition, may discard any
  1344. * pending sense data, and clear the unit attention
  1345. * condition on the logical unit for that initiator.
  1346. *
  1347. * FSG normally uses option a); enable this code to use option b).
  1348. */
  1349. #if 0
  1350. if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
  1351. curlun->sense_data = curlun->unit_attention_data;
  1352. curlun->unit_attention_data = SS_NO_SENSE;
  1353. }
  1354. #endif
  1355. if (!curlun) { // Unsupported LUNs are okay
  1356. fsg->bad_lun_okay = 1;
  1357. sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
  1358. sdinfo = 0;
  1359. valid = 0;
  1360. } else {
  1361. sd = curlun->sense_data;
  1362. sdinfo = curlun->sense_data_info;
  1363. valid = curlun->info_valid << 7;
  1364. curlun->sense_data = SS_NO_SENSE;
  1365. curlun->sense_data_info = 0;
  1366. curlun->info_valid = 0;
  1367. }
  1368. memset(buf, 0, 18);
  1369. buf[0] = valid | 0x70; // Valid, current error
  1370. buf[2] = SK(sd);
  1371. put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
  1372. buf[7] = 18 - 8; // Additional sense length
  1373. buf[12] = ASC(sd);
  1374. buf[13] = ASCQ(sd);
  1375. return 18;
  1376. }
  1377. static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
  1378. {
  1379. struct fsg_lun *curlun = fsg->curlun;
  1380. u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
  1381. int pmi = fsg->cmnd[8];
  1382. u8 *buf = (u8 *) bh->buf;
  1383. /* Check the PMI and LBA fields */
  1384. if (pmi > 1 || (pmi == 0 && lba != 0)) {
  1385. curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
  1386. return -EINVAL;
  1387. }
  1388. put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
  1389. /* Max logical block */
  1390. put_unaligned_be32(512, &buf[4]); /* Block length */
  1391. return 8;
  1392. }
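/*
 * Handle the CD-ROM READ HEADER command: report a mode-1 data block
 * (2048 bytes of user data) at the requested address, expressed in LBA
 * or MSF form depending on the MSF bit in the CDB.
 */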
  1393. static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
  1394. {
  1395. struct fsg_lun *curlun = fsg->curlun;
  1396. int msf = fsg->cmnd[1] & 0x02;
  1397. u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
  1398. u8 *buf = (u8 *) bh->buf;
  1399. if ((fsg->cmnd[1] & ~0x02) != 0) { /* Mask away MSF */
  1400. curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
  1401. return -EINVAL;
  1402. }
  1403. if (lba >= curlun->num_sectors) {
  1404. curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
  1405. return -EINVAL;
  1406. }
  1407. memset(buf, 0, 8);
  1408. buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
  1409. store_cdrom_address(&buf[4], msf, lba);
  1410. return 8;
  1411. }
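/*
 * Handle the CD-ROM READ TOC command.  The emulated disc always contains
 * a single data track starting at address 0, so a fixed 20-byte TOC is
 * returned: one descriptor for track 1 plus a lead-out entry placed at
 * num_sectors, with addresses in LBA or MSF form as requested.
 */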
  1412. static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
  1413. {
  1414. struct fsg_lun *curlun = fsg->curlun;
  1415. int msf = fsg->cmnd[1] & 0x02;
  1416. int start_track = fsg->cmnd[6];
  1417. u8 *buf = (u8 *) bh->buf;
  1418. if ((fsg->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
  1419. start_track > 1) {
  1420. curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
  1421. return -EINVAL;
  1422. }
  1423. memset(buf, 0, 20);
  1424. buf[1] = (20-2); /* TOC data length */
  1425. buf[2] = 1; /* First track number */
  1426. buf[3] = 1; /* Last track number */
  1427. buf[5] = 0x16; /* Data track, copying allowed */
  1428. buf[6] = 0x01; /* Only track is number 1 */
  1429. store_cdrom_address(&buf[8], msf, 0);
  1430. buf[13] = 0x16; /* Lead-out track is data */
  1431. buf[14] = 0xAA; /* Lead-out track number */
  1432. store_cdrom_address(&buf[16], msf, curlun->num_sectors);
  1433. return 20;
  1434. }
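/*
 * Handle MODE SENSE(6) and MODE SENSE(10).  Only the Caching mode page
 * (0x08) is reported and none of its fields are changeable; the values
 * returned (write cache enabled, unlimited prefetch) are informational
 * only, since any real caching is presumably left to the backing file.
 * The WP bit in the mode parameter header reflects the LUN's read-only
 * setting.
 */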
  1435. static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
  1436. {
  1437. struct fsg_lun *curlun = fsg->curlun;
  1438. int mscmnd = fsg->cmnd[0];
  1439. u8 *buf = (u8 *) bh->buf;
  1440. u8 *buf0 = buf;
  1441. int pc, page_code;
  1442. int changeable_values, all_pages;
  1443. int valid_page = 0;
  1444. int len, limit;
  1445. if ((fsg->cmnd[1] & ~0x08) != 0) { // Mask away DBD
  1446. curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
  1447. return -EINVAL;
  1448. }
  1449. pc = fsg->cmnd[2] >> 6;
  1450. page_code = fsg->cmnd[2] & 0x3f;
  1451. if (pc == 3) {
  1452. curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
  1453. return -EINVAL;
  1454. }
  1455. changeable_values = (pc == 1);
  1456. all_pages = (page_code == 0x3f);
  1457. /* Write the mode parameter header. Fixed values are: default
  1458. * medium type, no cache control (DPOFUA), and no block descriptors.
  1459. * The only variable value is the WriteProtect bit. We will fill in
  1460. * the mode data length later. */
  1461. memset(buf, 0, 8);
  1462. if (mscmnd == SC_MODE_SENSE_6) {
  1463. buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
  1464. buf += 4;
  1465. limit = 255;
  1466. } else { // SC_MODE_SENSE_10
  1467. buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
  1468. buf += 8;
  1469. limit = 65535; // Should really be mod_data.buflen
  1470. }
  1471. /* No block descriptors */
  1472. /* The mode pages, in numerical order. The only page we support
  1473. * is the Caching page. */
  1474. if (page_code == 0x08 || all_pages) {
  1475. valid_page = 1;
  1476. buf[0] = 0x08; // Page code
  1477. buf[1] = 10; // Page length
  1478. memset(buf+2, 0, 10); // None of the fields are changeable
  1479. if (!changeable_values) {
  1480. buf[2] = 0x04; // Write cache enable,
  1481. // Read cache not disabled
  1482. // No cache retention priorities
  1483. put_unaligned_be16(0xffff, &buf[4]);
  1484. /* Don't disable prefetch */
  1485. /* Minimum prefetch = 0 */
  1486. put_unaligned_be16(0xffff, &buf[8]);
  1487. /* Maximum prefetch */
  1488. put_unaligned_be16(0xffff, &buf[10]);
  1489. /* Maximum prefetch ceiling */
  1490. }
  1491. buf += 12;
  1492. }
  1493. /* Check that a valid page was requested and the mode data length
  1494. * isn't too long. */
  1495. len = buf - buf0;
  1496. if (!valid_page || len > limit) {
  1497. curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
  1498. return -EINVAL;
  1499. }
  1500. /* Store the mode data length */
  1501. if (mscmnd == SC_MODE_SENSE_6)
  1502. buf0[0] = len - 1;
  1503. else
  1504. put_unaligned_be16(len - 2, buf0);
  1505. return len;
  1506. }
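/*
 * Handle START-STOP UNIT, which is accepted only for removable media.
 * In the test build (CONFIG_USB_FILE_STORAGE_TEST) a Stop with LoEj set
 * simulates an eject by closing the backing file, unless the host has
 * prevented medium removal, and a Start merely checks that a medium is
 * loaded.  Outside the test build the command is accepted and ignored.
 */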
  1507. static int do_start_stop(struct fsg_dev *fsg)
  1508. {
  1509. struct fsg_lun *curlun = fsg->curlun;
  1510. int loej, start;
  1511. if (!mod_data.removable) {
  1512. curlun->sense_data = SS_INVALID_COMMAND;
  1513. return -EINVAL;
  1514. }
  1515. // int immed = fsg->cmnd[1] & 0x01;
  1516. loej = fsg->cmnd[4] & 0x02;
  1517. start = fsg->cmnd[4] & 0x01;
  1518. #ifdef CONFIG_USB_FILE_STORAGE_TEST
  1519. if ((fsg->cmnd[1] & ~0x01) != 0 || // Mask away Immed
  1520. (fsg->cmnd[4] & ~0x03) != 0) { // Mask LoEj, Start
  1521. curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
  1522. return -EINVAL;
  1523. }
  1524. if (!start) {
  1525. /* Are we allowed to unload the media? */
  1526. if (curlun->prevent_medium_removal) {
  1527. LDBG(curlun, "unload attempt prevented\n");
  1528. curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
  1529. return -EINVAL;
  1530. }
  1531. if (loej) { // Simulate an unload/eject
  1532. up_read(&fsg->filesem);
  1533. down_write(&fsg->filesem);
  1534. fsg_lun_close(curlun);
  1535. up_write(&fsg->filesem);
  1536. down_read(&fsg->filesem);
  1537. }
  1538. } else {
  1539. /* Our emulation doesn't support mounting; the medium is
  1540. * available for use as soon as it is loaded. */
  1541. if (!fsg_lun_is_open(curlun)) {
  1542. curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
  1543. return -EINVAL;
  1544. }
  1545. }
  1546. #endif
  1547. return 0;
  1548. }
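/*
 * Handle PREVENT-ALLOW MEDIUM REMOVAL.  The prevent flag is recorded so
 * that later eject attempts can be refused (see do_start_stop above);
 * when removal is allowed again, any dirty data is flushed out to the
 * backing file.
 */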
  1549. static int do_prevent_allow(struct fsg_dev *fsg)
  1550. {
  1551. struct fsg_lun *curlun = fsg->curlun;
  1552. int prevent;
  1553. if (!mod_data.removable) {
  1554. curlun->sense_data = SS_INVALID_COMMAND;
  1555. return -EINVAL;
  1556. }
  1557. prevent = fsg->cmnd[4] & 0x01;
  1558. if ((fsg->cmnd[4] & ~0x01) != 0) { // Mask away Prevent
  1559. curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
  1560. return -EINVAL;
  1561. }
  1562. if (curlun->prevent_medium_removal && !prevent)
  1563. fsg_lun_fsync_sub(curlun);
  1564. curlun->prevent_medium_removal = prevent;
  1565. return 0;
  1566. }
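/*
 * Handle READ FORMAT CAPACITIES.  A single Current/Maximum Capacity
 * descriptor is returned: the number of blocks, a 512-byte block length,
 * and descriptor code 0x02 (formatted media).
 */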
static int do_read_format_capacities(struct fsg_dev *fsg,
		struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;
	u8		*buf = (u8 *) bh->buf;

	buf[0] = buf[1] = buf[2] = 0;
	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
	buf += 4;

	put_unaligned_be32(curlun->num_sectors, &buf[0]);
						/* Number of blocks */
	put_unaligned_be32(512, &buf[4]);	/* Block length */
	buf[4] = 0x02;				/* Current capacity */
	return 12;
}
static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;

	/* We don't support MODE SELECT */
	curlun->sense_data = SS_INVALID_COMMAND;
	return -EINVAL;
}
  1588. /*-------------------------------------------------------------------------*/
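/*
 * Halt (or, in the next routine, wedge) the bulk-in endpoint.  The UDC
 * may return -EAGAIN, typically because requests are still queued on the
 * endpoint, so these helpers retry every 100 ms until the halt or wedge
 * takes effect, giving up only on a signal or on some other error.
 * Wedging differs from halting in that the stall is meant to persist
 * until the next reset even if the host issues Clear-Feature(HALT).
 */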
  1589. static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
  1590. {
  1591. int rc;
  1592. rc = fsg_set_halt(fsg, fsg->bulk_in);
  1593. if (rc == -EAGAIN)
  1594. VDBG(fsg, "delayed bulk-in endpoint halt\n");
  1595. while (rc != 0) {
  1596. if (rc != -EAGAIN) {
  1597. WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
  1598. rc = 0;
  1599. break;
  1600. }
  1601. /* Wait for a short time and then try again */
  1602. if (msleep_interruptible(100) != 0)
  1603. return -EINTR;
  1604. rc = usb_ep_set_halt(fsg->bulk_in);
  1605. }
  1606. return rc;
  1607. }
  1608. static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
  1609. {
  1610. int rc;
  1611. DBG(fsg, "bulk-in set wedge\n");
  1612. rc = usb_ep_set_wedge(fsg->bulk_in);
  1613. if (rc == -EAGAIN)
  1614. VDBG(fsg, "delayed bulk-in endpoint wedge\n");
  1615. while (rc != 0) {
  1616. if (rc != -EAGAIN) {
  1617. WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
  1618. rc = 0;
  1619. break;
  1620. }
  1621. /* Wait for a short time and then try again */
  1622. if (msleep_interruptible(100) != 0)
  1623. return -EINTR;
  1624. rc = usb_ep_set_wedge(fsg->bulk_in);
  1625. }
  1626. return rc;
  1627. }
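/*
 * Bulk-only transport, the host expects more data than we have, and we
 * are not allowed to stall: keep the real contents of the current buffer
 * and then keep sending zero-filled buffers until the full expected
 * transfer length (the residue) has been supplied.
 */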
  1628. static int pad_with_zeros(struct fsg_dev *fsg)
  1629. {
  1630. struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
  1631. u32 nkeep = bh->inreq->length;
  1632. u32 nsend;
  1633. int rc;
  1634. bh->state = BUF_STATE_EMPTY; // For the first iteration
  1635. fsg->usb_amount_left = nkeep + fsg->residue;
  1636. while (fsg->usb_amount_left > 0) {
  1637. /* Wait for the next buffer to be free */
  1638. while (bh->state != BUF_STATE_EMPTY) {
  1639. rc = sleep_thread(fsg);
  1640. if (rc)
  1641. return rc;
  1642. }
  1643. nsend = min(fsg->usb_amount_left, (u32) mod_data.buflen);
  1644. memset(bh->buf + nkeep, 0, nsend - nkeep);
  1645. bh->inreq->length = nsend;
  1646. bh->inreq->zero = 0;
  1647. start_transfer(fsg, fsg->bulk_in, bh->inreq,
  1648. &bh->inreq_busy, &bh->state);
  1649. bh = fsg->next_buffhd_to_fill = bh->next;
  1650. fsg->usb_amount_left -= nsend;
  1651. nkeep = 0;
  1652. }
  1653. return 0;
  1654. }
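/*
 * The host is sending more data than the command consumes.  Keep queueing
 * bulk-out requests and discard whatever arrives until usb_amount_left is
 * exhausted; a short packet or a transfer error aborts the whole bulk-out
 * transfer instead.
 */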
  1655. static int throw_away_data(struct fsg_dev *fsg)
  1656. {
  1657. struct fsg_buffhd *bh;
  1658. u32 amount;
  1659. int rc;
  1660. while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
  1661. fsg->usb_amount_left > 0) {
  1662. /* Throw away the data in a filled buffer */
  1663. if (bh->state == BUF_STATE_FULL) {
  1664. smp_rmb();
  1665. bh->state = BUF_STATE_EMPTY;
  1666. fsg->next_buffhd_to_drain = bh->next;
  1667. /* A short packet or an error ends everything */
  1668. if (bh->outreq->actual != bh->outreq->length ||
  1669. bh->outreq->status != 0) {
  1670. raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
  1671. return -EINTR;
  1672. }
  1673. continue;
  1674. }
  1675. /* Try to submit another request if we need one */
  1676. bh = fsg->next_buffhd_to_fill;
  1677. if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
  1678. amount = min(fsg->usb_amount_left,
  1679. (u32) mod_data.buflen);
  1680. /* amount is always divisible by 512, hence by
  1681. * the bulk-out maxpacket size */
  1682. bh->outreq->length = bh->bulk_out_intended_length =
  1683. amount;
  1684. bh->outreq->short_not_ok = 1;
  1685. start_transfer(fsg, fsg->bulk_out, bh->outreq,
  1686. &bh->outreq_busy, &bh->state);
  1687. fsg->next_buffhd_to_fill = bh->next;
  1688. fsg->usb_amount_left -= amount;
  1689. continue;
  1690. }
  1691. /* Otherwise wait for something to happen */
  1692. rc = sleep_thread(fsg);
  1693. if (rc)
  1694. return rc;
  1695. }
  1696. return 0;
  1697. }
  1698. static int finish_reply(struct fsg_dev *fsg)
  1699. {
  1700. struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
  1701. int rc = 0;
  1702. switch (fsg->data_dir) {
  1703. case DATA_DIR_NONE:
  1704. break; // Nothing to send
  1705. /* If we don't know whether the host wants to read or write,
  1706. * this must be CB or CBI with an unknown command. We mustn't
  1707. * try to send or receive any data. So stall both bulk pipes
  1708. * if we can and wait for a reset. */
  1709. case DATA_DIR_UNKNOWN:
  1710. if (mod_data.can_stall) {
  1711. fsg_set_halt(fsg, fsg->bulk_out);
  1712. rc = halt_bulk_in_endpoint(fsg);
  1713. }
  1714. break;
  1715. /* All but the last buffer of data must have already been sent */
  1716. case DATA_DIR_TO_HOST:
  1717. if (fsg->data_size == 0)
  1718. ; // Nothing to send
  1719. /* If there's no residue, simply send the last buffer */
  1720. else if (fsg->residue == 0) {
  1721. bh->inreq->zero = 0;
  1722. start_transfer(fsg, fsg->bulk_in, bh->inreq,
  1723. &bh->inreq_busy, &bh->state);
  1724. fsg->next_buffhd_to_fill = bh->next;
  1725. }
  1726. /* There is a residue. For CB and CBI, simply mark the end
  1727. * of the data with a short packet. However, if we are
  1728. * allowed to stall, there was no data at all (residue ==
  1729. * data_size), and the command failed (invalid LUN or
  1730. * sense data is set), then halt the bulk-in endpoint
  1731. * instead. */
  1732. else if (!transport_is_bbb()) {
  1733. if (mod_data.can_stall &&
  1734. fsg->residue == fsg->data_size &&
  1735. (!fsg->curlun || fsg->curlun->sense_data != SS_NO_SENSE)) {
  1736. bh->state = BUF_STATE_EMPTY;
  1737. rc = halt_bulk_in_endpoint(fsg);
  1738. } else {
  1739. bh->inreq->zero = 1;
  1740. start_transfer(fsg, fsg->bulk_in, bh->inreq,
  1741. &bh->inreq_busy, &bh->state);
  1742. fsg->next_buffhd_to_fill = bh->next;
  1743. }
  1744. }
  1745. /* For Bulk-only, if we're allowed to stall then send the
  1746. * short packet and halt the bulk-in endpoint. If we can't
  1747. * stall, pad out the remaining data with 0's. */
  1748. else {
  1749. if (mod_data.can_stall) {
  1750. bh->inreq->zero = 1;
  1751. start_transfer(fsg, fsg->bulk_in, bh->inreq,
  1752. &bh->inreq_busy, &bh->state);
  1753. fsg->next_buffhd_to_fill = bh->next;
  1754. rc = halt_bulk_in_endpoint(fsg);
  1755. } else
  1756. rc = pad_with_zeros(fsg);
  1757. }
  1758. break;
  1759. /* We have processed all we want from the data the host has sent.
  1760. * There may still be outstanding bulk-out requests. */
  1761. case DATA_DIR_FROM_HOST:
  1762. if (fsg->residue == 0)
  1763. ; // Nothing to receive
  1764. /* Did the host stop sending unexpectedly early? */
  1765. else if (fsg->short_packet_received) {
  1766. raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
  1767. rc = -EINTR;
  1768. }
  1769. /* We haven't processed all the incoming data. Even though
  1770. * we may be allowed to stall, doing so would cause a race.
  1771. * The controller may already have ACK'ed all the remaining
  1772. * bulk-out packets, in which case the host wouldn't see a
  1773. * STALL. Not realizing the endpoint was halted, it wouldn't
  1774. * clear the halt -- leading to problems later on. */
  1775. #if 0
  1776. else if (mod_data.can_stall) {
  1777. fsg_set_halt(fsg, fsg->bulk_out);
  1778. raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
  1779. rc = -EINTR;
  1780. }
  1781. #endif
  1782. /* We can't stall. Read in the excess data and throw it
  1783. * all away. */
  1784. else
  1785. rc = throw_away_data(fsg);
  1786. break;
  1787. }
  1788. return rc;
  1789. }
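/*
 * Send the status for the just-completed command.  For Bulk-only this is
 * a CSW carrying the tag, residue, and pass/fail/phase-error status; CB
 * has no status phase at all; CBI reports status through the interrupt-in
 * endpoint, with UFI placing the ASC/ASCQ bytes in the two data bytes.
 */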
  1790. static int send_status(struct fsg_dev *fsg)
  1791. {
  1792. struct fsg_lun *curlun = fsg->curlun;
  1793. struct fsg_buffhd *bh;
  1794. int rc;
  1795. u8 status = USB_STATUS_PASS;
  1796. u32 sd, sdinfo = 0;
  1797. /* Wait for the next buffer to become available */
  1798. bh = fsg->next_buffhd_to_fill;
  1799. while (bh->state != BUF_STATE_EMPTY) {
  1800. rc = sleep_thread(fsg);
  1801. if (rc)
  1802. return rc;
  1803. }
  1804. if (curlun) {
  1805. sd = curlun->sense_data;
  1806. sdinfo = curlun->sense_data_info;
  1807. } else if (fsg->bad_lun_okay)
  1808. sd = SS_NO_SENSE;
  1809. else
  1810. sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
  1811. if (fsg->phase_error) {
  1812. DBG(fsg, "sending phase-error status\n");
  1813. status = USB_STATUS_PHASE_ERROR;
  1814. sd = SS_INVALID_COMMAND;
  1815. } else if (sd != SS_NO_SENSE) {
  1816. DBG(fsg, "sending command-failure status\n");
  1817. status = USB_STATUS_FAIL;
  1818. VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
  1819. " info x%x\n",
  1820. SK(sd), ASC(sd), ASCQ(sd), sdinfo);
  1821. }
  1822. if (transport_is_bbb()) {
  1823. struct bulk_cs_wrap *csw = bh->buf;
  1824. /* Store and send the Bulk-only CSW */
  1825. csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
  1826. csw->Tag = fsg->tag;
  1827. csw->Residue = cpu_to_le32(fsg->residue);
  1828. csw->Status = status;
  1829. bh->inreq->length = USB_BULK_CS_WRAP_LEN;
  1830. bh->inreq->zero = 0;
  1831. start_transfer(fsg, fsg->bulk_in, bh->inreq,
  1832. &bh->inreq_busy, &bh->state);
  1833. } else if (mod_data.transport_type == USB_PR_CB) {
  1834. /* Control-Bulk transport has no status phase! */
  1835. return 0;
  1836. } else { // USB_PR_CBI
  1837. struct interrupt_data *buf = bh->buf;
  1838. /* Store and send the Interrupt data. UFI sends the ASC
  1839. * and ASCQ bytes. Everything else sends a Type (which
  1840. * is always 0) and the status Value. */
  1841. if (mod_data.protocol_type == USB_SC_UFI) {
  1842. buf->bType = ASC(sd);
  1843. buf->bValue = ASCQ(sd);
  1844. } else {
  1845. buf->bType = 0;
  1846. buf->bValue = status;
  1847. }
  1848. fsg->intreq->length = CBI_INTERRUPT_DATA_LEN;
  1849. fsg->intr_buffhd = bh; // Point to the right buffhd
  1850. fsg->intreq->buf = bh->inreq->buf;
  1851. fsg->intreq->context = bh;
  1852. start_transfer(fsg, fsg->intr_in, fsg->intreq,
  1853. &fsg->intreq_busy, &bh->state);
  1854. }
  1855. fsg->next_buffhd_to_fill = bh->next;
  1856. return 0;
  1857. }
  1858. /*-------------------------------------------------------------------------*/
  1859. /* Check whether the command is properly formed and whether its data size
  1860. * and direction agree with the values we already have. */
  1861. static int check_command(struct fsg_dev *fsg, int cmnd_size,
  1862. enum data_direction data_dir, unsigned int mask,
  1863. int needs_medium, const char *name)
  1864. {
  1865. int i;
  1866. int lun = fsg->cmnd[1] >> 5;
  1867. static const char dirletter[4] = {'u', 'o', 'i', 'n'};
  1868. char hdlen[20];
  1869. struct fsg_lun *curlun;
  1870. /* Adjust the expected cmnd_size for protocol encapsulation padding.
  1871. * Transparent SCSI doesn't pad. */
  1872. if (protocol_is_scsi())
  1873. ;
  1874. /* There's some disagreement as to whether RBC pads commands or not.
  1875. * We'll play it safe and accept either form. */
  1876. else if (mod_data.protocol_type == USB_SC_RBC) {
  1877. if (fsg->cmnd_size == 12)
  1878. cmnd_size = 12;
  1879. /* All the other protocols pad to 12 bytes */
  1880. } else
  1881. cmnd_size = 12;
  1882. hdlen[0] = 0;
  1883. if (fsg->data_dir != DATA_DIR_UNKNOWN)
  1884. sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
  1885. fsg->data_size);
  1886. VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
  1887. name, cmnd_size, dirletter[(int) data_dir],
  1888. fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
  1889. /* We can't reply at all until we know the correct data direction
  1890. * and size. */
  1891. if (fsg->data_size_from_cmnd == 0)
  1892. data_dir = DATA_DIR_NONE;
  1893. if (fsg->data_dir == DATA_DIR_UNKNOWN) { // CB or CBI
  1894. fsg->data_dir = data_dir;
  1895. fsg->data_size = fsg->data_size_from_cmnd;
  1896. } else { // Bulk-only
  1897. if (fsg->data_size < fsg->data_size_from_cmnd) {
  1898. /* Host data size < Device data size is a phase error.
  1899. * Carry out the command, but only transfer as much
  1900. * as we are allowed. */
  1901. fsg->data_size_from_cmnd = fsg->data_size;
  1902. fsg->phase_error = 1;
  1903. }
  1904. }
  1905. fsg->residue = fsg->usb_amount_left = fsg->data_size;
  1906. /* Conflicting data directions is a phase error */
  1907. if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
  1908. fsg->phase_error = 1;
  1909. return -EINVAL;
  1910. }
  1911. /* Verify the length of the command itself */
  1912. if (cmnd_size != fsg->cmnd_size) {
  1913. /* Special case workaround: There are plenty of buggy SCSI
  1914. * implementations. Many have issues with cbw->Length
  1915. * field passing a wrong command size. For those cases we
  1916. * always try to work around the problem by using the length
  1917. * sent by the host side provided it is at least as large
  1918. * as the correct command length.
  1919. * Examples of such cases would be MS-Windows, which issues
  1920. * REQUEST SENSE with cbw->Length == 12 where it should
  1921. * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
  1922. * REQUEST SENSE with cbw->Length == 10 where it should
  1923. * be 6 as well.
  1924. */
  1925. if (cmnd_size <= fsg->cmnd_size) {
  1926. DBG(fsg, "%s is buggy! Expected length %d "
  1927. "but we got %d\n", name,
  1928. cmnd_size, fsg->cmnd_size);
  1929. cmnd_size = fsg->cmnd_size;
  1930. } else {
  1931. fsg->phase_error = 1;
  1932. return -EINVAL;
  1933. }
  1934. }
  1935. /* Check that the LUN values are consistent */
  1936. if (transport_is_bbb()) {
  1937. if (fsg->lun != lun)
  1938. DBG(fsg, "using LUN %d from CBW, "
  1939. "not LUN %d from CDB\n",
  1940. fsg->lun, lun);
  1941. } else
  1942. fsg->lun = lun; // Use LUN from the command
  1943. /* Check the LUN */
  1944. if (fsg->lun >= 0 && fsg->lun < fsg->nluns) {
  1945. fsg->curlun = curlun = &fsg->luns[fsg->lun];
  1946. if (fsg->cmnd[0] != SC_REQUEST_SENSE) {
  1947. curlun->sense_data = SS_NO_SENSE;
  1948. curlun->sense_data_info = 0;
  1949. curlun->info_valid = 0;
  1950. }
  1951. } else {
  1952. fsg->curlun = curlun = NULL;
  1953. fsg->bad_lun_okay = 0;
  1954. /* INQUIRY and REQUEST SENSE commands are explicitly allowed
  1955. * to use unsupported LUNs; all others may not. */
  1956. if (fsg->cmnd[0] != SC_INQUIRY &&
  1957. fsg->cmnd[0] != SC_REQUEST_SENSE) {
  1958. DBG(fsg, "unsupported LUN %d\n", fsg->lun);
  1959. return -EINVAL;
  1960. }
  1961. }
  1962. /* If a unit attention condition exists, only INQUIRY and
  1963. * REQUEST SENSE commands are allowed; anything else must fail. */
  1964. if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
  1965. fsg->cmnd[0] != SC_INQUIRY &&
  1966. fsg->cmnd[0] != SC_REQUEST_SENSE) {
  1967. curlun->sense_data = curlun->unit_attention_data;
  1968. curlun->unit_attention_data = SS_NO_SENSE;
  1969. return -EINVAL;
  1970. }
  1971. /* Check that only command bytes listed in the mask are non-zero */
  1972. fsg->cmnd[1] &= 0x1f; // Mask away the LUN
  1973. for (i = 1; i < cmnd_size; ++i) {
  1974. if (fsg->cmnd[i] && !(mask & (1 << i))) {
  1975. if (curlun)
  1976. curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
  1977. return -EINVAL;
  1978. }
  1979. }
  1980. /* If the medium isn't mounted and the command needs to access
  1981. * it, return an error. */
  1982. if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
  1983. curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
  1984. return -EINVAL;
  1985. }
  1986. return 0;
  1987. }
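/*
 * Parse and carry out one SCSI command.  Each case computes the expected
 * data-transfer length from the CDB, lets check_command() validate the
 * command format, direction, and LUN, and then calls the handler that
 * carries out the command.  Commands we recognize but do not implement,
 * as well as unknown opcodes, fall through to the default case and fail
 * with INVALID COMMAND sense data.
 */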
  1988. static int do_scsi_command(struct fsg_dev *fsg)
  1989. {
  1990. struct fsg_buffhd *bh;
  1991. int rc;
  1992. int reply = -EINVAL;
  1993. int i;
  1994. static char unknown[16];
  1995. dump_cdb(fsg);
  1996. /* Wait for the next buffer to become available for data or status */
  1997. bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
  1998. while (bh->state != BUF_STATE_EMPTY) {
  1999. rc = sleep_thread(fsg);
  2000. if (rc)
  2001. return rc;
  2002. }
  2003. fsg->phase_error = 0;
  2004. fsg->short_packet_received = 0;
  2005. down_read(&fsg->filesem); // We're using the backing file
  2006. switch (fsg->cmnd[0]) {
  2007. case SC_INQUIRY:
  2008. fsg->data_size_from_cmnd = fsg->cmnd[4];
  2009. if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
  2010. (1<<4), 0,
  2011. "INQUIRY")) == 0)
  2012. reply = do_inquiry(fsg, bh);
  2013. break;
  2014. case SC_MODE_SELECT_6:
  2015. fsg->data_size_from_cmnd = fsg->cmnd[4];
  2016. if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
  2017. (1<<1) | (1<<4), 0,
  2018. "MODE SELECT(6)")) == 0)
  2019. reply = do_mode_select(fsg, bh);
  2020. break;
  2021. case SC_MODE_SELECT_10:
  2022. fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
  2023. if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
  2024. (1<<1) | (3<<7), 0,
  2025. "MODE SELECT(10)")) == 0)
  2026. reply = do_mode_select(fsg, bh);
  2027. break;
  2028. case SC_MODE_SENSE_6:
  2029. fsg->data_size_from_cmnd = fsg->cmnd[4];
  2030. if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
  2031. (1<<1) | (1<<2) | (1<<4), 0,
  2032. "MODE SENSE(6)")) == 0)
  2033. reply = do_mode_sense(fsg, bh);
  2034. break;
  2035. case SC_MODE_SENSE_10:
  2036. fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
  2037. if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
  2038. (1<<1) | (1<<2) | (3<<7), 0,
  2039. "MODE SENSE(10)")) == 0)
  2040. reply = do_mode_sense(fsg, bh);
  2041. break;
  2042. case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
  2043. fsg->data_size_from_cmnd = 0;
  2044. if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
  2045. (1<<4), 0,
  2046. "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
  2047. reply = do_prevent_allow(fsg);
  2048. break;
  2049. case SC_READ_6:
  2050. i = fsg->cmnd[4];
  2051. fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
  2052. if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
  2053. (7<<1) | (1<<4), 1,
  2054. "READ(6)")) == 0)
  2055. reply = do_read(fsg);
  2056. break;
  2057. case SC_READ_10:
  2058. fsg->data_size_from_cmnd =
  2059. get_unaligned_be16(&fsg->cmnd[7]) << 9;
  2060. if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
  2061. (1<<1) | (0xf<<2) | (3<<7), 1,
  2062. "READ(10)")) == 0)
  2063. reply = do_read(fsg);
  2064. break;
  2065. case SC_READ_12:
  2066. fsg->data_size_from_cmnd =
  2067. get_unaligned_be32(&fsg->cmnd[6]) << 9;
  2068. if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
  2069. (1<<1) | (0xf<<2) | (0xf<<6), 1,
  2070. "READ(12)")) == 0)
  2071. reply = do_read(fsg);
  2072. break;
  2073. case SC_READ_CAPACITY:
  2074. fsg->data_size_from_cmnd = 8;
  2075. if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
  2076. (0xf<<2) | (1<<8), 1,
  2077. "READ CAPACITY")) == 0)
  2078. reply = do_read_capacity(fsg, bh);
  2079. break;
  2080. case SC_READ_HEADER:
  2081. if (!mod_data.cdrom)
  2082. goto unknown_cmnd;
  2083. fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
  2084. if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
  2085. (3<<7) | (0x1f<<1), 1,
  2086. "READ HEADER")) == 0)
  2087. reply = do_read_header(fsg, bh);
  2088. break;
  2089. case SC_READ_TOC:
  2090. if (!mod_data.cdrom)
  2091. goto unknown_cmnd;
  2092. fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
  2093. if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
  2094. (7<<6) | (1<<1), 1,
  2095. "READ TOC")) == 0)
  2096. reply = do_read_toc(fsg, bh);
  2097. break;
  2098. case SC_READ_FORMAT_CAPACITIES:
  2099. fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
  2100. if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
  2101. (3<<7), 1,
  2102. "READ FORMAT CAPACITIES")) == 0)
  2103. reply = do_read_format_capacities(fsg, bh);
  2104. break;
  2105. case SC_REQUEST_SENSE:
  2106. fsg->data_size_from_cmnd = fsg->cmnd[4];
  2107. if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
  2108. (1<<4), 0,
  2109. "REQUEST SENSE")) == 0)
  2110. reply = do_request_sense(fsg, bh);
  2111. break;
  2112. case SC_START_STOP_UNIT:
  2113. fsg->data_size_from_cmnd = 0;
  2114. if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
  2115. (1<<1) | (1<<4), 0,
  2116. "START-STOP UNIT")) == 0)
  2117. reply = do_start_stop(fsg);
  2118. break;
  2119. case SC_SYNCHRONIZE_CACHE:
  2120. fsg->data_size_from_cmnd = 0;
  2121. if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
  2122. (0xf<<2) | (3<<7), 1,
  2123. "SYNCHRONIZE CACHE")) == 0)
  2124. reply = do_synchronize_cache(fsg);
  2125. break;
  2126. case SC_TEST_UNIT_READY:
  2127. fsg->data_size_from_cmnd = 0;
  2128. reply = check_command(fsg, 6, DATA_DIR_NONE,
  2129. 0, 1,
  2130. "TEST UNIT READY");
  2131. break;
  2132. /* Although optional, this command is used by MS-Windows. We
  2133. * support a minimal version: BytChk must be 0. */
  2134. case SC_VERIFY:
  2135. fsg->data_size_from_cmnd = 0;
  2136. if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
  2137. (1<<1) | (0xf<<2) | (3<<7), 1,
  2138. "VERIFY")) == 0)
  2139. reply = do_verify(fsg);
  2140. break;
  2141. case SC_WRITE_6:
  2142. i = fsg->cmnd[4];
  2143. fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
  2144. if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
  2145. (7<<1) | (1<<4), 1,
  2146. "WRITE(6)")) == 0)
  2147. reply = do_write(fsg);
  2148. break;
  2149. case SC_WRITE_10:
  2150. fsg->data_size_from_cmnd =
  2151. get_unaligned_be16(&fsg->cmnd[7]) << 9;
  2152. if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
  2153. (1<<1) | (0xf<<2) | (3<<7), 1,
  2154. "WRITE(10)")) == 0)
  2155. reply = do_write(fsg);
  2156. break;
  2157. case SC_WRITE_12:
  2158. fsg->data_size_from_cmnd =
  2159. get_unaligned_be32(&fsg->cmnd[6]) << 9;
  2160. if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
  2161. (1<<1) | (0xf<<2) | (0xf<<6), 1,
  2162. "WRITE(12)")) == 0)
  2163. reply = do_write(fsg);
  2164. break;
  2165. /* Some mandatory commands that we recognize but don't implement.
  2166. * They don't mean much in this setting. It's left as an exercise
  2167. * for anyone interested to implement RESERVE and RELEASE in terms
  2168. * of Posix locks. */
  2169. case SC_FORMAT_UNIT:
  2170. case SC_RELEASE:
  2171. case SC_RESERVE:
  2172. case SC_SEND_DIAGNOSTIC:
  2173. // Fall through
  2174. default:
  2175. unknown_cmnd:
  2176. fsg->data_size_from_cmnd = 0;
  2177. sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
  2178. if ((reply = check_command(fsg, fsg->cmnd_size,
  2179. DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
  2180. fsg->curlun->sense_data = SS_INVALID_COMMAND;
  2181. reply = -EINVAL;
  2182. }
  2183. break;
  2184. }
  2185. up_read(&fsg->filesem);
  2186. if (reply == -EINTR || signal_pending(current))
  2187. return -EINTR;
  2188. /* Set up the single reply buffer for finish_reply() */
  2189. if (reply == -EINVAL)
  2190. reply = 0; // Error reply length
  2191. if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
  2192. reply = min((u32) reply, fsg->data_size_from_cmnd);
  2193. bh->inreq->length = reply;
  2194. bh->state = BUF_STATE_FULL;
  2195. fsg->residue -= reply;
  2196. } // Otherwise it's already set
  2197. return 0;
  2198. }
  2199. /*-------------------------------------------------------------------------*/
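/*
 * A bulk-out request carrying a Command Block Wrapper has completed.
 * Validate it (correct length and signature, meaningful LUN, flags, and
 * command length) and copy the CDB, tag, transfer length, and direction
 * into the device state for do_scsi_command() to act on.
 */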
  2200. static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
  2201. {
  2202. struct usb_request *req = bh->outreq;
  2203. struct fsg_bulk_cb_wrap *cbw = req->buf;
  2204. /* Was this a real packet? Should it be ignored? */
  2205. if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
  2206. return -EINVAL;
  2207. /* Is the CBW valid? */
  2208. if (req->actual != USB_BULK_CB_WRAP_LEN ||
  2209. cbw->Signature != cpu_to_le32(
  2210. USB_BULK_CB_SIG)) {
  2211. DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
  2212. req->actual,
  2213. le32_to_cpu(cbw->Signature));
  2214. /* The Bulk-only spec says we MUST stall the IN endpoint
  2215. * (6.6.1), so it's unavoidable. It also says we must
  2216. * retain this state until the next reset, but there's
  2217. * no way to tell the controller driver it should ignore
  2218. * Clear-Feature(HALT) requests.
  2219. *
  2220. * We aren't required to halt the OUT endpoint; instead
  2221. * we can simply accept and discard any data received
  2222. * until the next reset. */
  2223. wedge_bulk_in_endpoint(fsg);
  2224. set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
  2225. return -EINVAL;
  2226. }
  2227. /* Is the CBW meaningful? */
  2228. if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
  2229. cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
  2230. DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
  2231. "cmdlen %u\n",
  2232. cbw->Lun, cbw->Flags, cbw->Length);
  2233. /* We can do anything we want here, so let's stall the
  2234. * bulk pipes if we are allowed to. */
  2235. if (mod_data.can_stall) {
  2236. fsg_set_halt(fsg, fsg->bulk_out);
  2237. halt_bulk_in_endpoint(fsg);
  2238. }
  2239. return -EINVAL;
  2240. }
  2241. /* Save the command for later */
  2242. fsg->cmnd_size = cbw->Length;
  2243. memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
  2244. if (cbw->Flags & USB_BULK_IN_FLAG)
  2245. fsg->data_dir = DATA_DIR_TO_HOST;
  2246. else
  2247. fsg->data_dir = DATA_DIR_FROM_HOST;
  2248. fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
  2249. if (fsg->data_size == 0)
  2250. fsg->data_dir = DATA_DIR_NONE;
  2251. fsg->lun = cbw->Lun;
  2252. fsg->tag = cbw->Tag;
  2253. return 0;
  2254. }
  2255. static int get_next_command(struct fsg_dev *fsg)
  2256. {
  2257. struct fsg_buffhd *bh;
  2258. int rc = 0;
  2259. if (transport_is_bbb()) {
  2260. /* Wait for the next buffer to become available */
  2261. bh = fsg->next_buffhd_to_fill;
  2262. while (bh->state != BUF_STATE_EMPTY) {
  2263. rc = sleep_thread(fsg);
  2264. if (rc)
  2265. return rc;
  2266. }
  2267. /* Queue a request to read a Bulk-only CBW */
  2268. set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
  2269. bh->outreq->short_not_ok = 1;
  2270. start_transfer(fsg, fsg->bulk_out, bh->outreq,
  2271. &bh->outreq_busy, &bh->state);
  2272. /* We will drain the buffer in software, which means we
  2273. * can reuse it for the next filling. No need to advance
  2274. * next_buffhd_to_fill. */
  2275. /* Wait for the CBW to arrive */
  2276. while (bh->state != BUF_STATE_FULL) {
  2277. rc = sleep_thread(fsg);
  2278. if (rc)
  2279. return rc;
  2280. }
  2281. smp_rmb();
  2282. rc = received_cbw(fsg, bh);
  2283. bh->state = BUF_STATE_EMPTY;
  2284. } else { // USB_PR_CB or USB_PR_CBI
  2285. /* Wait for the next command to arrive */
  2286. while (fsg->cbbuf_cmnd_size == 0) {
  2287. rc = sleep_thread(fsg);
  2288. if (rc)
  2289. return rc;
  2290. }
  2291. /* Is the previous status interrupt request still busy?
  2292. * The host is allowed to skip reading the status,
  2293. * so we must cancel it. */
  2294. if (fsg->intreq_busy)
  2295. usb_ep_dequeue(fsg->intr_in, fsg->intreq);
  2296. /* Copy the command and mark the buffer empty */
  2297. fsg->data_dir = DATA_DIR_UNKNOWN;
  2298. spin_lock_irq(&fsg->lock);
  2299. fsg->cmnd_size = fsg->cbbuf_cmnd_size;
  2300. memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
  2301. fsg->cbbuf_cmnd_size = 0;
  2302. spin_unlock_irq(&fsg->lock);
  2303. }
  2304. return rc;
  2305. }
  2306. /*-------------------------------------------------------------------------*/
  2307. static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
  2308. const struct usb_endpoint_descriptor *d)
  2309. {
  2310. int rc;
  2311. ep->driver_data = fsg;
  2312. rc = usb_ep_enable(ep, d);
  2313. if (rc)
  2314. ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
  2315. return rc;
  2316. }
  2317. static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
  2318. struct usb_request **preq)
  2319. {
  2320. *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
  2321. if (*preq)
  2322. return 0;
  2323. ERROR(fsg, "can't allocate request for %s\n", ep->name);
  2324. return -ENOMEM;
  2325. }
  2326. /*
  2327. * Reset interface setting and re-init endpoint state (toggle etc).
  2328. * Call with altsetting < 0 to disable the interface. The only other
  2329. * available altsetting is 0, which enables the interface.
  2330. */
  2331. static int do_set_interface(struct fsg_dev *fsg, int altsetting)
  2332. {
  2333. int rc = 0;
  2334. int i;
  2335. const struct usb_endpoint_descriptor *d;
  2336. if (fsg->running)
  2337. DBG(fsg, "reset interface\n");
  2338. reset:
  2339. /* Deallocate the requests */
  2340. for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
  2341. struct fsg_buffhd *bh = &fsg->buffhds[i];
  2342. if (bh->inreq) {
  2343. usb_ep_free_request(fsg->bulk_in, bh->inreq);
  2344. bh->inreq = NULL;
  2345. }
  2346. if (bh->outreq) {
  2347. usb_ep_free_request(fsg->bulk_out, bh->outreq);
  2348. bh->outreq = NULL;
  2349. }
  2350. }
  2351. if (fsg->intreq) {
  2352. usb_ep_free_request(fsg->intr_in, fsg->intreq);
  2353. fsg->intreq = NULL;
  2354. }
  2355. /* Disable the endpoints */
  2356. if (fsg->bulk_in_enabled) {
  2357. usb_ep_disable(fsg->bulk_in);
  2358. fsg->bulk_in_enabled = 0;
  2359. }
  2360. if (fsg->bulk_out_enabled) {
  2361. usb_ep_disable(fsg->bulk_out);
  2362. fsg->bulk_out_enabled = 0;
  2363. }
  2364. if (fsg->intr_in_enabled) {
  2365. usb_ep_disable(fsg->intr_in);
  2366. fsg->intr_in_enabled = 0;
  2367. }
  2368. fsg->running = 0;
  2369. if (altsetting < 0 || rc != 0)
  2370. return rc;
  2371. DBG(fsg, "set interface %d\n", altsetting);
  2372. /* Enable the endpoints */
  2373. d = fsg_ep_desc(fsg->gadget,
  2374. &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
  2375. if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
  2376. goto reset;
  2377. fsg->bulk_in_enabled = 1;
  2378. d = fsg_ep_desc(fsg->gadget,
  2379. &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
  2380. if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
  2381. goto reset;
  2382. fsg->bulk_out_enabled = 1;
  2383. fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
  2384. clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
  2385. if (transport_is_cbi()) {
  2386. d = fsg_ep_desc(fsg->gadget,
  2387. &fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc);
  2388. if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
  2389. goto reset;
  2390. fsg->intr_in_enabled = 1;
  2391. }
  2392. /* Allocate the requests */
  2393. for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
  2394. struct fsg_buffhd *bh = &fsg->buffhds[i];
  2395. if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
  2396. goto reset;
  2397. if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
  2398. goto reset;
  2399. bh->inreq->buf = bh->outreq->buf = bh->buf;
  2400. bh->inreq->context = bh->outreq->context = bh;
  2401. bh->inreq->complete = bulk_in_complete;
  2402. bh->outreq->complete = bulk_out_complete;
  2403. }
  2404. if (transport_is_cbi()) {
  2405. if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0)
  2406. goto reset;
  2407. fsg->intreq->complete = intr_in_complete;
  2408. }
  2409. fsg->running = 1;
  2410. for (i = 0; i < fsg->nluns; ++i)
  2411. fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
  2412. return rc;
  2413. }
  2414. /*
  2415. * Change our operational configuration. This code must agree with the code
  2416. * that returns config descriptors, and with interface altsetting code.
  2417. *
  2418. * It's also responsible for power management interactions. Some
  2419. * configurations might not work with our current power sources.
  2420. * For now we just assume the gadget is always self-powered.
  2421. */
  2422. static int do_set_config(struct fsg_dev *fsg, u8 new_config)
  2423. {
  2424. int rc = 0;
  2425. /* Disable the single interface */
  2426. if (fsg->config != 0) {
  2427. DBG(fsg, "reset config\n");
  2428. fsg->config = 0;
  2429. rc = do_set_interface(fsg, -1);
  2430. }
  2431. /* Enable the interface */
  2432. if (new_config != 0) {
  2433. fsg->config = new_config;
  2434. if ((rc = do_set_interface(fsg, 0)) != 0)
  2435. fsg->config = 0; // Reset on errors
  2436. else {
  2437. char *speed;
  2438. switch (fsg->gadget->speed) {
  2439. case USB_SPEED_LOW: speed = "low"; break;
  2440. case USB_SPEED_FULL: speed = "full"; break;
  2441. case USB_SPEED_HIGH: speed = "high"; break;
  2442. default: speed = "?"; break;
  2443. }
  2444. INFO(fsg, "%s speed config #%d\n", speed, fsg->config);
  2445. }
  2446. }
  2447. return rc;
  2448. }
  2449. /*-------------------------------------------------------------------------*/
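/*
 * Handle an exception raised against the main thread: an aborted bulk-out
 * transfer, a reset, an interface or configuration change, a disconnect,
 * a fatal signal, or a request to exit.  All outstanding transfers are
 * cancelled and allowed to drain, the endpoint FIFOs are flushed, the
 * buffer and SCSI state is cleared, and then whatever extra action the
 * old state calls for is carried out.
 */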
static void handle_exception(struct fsg_dev *fsg)
{
	siginfo_t info;
	int sig;
	int i;
	int num_active;
	struct fsg_buffhd *bh;
	enum fsg_state old_state;
	u8 new_config;
	struct fsg_lun *curlun;
	unsigned int exception_req_tag;
	int rc;

	/* Clear the existing signals. Anything but SIGUSR1 is converted
	 * into a high-priority EXIT exception. */
	for (;;) {
		sig = dequeue_signal_lock(current, &current->blocked, &info);
		if (!sig)
			break;
		if (sig != SIGUSR1) {
			if (fsg->state < FSG_STATE_EXIT)
				DBG(fsg, "Main thread exiting on signal\n");
			raise_exception(fsg, FSG_STATE_EXIT);
		}
	}

	/* Cancel all the pending transfers */
	if (fsg->intreq_busy)
		usb_ep_dequeue(fsg->intr_in, fsg->intreq);
	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
		bh = &fsg->buffhds[i];
		if (bh->inreq_busy)
			usb_ep_dequeue(fsg->bulk_in, bh->inreq);
		if (bh->outreq_busy)
			usb_ep_dequeue(fsg->bulk_out, bh->outreq);
	}

	/* Wait until everything is idle */
	for (;;) {
		num_active = fsg->intreq_busy;
		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
			bh = &fsg->buffhds[i];
			num_active += bh->inreq_busy + bh->outreq_busy;
		}
		if (num_active == 0)
			break;
		if (sleep_thread(fsg))
			return;
	}

	/* Clear out the controller's fifos */
	if (fsg->bulk_in_enabled)
		usb_ep_fifo_flush(fsg->bulk_in);
	if (fsg->bulk_out_enabled)
		usb_ep_fifo_flush(fsg->bulk_out);
	if (fsg->intr_in_enabled)
		usb_ep_fifo_flush(fsg->intr_in);

	/* Reset the I/O buffer states and pointers, the SCSI
	 * state, and the exception. Then invoke the handler. */
	spin_lock_irq(&fsg->lock);

	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
		bh = &fsg->buffhds[i];
		bh->state = BUF_STATE_EMPTY;
	}
	fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
			&fsg->buffhds[0];

	exception_req_tag = fsg->exception_req_tag;
	new_config = fsg->new_config;
	old_state = fsg->state;

	if (old_state == FSG_STATE_ABORT_BULK_OUT)
		fsg->state = FSG_STATE_STATUS_PHASE;
	else {
		for (i = 0; i < fsg->nluns; ++i) {
			curlun = &fsg->luns[i];
			curlun->prevent_medium_removal = 0;
			curlun->sense_data = curlun->unit_attention_data =
					SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
		fsg->state = FSG_STATE_IDLE;
	}
	spin_unlock_irq(&fsg->lock);

	/* Carry out any extra actions required for the exception */
	switch (old_state) {
	default:
		break;

	case FSG_STATE_ABORT_BULK_OUT:
		send_status(fsg);
		spin_lock_irq(&fsg->lock);
		if (fsg->state == FSG_STATE_STATUS_PHASE)
			fsg->state = FSG_STATE_IDLE;
		spin_unlock_irq(&fsg->lock);
		break;

	case FSG_STATE_RESET:
		/* In case we were forced against our will to halt a
		 * bulk endpoint, clear the halt now. (The SuperH UDC
		 * requires this.) */
		if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
			usb_ep_clear_halt(fsg->bulk_in);

		if (transport_is_bbb()) {
			if (fsg->ep0_req_tag == exception_req_tag)
				ep0_queue(fsg);	// Complete the status stage

		} else if (transport_is_cbi())
			send_status(fsg);	// Status by interrupt pipe

		/* Technically this should go here, but it would only be
		 * a waste of time. Ditto for the INTERFACE_CHANGE and
		 * CONFIG_CHANGE cases. */
		// for (i = 0; i < fsg->nluns; ++i)
		//	fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
		break;

	case FSG_STATE_INTERFACE_CHANGE:
		rc = do_set_interface(fsg, 0);
		if (fsg->ep0_req_tag != exception_req_tag)
			break;
		if (rc != 0)			// STALL on errors
			fsg_set_halt(fsg, fsg->ep0);
		else				// Complete the status stage
			ep0_queue(fsg);
		break;

	case FSG_STATE_CONFIG_CHANGE:
		rc = do_set_config(fsg, new_config);
		if (fsg->ep0_req_tag != exception_req_tag)
			break;
		if (rc != 0)			// STALL on errors
			fsg_set_halt(fsg, fsg->ep0);
		else				// Complete the status stage
			ep0_queue(fsg);
		break;

	case FSG_STATE_DISCONNECT:
		for (i = 0; i < fsg->nluns; ++i)
			fsg_lun_fsync_sub(fsg->luns + i);
		do_set_config(fsg, 0);		// Unconfigured state
		break;

	case FSG_STATE_EXIT:
	case FSG_STATE_TERMINATED:
		do_set_config(fsg, 0);		// Free resources
		spin_lock_irq(&fsg->lock);
		fsg->state = FSG_STATE_TERMINATED;	// Stop the thread
		spin_unlock_irq(&fsg->lock);
		break;
	}
}
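
/* In short: handle_exception() drains pending signals, cancels and waits out
 * every outstanding USB request, flushes the endpoint FIFOs, resets the
 * buffer and SCSI state under the lock, and finally performs the action
 * specific to the state recorded in old_state. */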

/*-------------------------------------------------------------------------*/

static int fsg_main_thread(void *fsg_)
{
	struct fsg_dev *fsg = fsg_;

	/* Allow the thread to be killed by a signal, but set the signal mask
	 * to block everything but INT, TERM, KILL, and USR1. */
	allow_signal(SIGINT);
	allow_signal(SIGTERM);
	allow_signal(SIGKILL);
	allow_signal(SIGUSR1);

	/* Allow the thread to be frozen */
	set_freezable();

	/* Arrange for userspace references to be interpreted as kernel
	 * pointers. That way we can pass a kernel pointer to a routine
	 * that expects a __user pointer and it will work okay. */
	set_fs(get_ds());

	/* The main loop */
	while (fsg->state != FSG_STATE_TERMINATED) {
		if (exception_in_progress(fsg) || signal_pending(current)) {
			handle_exception(fsg);
			continue;
		}

		if (!fsg->running) {
			sleep_thread(fsg);
			continue;
		}

		if (get_next_command(fsg))
			continue;

		spin_lock_irq(&fsg->lock);
		if (!exception_in_progress(fsg))
			fsg->state = FSG_STATE_DATA_PHASE;
		spin_unlock_irq(&fsg->lock);

		if (do_scsi_command(fsg) || finish_reply(fsg))
			continue;

		spin_lock_irq(&fsg->lock);
		if (!exception_in_progress(fsg))
			fsg->state = FSG_STATE_STATUS_PHASE;
		spin_unlock_irq(&fsg->lock);

		if (send_status(fsg))
			continue;

		spin_lock_irq(&fsg->lock);
		if (!exception_in_progress(fsg))
			fsg->state = FSG_STATE_IDLE;
		spin_unlock_irq(&fsg->lock);
	}

	spin_lock_irq(&fsg->lock);
	fsg->thread_task = NULL;
	spin_unlock_irq(&fsg->lock);

	/* If we are exiting because of a signal, unregister the
	 * gadget driver. */
	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
		usb_gadget_unregister_driver(&fsg_driver);

	/* Let the unbind and cleanup routines know the thread has exited */
	complete_and_exit(&fsg->thread_notifier, 0);
}
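
/* Each pass through the main loop walks one command through the usual
 * command/data/status sequence: get_next_command(), then do_scsi_command()
 * and finish_reply(), then send_status(), with fsg->state stepping through
 * IDLE -> DATA_PHASE -> STATUS_PHASE -> IDLE.  A non-zero return from any
 * of those helpers sends control back to the top of the loop, where a
 * pending exception is handled before the next command is fetched. */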

/*-------------------------------------------------------------------------*/

/* The write permissions and store_xxx pointers are set in fsg_bind() */
static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL);
static DEVICE_ATTR(file, 0444, fsg_show_file, NULL);
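
/* These appear in sysfs as "ro" and "file" attributes on each LUN device
 * (named "<gadget>-lun<n>" in fsg_bind() below).  When the removable module
 * parameter is set, fsg_bind() changes their mode to 0644 and installs the
 * store methods, so the backing file can be changed at run time, for
 * example (path is illustrative):
 *
 *	echo /path/to/backing.img > /sys/devices/.../<gadget>-lun0/file
 */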

/*-------------------------------------------------------------------------*/

static void fsg_release(struct kref *ref)
{
	struct fsg_dev *fsg = container_of(ref, struct fsg_dev, ref);

	kfree(fsg->luns);
	kfree(fsg);
}

static void lun_release(struct device *dev)
{
	struct rw_semaphore *filesem = dev_get_drvdata(dev);
	struct fsg_dev *fsg =
		container_of(filesem, struct fsg_dev, filesem);

	kref_put(&fsg->ref, fsg_release);
}
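
/* Reference counting: fsg_alloc() creates the fsg_dev with one reference,
 * fsg_bind() takes an extra reference for every LUN device it registers,
 * lun_release() drops one as each LUN device goes away, and fsg_cleanup()
 * drops the initial reference.  fsg_release() frees everything once the
 * last reference is gone. */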

static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
{
	struct fsg_dev *fsg = get_gadget_data(gadget);
	int i;
	struct fsg_lun *curlun;
	struct usb_request *req = fsg->ep0req;

	DBG(fsg, "unbind\n");
	clear_bit(REGISTERED, &fsg->atomic_bitflags);

	/* Unregister the sysfs attribute files and the LUNs */
	for (i = 0; i < fsg->nluns; ++i) {
		curlun = &fsg->luns[i];
		if (curlun->registered) {
			device_remove_file(&curlun->dev, &dev_attr_ro);
			device_remove_file(&curlun->dev, &dev_attr_file);
			fsg_lun_close(curlun);
			device_unregister(&curlun->dev);
			curlun->registered = 0;
		}
	}

	/* If the thread isn't already dead, tell it to exit now */
	if (fsg->state != FSG_STATE_TERMINATED) {
		raise_exception(fsg, FSG_STATE_EXIT);
		wait_for_completion(&fsg->thread_notifier);

		/* The cleanup routine waits for this completion also */
		complete(&fsg->thread_notifier);
	}

	/* Free the data buffers */
	for (i = 0; i < FSG_NUM_BUFFERS; ++i)
		kfree(fsg->buffhds[i].buf);

	/* Free the request and buffer for endpoint 0 */
	if (req) {
		kfree(req->buf);
		usb_ep_free_request(fsg->ep0, req);
	}

	set_gadget_data(gadget, NULL);
}

static int __init check_parameters(struct fsg_dev *fsg)
{
	int prot;
	int gcnum;

	/* Store the default values */
	mod_data.transport_type = USB_PR_BULK;
	mod_data.transport_name = "Bulk-only";
	mod_data.protocol_type = USB_SC_SCSI;
	mod_data.protocol_name = "Transparent SCSI";

	/* Some peripheral controllers are known not to be able to
	 * halt bulk endpoints correctly. If one of them is present,
	 * disable stalls.
	 */
	if (gadget_is_at91(fsg->gadget))
		mod_data.can_stall = 0;

	if (mod_data.release == 0xffff) {	// Parameter wasn't set
		gcnum = usb_gadget_controller_number(fsg->gadget);
		if (gcnum >= 0)
			mod_data.release = 0x0300 + gcnum;
		else {
			WARNING(fsg, "controller '%s' not recognized\n",
				fsg->gadget->name);
			mod_data.release = 0x0399;
		}
	}

	prot = simple_strtol(mod_data.protocol_parm, NULL, 0);

#ifdef CONFIG_USB_FILE_STORAGE_TEST
	if (strnicmp(mod_data.transport_parm, "BBB", 10) == 0) {
		;		// Use default setting
	} else if (strnicmp(mod_data.transport_parm, "CB", 10) == 0) {
		mod_data.transport_type = USB_PR_CB;
		mod_data.transport_name = "Control-Bulk";
	} else if (strnicmp(mod_data.transport_parm, "CBI", 10) == 0) {
		mod_data.transport_type = USB_PR_CBI;
		mod_data.transport_name = "Control-Bulk-Interrupt";
	} else {
		ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm);
		return -EINVAL;
	}

	if (strnicmp(mod_data.protocol_parm, "SCSI", 10) == 0 ||
			prot == USB_SC_SCSI) {
		;		// Use default setting
	} else if (strnicmp(mod_data.protocol_parm, "RBC", 10) == 0 ||
			prot == USB_SC_RBC) {
		mod_data.protocol_type = USB_SC_RBC;
		mod_data.protocol_name = "RBC";
	} else if (strnicmp(mod_data.protocol_parm, "8020", 4) == 0 ||
			strnicmp(mod_data.protocol_parm, "ATAPI", 10) == 0 ||
			prot == USB_SC_8020) {
		mod_data.protocol_type = USB_SC_8020;
		mod_data.protocol_name = "8020i (ATAPI)";
	} else if (strnicmp(mod_data.protocol_parm, "QIC", 3) == 0 ||
			prot == USB_SC_QIC) {
		mod_data.protocol_type = USB_SC_QIC;
		mod_data.protocol_name = "QIC-157";
	} else if (strnicmp(mod_data.protocol_parm, "UFI", 10) == 0 ||
			prot == USB_SC_UFI) {
		mod_data.protocol_type = USB_SC_UFI;
		mod_data.protocol_name = "UFI";
	} else if (strnicmp(mod_data.protocol_parm, "8070", 4) == 0 ||
			prot == USB_SC_8070) {
		mod_data.protocol_type = USB_SC_8070;
		mod_data.protocol_name = "8070i";
	} else {
		ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm);
		return -EINVAL;
	}

	mod_data.buflen &= PAGE_CACHE_MASK;
	if (mod_data.buflen <= 0) {
		ERROR(fsg, "invalid buflen\n");
		return -ETOOSMALL;
	}
#endif /* CONFIG_USB_FILE_STORAGE_TEST */

	return 0;
}
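
/* Example (test builds only, i.e. CONFIG_USB_FILE_STORAGE_TEST): assuming
 * the usual g_file_storage module name and the transport=/protocol= module
 * parameters declared earlier in this file, a CBI/UFI configuration could
 * be requested with something like:
 *
 *	modprobe g_file_storage file=/path/to/backing.img \
 *		transport=CBI protocol=UFI
 *
 * The strings are matched case-insensitively above; numeric subclass codes
 * (e.g. protocol=0x04 for UFI) are accepted as well via simple_strtol(). */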

static int __init fsg_bind(struct usb_gadget *gadget)
{
	struct fsg_dev *fsg = the_fsg;
	int rc;
	int i;
	struct fsg_lun *curlun;
	struct usb_ep *ep;
	struct usb_request *req;
	char *pathbuf, *p;

	fsg->gadget = gadget;
	set_gadget_data(gadget, fsg);
	fsg->ep0 = gadget->ep0;
	fsg->ep0->driver_data = fsg;

	if ((rc = check_parameters(fsg)) != 0)
		goto out;

	if (mod_data.removable) {	// Enable the store_xxx attributes
		dev_attr_file.attr.mode = 0644;
		dev_attr_file.store = fsg_store_file;
		if (!mod_data.cdrom) {
			dev_attr_ro.attr.mode = 0644;
			dev_attr_ro.store = fsg_store_ro;
		}
	}

	/* Find out how many LUNs there should be */
	i = mod_data.nluns;
	if (i == 0)
		i = max(mod_data.num_filenames, 1u);
	if (i > FSG_MAX_LUNS) {
		ERROR(fsg, "invalid number of LUNs: %d\n", i);
		rc = -EINVAL;
		goto out;
	}

	/* Create the LUNs, open their backing files, and register the
	 * LUN devices in sysfs. */
	fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL);
	if (!fsg->luns) {
		rc = -ENOMEM;
		goto out;
	}
	fsg->nluns = i;

	for (i = 0; i < fsg->nluns; ++i) {
		curlun = &fsg->luns[i];
		curlun->cdrom = !!mod_data.cdrom;
		curlun->ro = mod_data.cdrom || mod_data.ro[i];
		curlun->initially_ro = curlun->ro;
		curlun->removable = mod_data.removable;
		curlun->dev.release = lun_release;
		curlun->dev.parent = &gadget->dev;
		curlun->dev.driver = &fsg_driver.driver;
		dev_set_drvdata(&curlun->dev, &fsg->filesem);
		dev_set_name(&curlun->dev, "%s-lun%d",
				dev_name(&gadget->dev), i);

		if ((rc = device_register(&curlun->dev)) != 0) {
			INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
			goto out;
		}
		if ((rc = device_create_file(&curlun->dev,
					&dev_attr_ro)) != 0 ||
				(rc = device_create_file(&curlun->dev,
					&dev_attr_file)) != 0) {
			device_unregister(&curlun->dev);
			goto out;
		}
		curlun->registered = 1;
		kref_get(&fsg->ref);

		if (mod_data.file[i] && *mod_data.file[i]) {
			if ((rc = fsg_lun_open(curlun,
					mod_data.file[i])) != 0)
				goto out;
		} else if (!mod_data.removable) {
			ERROR(fsg, "no file given for LUN%d\n", i);
			rc = -EINVAL;
			goto out;
		}
	}

	/* Find all the endpoints we will use */
	usb_ep_autoconfig_reset(gadget);
	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
	if (!ep)
		goto autoconf_fail;
	ep->driver_data = fsg;		// claim the endpoint
	fsg->bulk_in = ep;

	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
	if (!ep)
		goto autoconf_fail;
	ep->driver_data = fsg;		// claim the endpoint
	fsg->bulk_out = ep;

	if (transport_is_cbi()) {
		ep = usb_ep_autoconfig(gadget, &fsg_fs_intr_in_desc);
		if (!ep)
			goto autoconf_fail;
		ep->driver_data = fsg;	// claim the endpoint
		fsg->intr_in = ep;
	}

	/* Fix up the descriptors */
	device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
	device_desc.idVendor = cpu_to_le16(mod_data.vendor);
	device_desc.idProduct = cpu_to_le16(mod_data.product);
	device_desc.bcdDevice = cpu_to_le16(mod_data.release);

	i = (transport_is_cbi() ? 3 : 2);	// Number of endpoints
	fsg_intf_desc.bNumEndpoints = i;
	fsg_intf_desc.bInterfaceSubClass = mod_data.protocol_type;
	fsg_intf_desc.bInterfaceProtocol = mod_data.transport_type;
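
	/* Terminate the descriptor lists after i endpoint entries, so a
	 * Bulk-only build exposes just the two bulk endpoints while CB/CBI
	 * builds also expose the interrupt-in endpoint. */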
	fsg_fs_function[i + FSG_FS_FUNCTION_PRE_EP_ENTRIES] = NULL;

	if (gadget_is_dualspeed(gadget)) {
		fsg_hs_function[i + FSG_HS_FUNCTION_PRE_EP_ENTRIES] = NULL;

		/* Assume ep0 uses the same maxpacket value for both speeds */
		dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;

		/* Assume endpoint addresses are the same for both speeds */
		fsg_hs_bulk_in_desc.bEndpointAddress =
			fsg_fs_bulk_in_desc.bEndpointAddress;
		fsg_hs_bulk_out_desc.bEndpointAddress =
			fsg_fs_bulk_out_desc.bEndpointAddress;
		fsg_hs_intr_in_desc.bEndpointAddress =
			fsg_fs_intr_in_desc.bEndpointAddress;
	}

	if (gadget_is_otg(gadget))
		fsg_otg_desc.bmAttributes |= USB_OTG_HNP;

	rc = -ENOMEM;

	/* Allocate the request and buffer for endpoint 0 */
	fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
	if (!req)
		goto out;
	req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL);
	if (!req->buf)
		goto out;
	req->complete = ep0_complete;

	/* Allocate the data buffers */
	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
		struct fsg_buffhd *bh = &fsg->buffhds[i];

		/* Allocate for the bulk-in endpoint. We assume that
		 * the buffer will also work with the bulk-out (and
		 * interrupt-in) endpoint. */
		bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL);
		if (!bh->buf)
			goto out;
		bh->next = bh + 1;
	}
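	/* Link the last buffer head back to the first, forming the circular
	 * list that the fill/drain pointers walk. */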
	fsg->buffhds[FSG_NUM_BUFFERS - 1].next = &fsg->buffhds[0];

	/* This should reflect the actual gadget power source */
	usb_gadget_set_selfpowered(gadget);

	snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer,
			"%s %s with %s",
			init_utsname()->sysname, init_utsname()->release,
			gadget->name);

	/* On a real device, serial[] would be loaded from permanent
	 * storage. We just encode it from the driver version string. */
	for (i = 0; i < sizeof fsg_string_serial - 2; i += 2) {
		unsigned char c = DRIVER_VERSION[i / 2];

		if (!c)
			break;
		sprintf(&fsg_string_serial[i], "%02X", c);
	}

	fsg->thread_task = kthread_create(fsg_main_thread, fsg,
			"file-storage-gadget");
	if (IS_ERR(fsg->thread_task)) {
		rc = PTR_ERR(fsg->thread_task);
		goto out;
	}

	INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
	INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	for (i = 0; i < fsg->nluns; ++i) {
		curlun = &fsg->luns[i];
		if (fsg_lun_is_open(curlun)) {
			p = NULL;
			if (pathbuf) {
				p = d_path(&curlun->filp->f_path,
						pathbuf, PATH_MAX);
				if (IS_ERR(p))
					p = NULL;
			}
			LINFO(curlun, "ro=%d, file: %s\n",
					curlun->ro, (p ? p : "(error)"));
		}
	}
	kfree(pathbuf);

	DBG(fsg, "transport=%s (x%02x)\n",
			mod_data.transport_name, mod_data.transport_type);
	DBG(fsg, "protocol=%s (x%02x)\n",
			mod_data.protocol_name, mod_data.protocol_type);
	DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
			mod_data.vendor, mod_data.product, mod_data.release);
	DBG(fsg, "removable=%d, stall=%d, cdrom=%d, buflen=%u\n",
			mod_data.removable, mod_data.can_stall,
			mod_data.cdrom, mod_data.buflen);
	DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task));

	set_bit(REGISTERED, &fsg->atomic_bitflags);

	/* Tell the thread to start working */
	wake_up_process(fsg->thread_task);
	return 0;

autoconf_fail:
	ERROR(fsg, "unable to autoconfigure all endpoints\n");
	rc = -ENOTSUPP;

out:
	fsg->state = FSG_STATE_TERMINATED;	// The thread is dead
	fsg_unbind(gadget);
	complete(&fsg->thread_notifier);
	return rc;
}
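
/* Note the error path: "out" marks the state TERMINATED, calls fsg_unbind()
 * directly (the thread was never started, so there is nothing to signal),
 * and completes thread_notifier so that fsg_cleanup()'s wait_for_completion()
 * will not block. */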

/*-------------------------------------------------------------------------*/

static void fsg_suspend(struct usb_gadget *gadget)
{
	struct fsg_dev *fsg = get_gadget_data(gadget);

	DBG(fsg, "suspend\n");
	set_bit(SUSPENDED, &fsg->atomic_bitflags);
}

static void fsg_resume(struct usb_gadget *gadget)
{
	struct fsg_dev *fsg = get_gadget_data(gadget);

	DBG(fsg, "resume\n");
	clear_bit(SUSPENDED, &fsg->atomic_bitflags);
}

/*-------------------------------------------------------------------------*/

static struct usb_gadget_driver fsg_driver = {
#ifdef CONFIG_USB_GADGET_DUALSPEED
	.speed		= USB_SPEED_HIGH,
#else
	.speed		= USB_SPEED_FULL,
#endif
	.function	= (char *) fsg_string_product,
	.bind		= fsg_bind,
	.unbind		= fsg_unbind,
	.disconnect	= fsg_disconnect,
	.setup		= fsg_setup,
	.suspend	= fsg_suspend,
	.resume		= fsg_resume,

	.driver		= {
		.name		= DRIVER_NAME,
		.owner		= THIS_MODULE,
		// .release = ...
		// .suspend = ...
		// .resume = ...
	},
};
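
/* .speed advertises the highest speed this driver supports.  fsg_init()
 * below hands this structure to usb_gadget_register_driver(), which is what
 * leads the gadget core to call fsg_bind(). */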

static int __init fsg_alloc(void)
{
	struct fsg_dev *fsg;

	fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
	if (!fsg)
		return -ENOMEM;
	spin_lock_init(&fsg->lock);
	init_rwsem(&fsg->filesem);
	kref_init(&fsg->ref);
	init_completion(&fsg->thread_notifier);

	the_fsg = fsg;
	return 0;
}

static int __init fsg_init(void)
{
	int rc;
	struct fsg_dev *fsg;

	if ((rc = fsg_alloc()) != 0)
		return rc;
	fsg = the_fsg;
	if ((rc = usb_gadget_register_driver(&fsg_driver)) != 0)
		kref_put(&fsg->ref, fsg_release);
	return rc;
}
module_init(fsg_init);

static void __exit fsg_cleanup(void)
{
	struct fsg_dev *fsg = the_fsg;

	/* Unregister the driver iff the thread hasn't already done so */
	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
		usb_gadget_unregister_driver(&fsg_driver);

	/* Wait for the thread to finish up */
	wait_for_completion(&fsg->thread_notifier);

	kref_put(&fsg->ref, fsg_release);
}
module_exit(fsg_cleanup);
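
/* Typical lifecycle (module name illustrative): loading the module runs
 * fsg_init(), which registers fsg_driver and thereby triggers fsg_bind();
 * unloading runs fsg_cleanup(), which unregisters the driver (unless the
 * worker thread already did) and waits for the thread to exit, e.g.:
 *
 *	modprobe g_file_storage removable=1
 *	...
 *	rmmod g_file_storage
 */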