/*
 * IDE ATAPI streaming tape driver.
 *
 * Copyright (C) 1995-1999 Gadi Oxman <gadio@netvision.net.il>
 * Copyright (C) 2003-2005 Bartlomiej Zolnierkiewicz
 *
 * This driver was constructed as a student project in the software laboratory
 * of the Faculty of Electrical Engineering at the Technion - Israel Institute
 * of Technology, under the guidance of Avner Lottem and Dr. Ilana David.
 *
 * It is hereby placed under the terms of the GNU General Public License.
 * (See linux/COPYING).
 *
 * For a historical changelog see
 * Documentation/ide/ChangeLog.ide-tape.1995-2002
 */

#define IDETAPE_VERSION "1.20"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <scsi/scsi.h>
#include <asm/byteorder.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/unaligned.h>
#include <linux/mtio.h>

enum {
	/* output errors only */
	DBG_ERR = (1 << 0),
	/* output all sense key/asc */
	DBG_SENSE = (1 << 1),
	/* info regarding all chrdev-related procedures */
	DBG_CHRDEV = (1 << 2),
	/* all remaining procedures */
	DBG_PROCS = (1 << 3),
	/* buffer alloc info (pc_stack & rq_stack) */
	DBG_PCRQ_STACK = (1 << 4),
};

/* define to see debug info */
#define IDETAPE_DEBUG_LOG 0

#if IDETAPE_DEBUG_LOG
#define debug_log(lvl, fmt, args...) \
{ \
	if (tape->debug_mask & lvl) \
		printk(KERN_INFO "ide-tape: " fmt, ## args); \
}
#else
#define debug_log(lvl, fmt, args...) do {} while (0)
#endif
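
/*
 * Example (only expands to code when IDETAPE_DEBUG_LOG is set):
 *
 *	debug_log(DBG_PROCS, "Enter %s\n", __func__);
 */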

/**************************** Tunable parameters *****************************/
/*
 * Pipelined mode parameters.
 *
 * We try to use the minimum number of stages which is enough to keep the tape
 * constantly streaming. To accomplish that, we implement a feedback loop around
 * the maximum number of stages:
 *
 * We start from MIN maximum stages (we will not even use MIN stages if we don't
 * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
 * pipeline is empty, until we reach the optimum value or until we reach MAX.
 */
#define IDETAPE_MIN_PIPELINE_STAGES 1
#define IDETAPE_MAX_PIPELINE_STAGES 400
#define IDETAPE_INCREASE_STAGES_RATE 20

/*
 * After each failed packet command we issue a request sense command and retry
 * the packet command IDETAPE_MAX_PC_RETRIES times.
 *
 * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
 */
#define IDETAPE_MAX_PC_RETRIES 3

/*
 * With each packet command, we allocate a buffer of IDETAPE_PC_BUFFER_SIZE
 * bytes. This is used for several packet commands (not for READ/WRITE commands)
 */
#define IDETAPE_PC_BUFFER_SIZE 256

/*
 * In various places in the driver, we need to allocate storage
 * for packet commands and requests, which will remain valid while
 * we leave the driver to wait for an interrupt or a timeout event.
 */
#define IDETAPE_PC_STACK (10 + IDETAPE_MAX_PC_RETRIES)

/*
 * Some drives (for example, Seagate STT3401A Travan) require a very long
 * timeout, because they don't return an interrupt or clear their busy bit
 * until after the command completes (even retension commands).
 */
#define IDETAPE_WAIT_CMD (900*HZ)

/*
 * The following parameter is used to select the point in the internal tape fifo
 * in which we will start to refill the buffer. Decreasing the following
 * parameter will improve the system's latency and interactive response, while
 * using a high value might improve system throughput.
 */
#define IDETAPE_FIFO_THRESHOLD 2

/*
 * DSC polling parameters.
 *
 * Polling for DSC (a single bit in the status register) is a very important
 * function in ide-tape. There are two cases in which we poll for DSC:
 *
 * 1. Before a read/write packet command, to ensure that we can transfer data
 * from/to the tape's data buffers, without causing an actual media access.
 * In case the tape is not ready yet, we remove our request from the device
 * request queue, so that ide.c can service requests from the other device
 * on the same interface in the meantime.
 *
 * 2. After the successful initialization of a "media access packet command",
 * which is a command that can take a long time to complete (the interval can
 * range from several seconds to even an hour). Again, we postpone our request
 * in the middle to free the bus for the other device. The polling frequency
 * here should be lower than the read/write frequency since those media access
 * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
 * (2 seconds), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
 * (5 minutes), we switch to a lower frequency - IDETAPE_DSC_MA_SLOW
 * (30 seconds).
 *
 * We also set a timeout for the timer, in case something goes wrong. The
 * timeout should be longer than the maximum execution time of a tape operation.
 */
/* DSC timings. */
#define IDETAPE_DSC_RW_MIN 5*HZ/100 /* 50 msec */
#define IDETAPE_DSC_RW_MAX 40*HZ/100 /* 400 msec */
#define IDETAPE_DSC_RW_TIMEOUT 2*60*HZ /* 2 minutes */
#define IDETAPE_DSC_MA_FAST 2*HZ /* 2 seconds */
#define IDETAPE_DSC_MA_THRESHOLD 5*60*HZ /* 5 minutes */
#define IDETAPE_DSC_MA_SLOW 30*HZ /* 30 seconds */
#define IDETAPE_DSC_MA_TIMEOUT 2*60*60*HZ /* 2 hours */

/*************************** End of tunable parameters ***********************/

/* Read/Write error simulation */
#define SIMULATE_ERRORS 0

/* tape directions */
enum {
	IDETAPE_DIR_NONE  = (1 << 0),
	IDETAPE_DIR_READ  = (1 << 1),
	IDETAPE_DIR_WRITE = (1 << 2),
};
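
/*
 * Simplified buffer head used for the driver's internal data buffers:
 * b_size is the buffer capacity, b_count the number of bytes currently
 * in use, and b_reqnext links to the next buffer of the same stage.
 */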
struct idetape_bh {
	u32 b_size;
	atomic_t b_count;
	struct idetape_bh *b_reqnext;
	char *b_data;
};

/* Tape door status */
#define DOOR_UNLOCKED 0
#define DOOR_LOCKED 1
#define DOOR_EXPLICITLY_LOCKED 2

/* Some defines for the SPACE command */
#define IDETAPE_SPACE_OVER_FILEMARK 1
#define IDETAPE_SPACE_TO_EOD 3

/* Some defines for the LOAD UNLOAD command */
#define IDETAPE_LU_LOAD_MASK 1
#define IDETAPE_LU_RETENSION_MASK 2
#define IDETAPE_LU_EOT_MASK 4

/*
 * Special requests for our block device strategy routine.
 *
 * In order to service a character device command, we add special requests to
 * the tail of our block device request queue and wait for their completion.
 */
enum {
	REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
	REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
	REQ_IDETAPE_READ = (1 << 2),
	REQ_IDETAPE_WRITE = (1 << 3),
};

/* Error codes returned in rq->errors to the higher part of the driver. */
#define IDETAPE_ERROR_GENERAL 101
#define IDETAPE_ERROR_FILEMARK 102
#define IDETAPE_ERROR_EOD 103

/* Structures related to the SELECT SENSE / MODE SENSE packet commands. */
#define IDETAPE_BLOCK_DESCRIPTOR 0
#define IDETAPE_CAPABILITIES_PAGE 0x2a

/* Tape flag bits values. */
enum {
	IDETAPE_FLAG_IGNORE_DSC = (1 << 0),
	/* 0 when the tape position is unknown */
	IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
	/* Device already opened */
	IDETAPE_FLAG_BUSY = (1 << 2),
	/* Error detected in a pipeline stage */
	IDETAPE_FLAG_PIPELINE_ERR = (1 << 3),
	/* Attempt to auto-detect the current user block size */
	IDETAPE_FLAG_DETECT_BS = (1 << 4),
	/* Currently on a filemark */
	IDETAPE_FLAG_FILEMARK = (1 << 5),
	/* DRQ interrupt device */
	IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 6),
	/* pipeline active */
	IDETAPE_FLAG_PIPELINE_ACTIVE = (1 << 7),
	/* 0 = no tape is loaded, so we don't rewind after ejecting */
	IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 8),
};

/* A pipeline stage. */
typedef struct idetape_stage_s {
	struct request rq;		/* The corresponding request */
	struct idetape_bh *bh;		/* The data buffers */
	struct idetape_stage_s *next;	/* Pointer to the next stage */
} idetape_stage_t;

/*
 * Most of our global data which we need to save even as we leave the driver due
 * to an interrupt or a timer event is stored in the struct defined below.
 */
typedef struct ide_tape_obj {
	ide_drive_t *drive;
	ide_driver_t *driver;
	struct gendisk *disk;
	struct kref kref;

	/*
	 * Since a typical character device operation requires more
	 * than one packet command, we provide here enough memory
	 * for the maximum number of interconnected packet commands.
	 * The packet commands are stored in the circular array pc_stack.
	 * pc_stack_index points to the next free entry, and wraps around
	 * to the start when we get to the last array entry.
	 *
	 * pc points to the currently processed packet command.
	 *
	 * failed_pc points to the last failed packet command, or contains
	 * NULL if we do not need to retry any packet command. This is
	 * required since an additional packet command is needed before the
	 * retry, to get detailed information on what went wrong.
	 */
	/* Current packet command */
	struct ide_atapi_pc *pc;
	/* Last failed packet command */
	struct ide_atapi_pc *failed_pc;
	/* Packet command stack */
	struct ide_atapi_pc pc_stack[IDETAPE_PC_STACK];
	/* Next free packet command storage space */
	int pc_stack_index;
	struct request rq_stack[IDETAPE_PC_STACK];
	/* We implement a circular array */
	int rq_stack_index;

	/*
	 * DSC polling variables.
	 *
	 * While polling for DSC we use postponed_rq to postpone the current
	 * request so that ide.c will be able to service pending requests on the
	 * other device. Note that at most we will have only one DSC (usually
	 * data transfer) request in the device request queue. Additional
	 * requests can be queued in our internal pipeline, but they will be
	 * visible to ide.c only one at a time.
	 */
	struct request *postponed_rq;
	/* The time in which we started polling for DSC */
	unsigned long dsc_polling_start;
	/* Timer used to poll for dsc */
	struct timer_list dsc_timer;
	/* Read/Write dsc polling frequency */
	unsigned long best_dsc_rw_freq;
	unsigned long dsc_poll_freq;
	unsigned long dsc_timeout;

	/* Read position information */
	u8 partition;
	/* Current block */
	unsigned int first_frame;

	/* Last error information */
	u8 sense_key, asc, ascq;

	/* Character device operation */
	unsigned int minor;
	/* device name */
	char name[4];
	/* Current character device data transfer direction */
	u8 chrdev_dir;

	/* tape block size, usually 512 or 1024 bytes */
	unsigned short blk_size;
	int user_bs_factor;

	/* Copy of the tape's Capabilities and Mechanical Page */
	u8 caps[20];

	/*
	 * Active data transfer request parameters.
	 *
	 * At most, there is only one ide-tape originated data transfer request
	 * in the device request queue. This allows ide.c to easily service
	 * requests from the other device when we postpone our active request.
	 * In the pipelined operation mode, we use our internal pipeline
	 * structure to hold more data requests. The data buffer size is chosen
	 * based on the tape's recommendation.
	 */
	/* ptr to the request which is waiting in the device request queue */
	struct request *active_data_rq;
	/* Data buffer size chosen based on the tape's recommendation */
	int stage_size;
	idetape_stage_t *merge_stage;
	int merge_stage_size;
	struct idetape_bh *bh;
	char *b_data;
	int b_count;

	/*
	 * Pipeline parameters.
	 *
	 * To accomplish non-pipelined mode, we simply set the following
	 * variables to zero (or NULL, where appropriate).
	 */
	/* Number of currently used stages */
	int nr_stages;
	/* Number of pending stages */
	int nr_pending_stages;
	/* We will not allocate more than this number of stages */
	int max_stages, min_pipeline, max_pipeline;
	/* The first stage which will be removed from the pipeline */
	idetape_stage_t *first_stage;
	/* The currently active stage */
	idetape_stage_t *active_stage;
	/* Will be serviced after the currently active request */
	idetape_stage_t *next_stage;
	/* New requests will be added to the pipeline here */
	idetape_stage_t *last_stage;
	int pages_per_stage;
	/* Wasted space in each stage */
	int excess_bh_size;

	/* Status/Action flags: long for set_bit */
	unsigned long flags;
	/* protects the ide-tape queue */
	spinlock_t lock;

	/* Measures average tape speed */
	unsigned long avg_time;
	int avg_size;
	int avg_speed;

	/* the door is currently locked */
	int door_locked;
	/* the tape hardware is write protected */
	char drv_write_prot;
	/* the tape is write protected (hardware or opened as read-only) */
	char write_prot;

	/*
	 * Limit the number of times a request can be postponed, to avoid an
	 * infinite postpone deadlock.
	 */
	int postpone_cnt;

	/* Speed control at the tape buffers input/output */
	unsigned long insert_time;
	int insert_size;
	int insert_speed;
	int measure_insert_time;

	u32 debug_mask;
} idetape_tape_t;

static DEFINE_MUTEX(idetape_ref_mutex);

static struct class *idetape_sysfs_class;

#define to_ide_tape(obj) container_of(obj, struct ide_tape_obj, kref)

#define ide_tape_g(disk) \
	container_of((disk)->private_data, struct ide_tape_obj, driver)
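
/*
 * Take a reference on the tape object attached to a gendisk; the kref is
 * protected by idetape_ref_mutex. Drop the reference with ide_tape_put().
 */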
static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
{
	struct ide_tape_obj *tape = NULL;

	mutex_lock(&idetape_ref_mutex);
	tape = ide_tape_g(disk);
	if (tape)
		kref_get(&tape->kref);
	mutex_unlock(&idetape_ref_mutex);
	return tape;
}

static void ide_tape_release(struct kref *);

static void ide_tape_put(struct ide_tape_obj *tape)
{
	mutex_lock(&idetape_ref_mutex);
	kref_put(&tape->kref, ide_tape_release);
	mutex_unlock(&idetape_ref_mutex);
}

/*
 * The variables below are used for the character device interface. Additional
 * state variables are defined in our ide_drive_t structure.
 */
static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];

#define ide_tape_f(file) ((file)->private_data)

static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
{
	struct ide_tape_obj *tape = NULL;

	mutex_lock(&idetape_ref_mutex);
	tape = idetape_devs[i];
	if (tape)
		kref_get(&tape->kref);
	mutex_unlock(&idetape_ref_mutex);
	return tape;
}

static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
				  unsigned int bcount)
{
	struct idetape_bh *bh = pc->bh;
	int count;

	while (bcount) {
		if (bh == NULL) {
			printk(KERN_ERR "ide-tape: bh == NULL in "
				"idetape_input_buffers\n");
			ide_atapi_discard_data(drive, bcount);
			return;
		}
		count = min(
			(unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
			bcount);
		HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
					atomic_read(&bh->b_count), count);
		bcount -= count;
		atomic_add(count, &bh->b_count);
		if (atomic_read(&bh->b_count) == bh->b_size) {
			bh = bh->b_reqnext;
			if (bh)
				atomic_set(&bh->b_count, 0);
		}
	}
	pc->bh = bh;
}

static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
				   unsigned int bcount)
{
	struct idetape_bh *bh = pc->bh;
	int count;

	while (bcount) {
		if (bh == NULL) {
			printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
					__func__);
			return;
		}
		count = min((unsigned int)pc->b_count, (unsigned int)bcount);
		HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count);
		bcount -= count;
		pc->b_data += count;
		pc->b_count -= count;
		if (!pc->b_count) {
			bh = bh->b_reqnext;
			pc->bh = bh;
			if (bh) {
				pc->b_data = bh->b_data;
				pc->b_count = atomic_read(&bh->b_count);
			}
		}
	}
}
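
/*
 * Bring the buffer list back in sync with the number of bytes actually
 * transferred (pc->xferred), e.g. after a partial transfer caused by a
 * DMA error. Write commands need no adjustment here.
 */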
static void idetape_update_buffers(struct ide_atapi_pc *pc)
{
	struct idetape_bh *bh = pc->bh;
	int count;
	unsigned int bcount = pc->xferred;

	if (pc->flags & PC_FLAG_WRITING)
		return;
	while (bcount) {
		if (bh == NULL) {
			printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
					__func__);
			return;
		}
		count = min((unsigned int)bh->b_size, (unsigned int)bcount);
		atomic_set(&bh->b_count, count);
		if (atomic_read(&bh->b_count) == bh->b_size)
			bh = bh->b_reqnext;
		bcount -= count;
	}
	pc->bh = bh;
}

/*
 * idetape_next_pc_storage returns a pointer to a place in which we can
 * safely store a packet command, even though we intend to leave the
 * driver. A storage space for a maximum of IDETAPE_PC_STACK packet
 * commands is allocated at initialization time.
 */
static struct ide_atapi_pc *idetape_next_pc_storage(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	debug_log(DBG_PCRQ_STACK, "pc_stack_index=%d\n", tape->pc_stack_index);

	if (tape->pc_stack_index == IDETAPE_PC_STACK)
		tape->pc_stack_index = 0;
	return (&tape->pc_stack[tape->pc_stack_index++]);
}

/*
 * idetape_next_rq_storage is used along with idetape_next_pc_storage.
 * Since we queue packet commands in the request queue, we need to
 * allocate a request, along with the allocation of a packet command.
 */

/*
 * This should get fixed to use kmalloc(.., GFP_ATOMIC)
 * followed later on by kfree(). -ml
 */
static struct request *idetape_next_rq_storage(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	debug_log(DBG_PCRQ_STACK, "rq_stack_index=%d\n", tape->rq_stack_index);

	if (tape->rq_stack_index == IDETAPE_PC_STACK)
		tape->rq_stack_index = 0;
	return (&tape->rq_stack[tape->rq_stack_index++]);
}
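
/* Reset a packet command to a known clean state before it is (re)used. */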
static void idetape_init_pc(struct ide_atapi_pc *pc)
{
	memset(pc->c, 0, 12);
	pc->retries = 0;
	pc->flags = 0;
	pc->req_xfer = 0;
	pc->buf = pc->pc_buf;
	pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
	pc->bh = NULL;
	pc->b_data = NULL;
}

/*
 * Called on each failed packet command retry to analyze the request sense.
 * We currently do not utilize this information.
 */
static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc *pc = tape->failed_pc;

	tape->sense_key = sense[2] & 0xF;
	tape->asc = sense[12];
	tape->ascq = sense[13];

	debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
		 pc->c[0], tape->sense_key, tape->asc, tape->ascq);

	/* Correct pc->xferred by asking the tape. */
	if (pc->flags & PC_FLAG_DMA_ERROR) {
		pc->xferred = pc->req_xfer -
			tape->blk_size *
			be32_to_cpu(get_unaligned((u32 *)&sense[3]));
		idetape_update_buffers(pc);
	}

	/*
	 * If error was the result of a zero-length read or write command,
	 * with sense key=5, asc=0x22, ascq=0, let it slide. Some drives
	 * (i.e. Seagate STT3401A Travan) don't support 0-length read/writes.
	 */
	if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6)
	    /* length == 0 */
	    && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) {
		if (tape->sense_key == 5) {
			/* don't report an error, everything's ok */
			pc->error = 0;
			/* don't retry read/write */
			pc->flags |= PC_FLAG_ABORT;
		}
	}
	if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
		pc->error = IDETAPE_ERROR_FILEMARK;
		pc->flags |= PC_FLAG_ABORT;
	}
	if (pc->c[0] == WRITE_6) {
		if ((sense[2] & 0x40) || (tape->sense_key == 0xd
		     && tape->asc == 0x0 && tape->ascq == 0x2)) {
			pc->error = IDETAPE_ERROR_EOD;
			pc->flags |= PC_FLAG_ABORT;
		}
	}
	if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
		if (tape->sense_key == 8) {
			pc->error = IDETAPE_ERROR_EOD;
			pc->flags |= PC_FLAG_ABORT;
		}
		if (!(pc->flags & PC_FLAG_ABORT) && pc->xferred)
			pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
	}
}
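
/*
 * Promote tape->next_stage to the active stage and prepare its request for
 * insertion into the device request queue.
 */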
static void idetape_activate_next_stage(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	idetape_stage_t *stage = tape->next_stage;
	struct request *rq;

	debug_log(DBG_PROCS, "Enter %s\n", __func__);

	if (stage == NULL) {
		printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
			" existing stage\n");
		return;
	}

	rq = &stage->rq;
	rq->rq_disk = tape->disk;
	rq->buffer = NULL;
	rq->special = (void *)stage->bh;
	tape->active_data_rq = rq;
	tape->active_stage = stage;
	tape->next_stage = stage->next;
}

/* Free a stage along with its related buffers completely. */
static void __idetape_kfree_stage(idetape_stage_t *stage)
{
	struct idetape_bh *prev_bh, *bh = stage->bh;
	int size;

	while (bh != NULL) {
		if (bh->b_data != NULL) {
			size = (int) bh->b_size;
			while (size > 0) {
				free_page((unsigned long) bh->b_data);
				size -= PAGE_SIZE;
				bh->b_data += PAGE_SIZE;
			}
		}
		prev_bh = bh;
		bh = bh->b_reqnext;
		kfree(prev_bh);
	}
	kfree(stage);
}

static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
{
	__idetape_kfree_stage(stage);
}

/*
 * Remove tape->first_stage from the pipeline. The caller should avoid race
 * conditions.
 */
static void idetape_remove_stage_head(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	idetape_stage_t *stage;

	debug_log(DBG_PROCS, "Enter %s\n", __func__);

	if (tape->first_stage == NULL) {
		printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
		return;
	}
	if (tape->active_stage == tape->first_stage) {
		printk(KERN_ERR "ide-tape: bug: Trying to free our active "
			"pipeline stage\n");
		return;
	}
	stage = tape->first_stage;
	tape->first_stage = stage->next;
	idetape_kfree_stage(tape, stage);
	tape->nr_stages--;
	if (tape->first_stage == NULL) {
		tape->last_stage = NULL;
		if (tape->next_stage != NULL)
			printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
				" NULL\n");
		if (tape->nr_stages)
			printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
				"now\n");
	}
}

/*
 * This will free all the pipeline stages starting from new_last_stage->next
 * to the end of the list, and point tape->last_stage to new_last_stage.
 */
static void idetape_abort_pipeline(ide_drive_t *drive,
				   idetape_stage_t *new_last_stage)
{
	idetape_tape_t *tape = drive->driver_data;
	idetape_stage_t *stage = new_last_stage->next;
	idetape_stage_t *nstage;

	debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);

	while (stage) {
		nstage = stage->next;
		idetape_kfree_stage(tape, stage);
		--tape->nr_stages;
		--tape->nr_pending_stages;
		stage = nstage;
	}
	if (new_last_stage)
		new_last_stage->next = NULL;
	tape->last_stage = new_last_stage;
	tape->next_stage = NULL;
}

/*
 * Finish servicing a request and insert a pending pipeline request into the
 * main device queue.
 */
static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
{
	struct request *rq = HWGROUP(drive)->rq;
	idetape_tape_t *tape = drive->driver_data;
	unsigned long flags;
	int error;
	int remove_stage = 0;
	idetape_stage_t *active_stage;

	debug_log(DBG_PROCS, "Enter %s\n", __func__);

	switch (uptodate) {
	case 0:	error = IDETAPE_ERROR_GENERAL; break;
	case 1: error = 0; break;
	default: error = uptodate;
	}
	rq->errors = error;
	if (error)
		tape->failed_pc = NULL;

	if (!blk_special_request(rq)) {
		ide_end_request(drive, uptodate, nr_sects);
		return 0;
	}

	spin_lock_irqsave(&tape->lock, flags);

	/* The request was a pipelined data transfer request */
	if (tape->active_data_rq == rq) {
		active_stage = tape->active_stage;
		tape->active_stage = NULL;
		tape->active_data_rq = NULL;
		tape->nr_pending_stages--;
		if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
			remove_stage = 1;
			if (error) {
				set_bit(IDETAPE_FLAG_PIPELINE_ERR,
					&tape->flags);
				if (error == IDETAPE_ERROR_EOD)
					idetape_abort_pipeline(drive,
								active_stage);
			}
		} else if (rq->cmd[0] & REQ_IDETAPE_READ) {
			if (error == IDETAPE_ERROR_EOD) {
				set_bit(IDETAPE_FLAG_PIPELINE_ERR,
					&tape->flags);
				idetape_abort_pipeline(drive, active_stage);
			}
		}
		if (tape->next_stage != NULL) {
			idetape_activate_next_stage(drive);

			/* Insert the next request into the request queue. */
			(void)ide_do_drive_cmd(drive, tape->active_data_rq,
						ide_end);
		} else if (!error) {
			/*
			 * This is a part of the feedback loop which tries to
			 * find the optimum number of stages. We are starting
			 * from a minimum maximum number of stages, and if we
			 * sense that the pipeline is empty, we try to increase
			 * it, until we reach the user compile time memory
			 * limit.
			 */
			int i = (tape->max_pipeline - tape->min_pipeline) / 10;

			tape->max_stages += max(i, 1);
			tape->max_stages = max(tape->max_stages,
						tape->min_pipeline);
			tape->max_stages = min(tape->max_stages,
						tape->max_pipeline);
		}
	}
	ide_end_drive_cmd(drive, 0, 0);

	if (remove_stage)
		idetape_remove_stage_head(drive);
	if (tape->active_data_rq == NULL)
		clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);

	spin_unlock_irqrestore(&tape->lock, flags);
	return 0;
}

static ide_startstop_t idetape_request_sense_callback(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	debug_log(DBG_PROCS, "Enter %s\n", __func__);

	if (!tape->pc->error) {
		idetape_analyze_error(drive, tape->pc->buf);
		idetape_end_request(drive, 1, 0);
	} else {
		printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - "
			"Aborting request!\n");
		idetape_end_request(drive, 0, 0);
	}
	return ide_stopped;
}

static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
{
	idetape_init_pc(pc);
	pc->c[0] = REQUEST_SENSE;
	pc->c[4] = 20;
	pc->req_xfer = 20;
	pc->idetape_callback = &idetape_request_sense_callback;
}

static void idetape_init_rq(struct request *rq, u8 cmd)
{
	memset(rq, 0, sizeof(*rq));
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd[0] = cmd;
}

/*
 * Generate a new packet command request in front of the request queue, before
 * the current request, so that it will be processed immediately, on the next
 * pass through the driver. The function below is called from the request
 * handling part of the driver (the "bottom" part). Safe storage for the request
 * should be allocated with idetape_next_{pc,rq}_storage() prior to that.
 *
 * Memory for those requests is pre-allocated at initialization time, and is
 * limited to IDETAPE_PC_STACK requests. We assume that we have enough space for
 * the maximum possible number of inter-dependent packet commands.
 *
 * The higher level of the driver - the ioctl handler and the character device
 * handling functions - should queue requests to the lower level part and wait
 * for their completion using idetape_queue_pc_tail or idetape_queue_rw_tail.
 */
static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
				  struct request *rq)
{
	struct ide_tape_obj *tape = drive->driver_data;

	idetape_init_rq(rq, REQ_IDETAPE_PC1);
	rq->buffer = (char *) pc;
	rq->rq_disk = tape->disk;
	(void) ide_do_drive_cmd(drive, rq, ide_preempt);
}

/*
 * idetape_retry_pc is called when an error was detected during the
 * last packet command. We queue a request sense packet command at
 * the head of the request list.
 */
static ide_startstop_t idetape_retry_pc(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc *pc;
	struct request *rq;

	(void)ide_read_error(drive);
	pc = idetape_next_pc_storage(drive);
	rq = idetape_next_rq_storage(drive);
	idetape_create_request_sense_cmd(pc);
	set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
	idetape_queue_pc_head(drive, pc, rq);
	return ide_stopped;
}

/*
 * Postpone the current request so that ide.c will be able to service requests
 * from another device on the same hwgroup while we are polling for DSC.
 */
static void idetape_postpone_request(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	debug_log(DBG_PROCS, "Enter %s\n", __func__);

	tape->postponed_rq = HWGROUP(drive)->rq;
	ide_stall_queue(drive, tape->dsc_poll_freq);
}

typedef void idetape_io_buf(ide_drive_t *, struct ide_atapi_pc *, unsigned int);

/*
 * This is the usual interrupt handler which will be called during a packet
 * command. We will transfer some of the data (as requested by the drive) and
 * will re-point the interrupt handler to us. When the data transfer is
 * finished, we will act according to the algorithm described before
 * idetape_issue_pc.
 */
static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc *pc = tape->pc;
	xfer_func_t *xferfunc;
	idetape_io_buf *iobuf;
	unsigned int temp;
#if SIMULATE_ERRORS
	static int error_sim_count;
#endif
	u16 bcount;
	u8 stat, ireason;

	debug_log(DBG_PROCS, "Enter %s - interrupt handler\n", __func__);

	/* Clear the interrupt */
	stat = ide_read_status(drive);

	if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
		if (hwif->dma_ops->dma_end(drive) || (stat & ERR_STAT)) {
			/*
			 * A DMA error usually indicates a hardware error or
			 * an aborted transfer, although a partial transfer
			 * (e.g. when the tape stops at a filemark during a
			 * READ command) may also surface as a DMA error on
			 * some chipsets. In that case we will later ask the
			 * tape how many bytes of the original request were
			 * actually transferred, since we can't get that
			 * information from the DMA engine on most chipsets.
			 * (AS, 19 Apr 2001)
			 */
			pc->flags |= PC_FLAG_DMA_ERROR;
		} else {
			pc->xferred = pc->req_xfer;
			idetape_update_buffers(pc);
		}
		debug_log(DBG_PROCS, "DMA finished\n");
	}

	/* No more interrupts */
	if ((stat & DRQ_STAT) == 0) {
		debug_log(DBG_SENSE, "Packet command completed, %d bytes"
			" transferred\n", pc->xferred);

		pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
		local_irq_enable();

#if SIMULATE_ERRORS
		if ((pc->c[0] == WRITE_6 || pc->c[0] == READ_6) &&
		    (++error_sim_count % 100) == 0) {
			printk(KERN_INFO "ide-tape: %s: simulating error\n",
				tape->name);
			stat |= ERR_STAT;
		}
#endif
		if ((stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
			stat &= ~ERR_STAT;
		if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
			/* Error detected */
			debug_log(DBG_ERR, "%s: I/O error\n", tape->name);

			if (pc->c[0] == REQUEST_SENSE) {
				printk(KERN_ERR "ide-tape: I/O error in request"
					" sense command\n");
				return ide_do_reset(drive);
			}
			debug_log(DBG_ERR, "[cmd %x]: check condition\n",
					pc->c[0]);

			/* Retry operation */
			return idetape_retry_pc(drive);
		}
		pc->error = 0;
		if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) &&
		    (stat & SEEK_STAT) == 0) {
			/* Media access command */
			tape->dsc_polling_start = jiffies;
			tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
			tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
			/* Allow ide.c to handle other requests */
			idetape_postpone_request(drive);
			return ide_stopped;
		}
		if (tape->failed_pc == pc)
			tape->failed_pc = NULL;
		/* Command finished - Call the callback function */
		return pc->idetape_callback(drive);
	}

	if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
		pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
		printk(KERN_ERR "ide-tape: The tape wants to issue more "
			"interrupts in DMA mode\n");
		printk(KERN_ERR "ide-tape: DMA disabled, reverting to PIO\n");
		ide_dma_off(drive);
		return ide_do_reset(drive);
	}

	/* Get the number of bytes to transfer on this interrupt. */
	bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
		  hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);

	ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);

	if (ireason & CD) {
		printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
		return ide_do_reset(drive);
	}
	if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
		/* Hopefully, we will never get here */
		printk(KERN_ERR "ide-tape: We wanted to %s, ",
				(ireason & IO) ? "Write" : "Read");
		printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n",
				(ireason & IO) ? "Read" : "Write");
		return ide_do_reset(drive);
	}
	if (!(pc->flags & PC_FLAG_WRITING)) {
		/* Reading - Check that we have enough space */
		temp = pc->xferred + bcount;
		if (temp > pc->req_xfer) {
			if (temp > pc->buf_size) {
				printk(KERN_ERR "ide-tape: The tape wants to "
					"send us more data than expected "
					"- discarding data\n");
				ide_atapi_discard_data(drive, bcount);
				ide_set_handler(drive, &idetape_pc_intr,
						IDETAPE_WAIT_CMD, NULL);
				return ide_started;
			}
			debug_log(DBG_SENSE, "The tape wants to send us more "
				"data than expected - allowing transfer\n");
		}
		iobuf = &idetape_input_buffers;
		xferfunc = hwif->atapi_input_bytes;
	} else {
		iobuf = &idetape_output_buffers;
		xferfunc = hwif->atapi_output_bytes;
	}

	if (pc->bh)
		iobuf(drive, pc, bcount);
	else
		xferfunc(drive, pc->cur_pos, bcount);

	/* Update the current position */
	pc->xferred += bcount;
	pc->cur_pos += bcount;

	debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
		  pc->c[0], bcount);

	/* And set the interrupt handler again */
	ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
	return ide_started;
}

/*
 * Packet Command Interface
 *
 * The current Packet Command is available in tape->pc, and will not change
 * until we finish handling it. Each packet command is associated with a
 * callback function that will be called when the command is finished.
 *
 * The handling is done in several stages:
 *
 * 1. idetape_issue_pc will send the packet command to the drive, and will set
 * the interrupt handler to idetape_pc_intr.
 *
 * 2. On each interrupt, idetape_pc_intr will be called. This step will be
 * repeated until the device signals us that no more interrupts will be issued.
 *
 * 3. ATAPI Tape media access commands have immediate status with a delayed
 * process. In case of a successful initiation of a media access packet command,
 * the DSC bit will be set when the actual execution of the command is finished.
 * Since the tape drive will not issue an interrupt, we have to poll for this
 * event. In this case, we define the request as a "low priority request" by
 * setting rq_status to IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and
 * exit the driver.
 *
 * ide.c will then give higher priority to requests which originate from the
 * other device, until we change rq_status back to RQ_ACTIVE.
 *
 * 4. When the packet command is finished, it will be checked for errors.
 *
 * 5. In case an error was found, we queue a request sense packet command in
 * front of the request queue and retry the operation up to
 * IDETAPE_MAX_PC_RETRIES times.
 *
 * 6. In case no error was found, or we decided to give up and not to retry
 * again, the callback function will be called and then we will handle the next
 * request.
 */
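
/*
 * Called when the drive is expected to have asserted DRQ after receiving
 * WIN_PACKETCMD: verify the interrupt reason (CoD set, IO clear), install
 * idetape_pc_intr as the interrupt handler, start DMA if it was set up and
 * send the 12-byte command packet.
 */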
static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc *pc = tape->pc;
	int retries = 100;
	ide_startstop_t startstop;
	u8 ireason;

	if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
		printk(KERN_ERR "ide-tape: Strange, packet command initiated "
			"yet DRQ isn't asserted\n");
		return startstop;
	}
	ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
	while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
		printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
			"a packet command, retrying\n");
		udelay(100);
		ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
		if (retries == 0) {
			printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while "
				"issuing a packet command, ignoring\n");
			ireason |= CD;
			ireason &= ~IO;
		}
	}
	if ((ireason & CD) == 0 || (ireason & IO)) {
		printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
			"a packet command\n");
		return ide_do_reset(drive);
	}
	/* Set the interrupt routine */
	ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
#ifdef CONFIG_BLK_DEV_IDEDMA
	/* Begin DMA, if necessary */
	if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
		hwif->dma_ops->dma_start(drive);
#endif
	/* Send the actual packet */
	HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
	return ide_started;
}

static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
		struct ide_atapi_pc *pc)
{
	ide_hwif_t *hwif = drive->hwif;
	idetape_tape_t *tape = drive->driver_data;
	int dma_ok = 0;
	u16 bcount;

	if (tape->pc->c[0] == REQUEST_SENSE && pc->c[0] == REQUEST_SENSE) {
		printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
			"Two request sense commands were issued in a row\n");
	}

	if (tape->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
		tape->failed_pc = pc;
	/* Set the current packet command */
	tape->pc = pc;

	if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
	    (pc->flags & PC_FLAG_ABORT)) {
		/*
		 * We will "abort" retrying a packet command in case a
		 * legitimate error code was received (crossing a filemark,
		 * or end of the media, for example).
		 */
		if (!(pc->flags & PC_FLAG_ABORT)) {
			if (!(pc->c[0] == TEST_UNIT_READY &&
			      tape->sense_key == 2 && tape->asc == 4 &&
			      (tape->ascq == 1 || tape->ascq == 8))) {
				printk(KERN_ERR "ide-tape: %s: I/O error, "
					"pc = %2x, key = %2x, "
					"asc = %2x, ascq = %2x\n",
					tape->name, pc->c[0],
					tape->sense_key, tape->asc,
					tape->ascq);
			}
			/* Giving up */
			pc->error = IDETAPE_ERROR_GENERAL;
		}
		tape->failed_pc = NULL;
		return pc->idetape_callback(drive);
	}
	debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);

	pc->retries++;
	/* We haven't transferred any data yet */
	pc->xferred = 0;
	pc->cur_pos = pc->buf;
	/* Request to transfer the entire buffer at once */
	bcount = pc->req_xfer;

	if (pc->flags & PC_FLAG_DMA_ERROR) {
		pc->flags &= ~PC_FLAG_DMA_ERROR;
		printk(KERN_WARNING "ide-tape: DMA disabled, "
			"reverting to PIO\n");
		ide_dma_off(drive);
	}
	if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
		dma_ok = !hwif->dma_ops->dma_setup(drive);

	ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
			   IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);

	if (dma_ok)
		/* Will begin DMA later */
		pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
	if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags)) {
		ide_execute_command(drive, WIN_PACKETCMD, &idetape_transfer_pc,
				    IDETAPE_WAIT_CMD, NULL);
		return ide_started;
	} else {
		hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
		return idetape_transfer_pc(drive);
	}
}
  1127. static ide_startstop_t idetape_pc_callback(ide_drive_t *drive)
  1128. {
  1129. idetape_tape_t *tape = drive->driver_data;
  1130. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1131. idetape_end_request(drive, tape->pc->error ? 0 : 1, 0);
  1132. return ide_stopped;
  1133. }
  1134. /* A mode sense command is used to "sense" tape parameters. */
  1135. static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
  1136. {
  1137. idetape_init_pc(pc);
  1138. pc->c[0] = MODE_SENSE;
  1139. if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
  1140. /* DBD = 1 - Don't return block descriptors */
  1141. pc->c[1] = 8;
  1142. pc->c[2] = page_code;
  1143. /*
  1144. * Changed pc->c[3] to 0 (255 will at best return unused info).
  1145. *
  1146. * For SCSI this byte is defined as subpage instead of high byte
  1147. * of length and some IDE drives seem to interpret it this way
  1148. * and return an error when 255 is used.
  1149. */
  1150. pc->c[3] = 0;
  1151. /* We will just discard data in that case */
  1152. pc->c[4] = 255;
  1153. if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
  1154. pc->req_xfer = 12;
  1155. else if (page_code == IDETAPE_CAPABILITIES_PAGE)
  1156. pc->req_xfer = 24;
  1157. else
  1158. pc->req_xfer = 50;
  1159. pc->idetape_callback = &idetape_pc_callback;
  1160. }
  1161. static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
  1162. {
  1163. idetape_tape_t *tape = drive->driver_data;
  1164. struct ide_atapi_pc *pc = tape->pc;
  1165. u8 stat;
  1166. stat = ide_read_status(drive);
  1167. if (stat & SEEK_STAT) {
  1168. if (stat & ERR_STAT) {
  1169. /* Error detected */
  1170. if (pc->c[0] != TEST_UNIT_READY)
  1171. printk(KERN_ERR "ide-tape: %s: I/O error, ",
  1172. tape->name);
  1173. /* Retry operation */
  1174. return idetape_retry_pc(drive);
  1175. }
  1176. pc->error = 0;
  1177. if (tape->failed_pc == pc)
  1178. tape->failed_pc = NULL;
  1179. } else {
  1180. pc->error = IDETAPE_ERROR_GENERAL;
  1181. tape->failed_pc = NULL;
  1182. }
  1183. return pc->idetape_callback(drive);
  1184. }
  1185. static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
  1186. {
  1187. idetape_tape_t *tape = drive->driver_data;
  1188. struct request *rq = HWGROUP(drive)->rq;
  1189. int blocks = tape->pc->xferred / tape->blk_size;
  1190. tape->avg_size += blocks * tape->blk_size;
  1191. tape->insert_size += blocks * tape->blk_size;
  1192. if (tape->insert_size > 1024 * 1024)
  1193. tape->measure_insert_time = 1;
  1194. if (tape->measure_insert_time) {
  1195. tape->measure_insert_time = 0;
  1196. tape->insert_time = jiffies;
  1197. tape->insert_size = 0;
  1198. }
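/*
 * Both speed figures computed below end up in KB/s.  For example, with
 * insert_size == 2 MB (2097152 bytes) and one second (HZ jiffies)
 * elapsed, insert_speed = 2097152 / 1024 * HZ / HZ = 2048 KB/s.
 */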
  1199. if (time_after(jiffies, tape->insert_time))
  1200. tape->insert_speed = tape->insert_size / 1024 * HZ /
  1201. (jiffies - tape->insert_time);
  1202. if (time_after_eq(jiffies, tape->avg_time + HZ)) {
  1203. tape->avg_speed = tape->avg_size * HZ /
  1204. (jiffies - tape->avg_time) / 1024;
  1205. tape->avg_size = 0;
  1206. tape->avg_time = jiffies;
  1207. }
  1208. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1209. tape->first_frame += blocks;
  1210. rq->current_nr_sectors -= blocks;
  1211. if (!tape->pc->error)
  1212. idetape_end_request(drive, 1, 0);
  1213. else
  1214. idetape_end_request(drive, tape->pc->error, 0);
  1215. return ide_stopped;
  1216. }
  1217. static void idetape_create_read_cmd(idetape_tape_t *tape,
  1218. struct ide_atapi_pc *pc,
  1219. unsigned int length, struct idetape_bh *bh)
  1220. {
  1221. idetape_init_pc(pc);
  1222. pc->c[0] = READ_6;
  1223. put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
  1224. pc->c[1] = 1;
  1225. pc->idetape_callback = &idetape_rw_callback;
  1226. pc->bh = bh;
  1227. atomic_set(&bh->b_count, 0);
  1228. pc->buf = NULL;
  1229. pc->buf_size = length * tape->blk_size;
  1230. pc->req_xfer = pc->buf_size;
  1231. if (pc->req_xfer == tape->stage_size)
  1232. pc->flags |= PC_FLAG_DMA_RECOMMENDED;
  1233. }
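/*
 * Note on the CDB built above: put_unaligned() stores the 32-bit
 * big-endian block count at c[1]..c[4], and the subsequent
 * "pc->c[1] = 1" then overwrites the most significant byte (always zero
 * for sane lengths) with the Fixed bit.  The net effect is a READ(6)
 * style CDB with the Fixed bit set and a 24-bit transfer length, in
 * blocks, in c[2]..c[4].  idetape_create_write_cmd() below plays the
 * same trick for WRITE(6).
 */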
  1234. static void idetape_create_write_cmd(idetape_tape_t *tape,
  1235. struct ide_atapi_pc *pc,
  1236. unsigned int length, struct idetape_bh *bh)
  1237. {
  1238. idetape_init_pc(pc);
  1239. pc->c[0] = WRITE_6;
  1240. put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
  1241. pc->c[1] = 1;
  1242. pc->idetape_callback = &idetape_rw_callback;
  1243. pc->flags |= PC_FLAG_WRITING;
  1244. pc->bh = bh;
  1245. pc->b_data = bh->b_data;
  1246. pc->b_count = atomic_read(&bh->b_count);
  1247. pc->buf = NULL;
  1248. pc->buf_size = length * tape->blk_size;
  1249. pc->req_xfer = pc->buf_size;
  1250. if (pc->req_xfer == tape->stage_size)
  1251. pc->flags |= PC_FLAG_DMA_RECOMMENDED;
  1252. }
  1253. static ide_startstop_t idetape_do_request(ide_drive_t *drive,
  1254. struct request *rq, sector_t block)
  1255. {
  1256. idetape_tape_t *tape = drive->driver_data;
  1257. struct ide_atapi_pc *pc = NULL;
  1258. struct request *postponed_rq = tape->postponed_rq;
  1259. u8 stat;
  1260. debug_log(DBG_SENSE, "sector: %ld, nr_sectors: %ld,"
  1261. " current_nr_sectors: %d\n",
  1262. rq->sector, rq->nr_sectors, rq->current_nr_sectors);
  1263. if (!blk_special_request(rq)) {
  1264. /* We do not support buffer cache originated requests. */
  1265. printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
  1266. "request queue (%d)\n", drive->name, rq->cmd_type);
  1267. ide_end_request(drive, 0, 0);
  1268. return ide_stopped;
  1269. }
  1270. /* Retry a failed packet command */
  1271. if (tape->failed_pc && tape->pc->c[0] == REQUEST_SENSE)
  1272. return idetape_issue_pc(drive, tape->failed_pc);
  1273. if (postponed_rq != NULL)
  1274. if (rq != postponed_rq) {
  1275. printk(KERN_ERR "ide-tape: ide-tape.c bug - "
  1276. "Two DSC requests were queued\n");
  1277. idetape_end_request(drive, 0, 0);
  1278. return ide_stopped;
  1279. }
  1280. tape->postponed_rq = NULL;
  1281. /*
  1282. * If the tape is still busy, postpone our request and service
  1283. * the other device meanwhile.
  1284. */
  1285. stat = ide_read_status(drive);
  1286. if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
  1287. set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
  1288. if (drive->post_reset == 1) {
  1289. set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
  1290. drive->post_reset = 0;
  1291. }
  1292. if (time_after(jiffies, tape->insert_time))
  1293. tape->insert_speed = tape->insert_size / 1024 * HZ /
  1294. (jiffies - tape->insert_time);
  1295. if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
  1296. (stat & SEEK_STAT) == 0) {
  1297. if (postponed_rq == NULL) {
  1298. tape->dsc_polling_start = jiffies;
  1299. tape->dsc_poll_freq = tape->best_dsc_rw_freq;
  1300. tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
  1301. } else if (time_after(jiffies, tape->dsc_timeout)) {
  1302. printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
  1303. tape->name);
  1304. if (rq->cmd[0] & REQ_IDETAPE_PC2) {
  1305. idetape_media_access_finished(drive);
  1306. return ide_stopped;
  1307. } else {
  1308. return ide_do_reset(drive);
  1309. }
  1310. } else if (time_after(jiffies,
  1311. tape->dsc_polling_start +
  1312. IDETAPE_DSC_MA_THRESHOLD))
  1313. tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
  1314. idetape_postpone_request(drive);
  1315. return ide_stopped;
  1316. }
  1317. if (rq->cmd[0] & REQ_IDETAPE_READ) {
  1318. tape->postpone_cnt = 0;
  1319. pc = idetape_next_pc_storage(drive);
  1320. idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
  1321. (struct idetape_bh *)rq->special);
  1322. goto out;
  1323. }
  1324. if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
  1325. tape->postpone_cnt = 0;
  1326. pc = idetape_next_pc_storage(drive);
  1327. idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
  1328. (struct idetape_bh *)rq->special);
  1329. goto out;
  1330. }
  1331. if (rq->cmd[0] & REQ_IDETAPE_PC1) {
  1332. pc = (struct ide_atapi_pc *) rq->buffer;
  1333. rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
  1334. rq->cmd[0] |= REQ_IDETAPE_PC2;
  1335. goto out;
  1336. }
  1337. if (rq->cmd[0] & REQ_IDETAPE_PC2) {
  1338. idetape_media_access_finished(drive);
  1339. return ide_stopped;
  1340. }
  1341. BUG();
  1342. out:
  1343. return idetape_issue_pc(drive, pc);
  1344. }
  1345. /* Pipeline related functions */
  1346. /*
  1347. * The function below uses __get_free_page to allocate a pipeline stage, along
  1348. * with all the necessary small buffers which together make a buffer of size
  1349. * tape->stage_size (or a bit more). We attempt to combine sequential pages as
  1350. * much as possible.
  1351. *
  1352. * It returns a pointer to the new allocated stage, or NULL if we can't (or
  1353. * don't want to) allocate a stage.
  1354. *
  1355. * Pipeline stages are optional and are used to increase performance. If we
  1356. * can't allocate them, we'll manage without them.
  1357. */
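/*
 * A rough illustration of what the allocator below produces, assuming
 * tape->pages_per_stage == 4: depending on whether __get_free_page()
 * happens to return physically adjacent pages, the stage may end up as
 * a single bh spanning several pages or as a chain of single-page bh's
 * linked through b_reqnext, e.g.
 *
 *	bh0: b_data = P0, b_size = 2 * PAGE_SIZE	(P1 adjacent to P0)
 *	bh1: b_data = P2, b_size = PAGE_SIZE
 *	bh2: b_data = P3, b_size = PAGE_SIZE
 *
 * The last bh is finally shrunk by tape->excess_bh_size so that the
 * stage holds exactly tape->stage_size bytes.
 */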
  1358. static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
  1359. int clear)
  1360. {
  1361. idetape_stage_t *stage;
  1362. struct idetape_bh *prev_bh, *bh;
  1363. int pages = tape->pages_per_stage;
  1364. char *b_data = NULL;
  1365. stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
  1366. if (!stage)
  1367. return NULL;
  1368. stage->next = NULL;
  1369. stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
  1370. bh = stage->bh;
  1371. if (bh == NULL)
  1372. goto abort;
  1373. bh->b_reqnext = NULL;
  1374. bh->b_data = (char *) __get_free_page(GFP_KERNEL);
  1375. if (!bh->b_data)
  1376. goto abort;
  1377. if (clear)
  1378. memset(bh->b_data, 0, PAGE_SIZE);
  1379. bh->b_size = PAGE_SIZE;
  1380. atomic_set(&bh->b_count, full ? bh->b_size : 0);
  1381. while (--pages) {
  1382. b_data = (char *) __get_free_page(GFP_KERNEL);
  1383. if (!b_data)
  1384. goto abort;
  1385. if (clear)
  1386. memset(b_data, 0, PAGE_SIZE);
  1387. if (bh->b_data == b_data + PAGE_SIZE) {
  1388. bh->b_size += PAGE_SIZE;
  1389. bh->b_data -= PAGE_SIZE;
  1390. if (full)
  1391. atomic_add(PAGE_SIZE, &bh->b_count);
  1392. continue;
  1393. }
  1394. if (b_data == bh->b_data + bh->b_size) {
  1395. bh->b_size += PAGE_SIZE;
  1396. if (full)
  1397. atomic_add(PAGE_SIZE, &bh->b_count);
  1398. continue;
  1399. }
  1400. prev_bh = bh;
  1401. bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
  1402. if (!bh) {
  1403. free_page((unsigned long) b_data);
  1404. goto abort;
  1405. }
  1406. bh->b_reqnext = NULL;
  1407. bh->b_data = b_data;
  1408. bh->b_size = PAGE_SIZE;
  1409. atomic_set(&bh->b_count, full ? bh->b_size : 0);
  1410. prev_bh->b_reqnext = bh;
  1411. }
  1412. bh->b_size -= tape->excess_bh_size;
  1413. if (full)
  1414. atomic_sub(tape->excess_bh_size, &bh->b_count);
  1415. return stage;
  1416. abort:
  1417. __idetape_kfree_stage(stage);
  1418. return NULL;
  1419. }
  1420. static int idetape_copy_stage_from_user(idetape_tape_t *tape,
  1421. idetape_stage_t *stage, const char __user *buf, int n)
  1422. {
  1423. struct idetape_bh *bh = tape->bh;
  1424. int count;
  1425. int ret = 0;
  1426. while (n) {
  1427. if (bh == NULL) {
  1428. printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
  1429. __func__);
  1430. return 1;
  1431. }
  1432. count = min((unsigned int)
  1433. (bh->b_size - atomic_read(&bh->b_count)),
  1434. (unsigned int)n);
  1435. if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
  1436. count))
  1437. ret = 1;
  1438. n -= count;
  1439. atomic_add(count, &bh->b_count);
  1440. buf += count;
  1441. if (atomic_read(&bh->b_count) == bh->b_size) {
  1442. bh = bh->b_reqnext;
  1443. if (bh)
  1444. atomic_set(&bh->b_count, 0);
  1445. }
  1446. }
  1447. tape->bh = bh;
  1448. return ret;
  1449. }
  1450. static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
  1451. idetape_stage_t *stage, int n)
  1452. {
  1453. struct idetape_bh *bh = tape->bh;
  1454. int count;
  1455. int ret = 0;
  1456. while (n) {
  1457. if (bh == NULL) {
  1458. printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
  1459. __func__);
  1460. return 1;
  1461. }
  1462. count = min(tape->b_count, n);
  1463. if (copy_to_user(buf, tape->b_data, count))
  1464. ret = 1;
  1465. n -= count;
  1466. tape->b_data += count;
  1467. tape->b_count -= count;
  1468. buf += count;
  1469. if (!tape->b_count) {
  1470. bh = bh->b_reqnext;
  1471. tape->bh = bh;
  1472. if (bh) {
  1473. tape->b_data = bh->b_data;
  1474. tape->b_count = atomic_read(&bh->b_count);
  1475. }
  1476. }
  1477. }
  1478. return ret;
  1479. }
  1480. static void idetape_init_merge_stage(idetape_tape_t *tape)
  1481. {
  1482. struct idetape_bh *bh = tape->merge_stage->bh;
  1483. tape->bh = bh;
  1484. if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
  1485. atomic_set(&bh->b_count, 0);
  1486. else {
  1487. tape->b_data = bh->b_data;
  1488. tape->b_count = atomic_read(&bh->b_count);
  1489. }
  1490. }
  1491. /* Install a completion in a pending request and sleep until it is serviced. The
  1492. * caller should ensure that the request will not be serviced before we install
  1493. * the completion (usually by disabling interrupts).
  1494. */
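/*
 * A minimal sketch of the expected call pattern (mirroring
 * __idetape_discard_read_pipeline() further down):
 *
 *	spin_lock_irqsave(&tape->lock, flags);
 *	if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
 *		idetape_wait_for_request(drive, tape->active_data_rq);
 *	spin_unlock_irqrestore(&tape->lock, flags);
 *
 * The function below drops tape->lock around wait_for_completion() and
 * re-acquires it before returning, so tape->lock must be held on entry.
 */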
  1495. static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
  1496. {
  1497. DECLARE_COMPLETION_ONSTACK(wait);
  1498. idetape_tape_t *tape = drive->driver_data;
  1499. if (rq == NULL || !blk_special_request(rq)) {
  1500. printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
  1501. " request\n");
  1502. return;
  1503. }
  1504. rq->end_io_data = &wait;
  1505. rq->end_io = blk_end_sync_rq;
  1506. spin_unlock_irq(&tape->lock);
  1507. wait_for_completion(&wait);
  1508. /* The stage and its struct request have been deallocated */
  1509. spin_lock_irq(&tape->lock);
  1510. }
  1511. static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
  1512. {
  1513. idetape_tape_t *tape = drive->driver_data;
  1514. u8 *readpos = tape->pc->buf;
  1515. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1516. if (!tape->pc->error) {
  1517. debug_log(DBG_SENSE, "BOP - %s\n",
  1518. (readpos[0] & 0x80) ? "Yes" : "No");
  1519. debug_log(DBG_SENSE, "EOP - %s\n",
  1520. (readpos[0] & 0x40) ? "Yes" : "No");
  1521. if (readpos[0] & 0x4) {
  1522. printk(KERN_INFO "ide-tape: Block location is unknown"
  1523. "to the tape\n");
  1524. clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
  1525. idetape_end_request(drive, 0, 0);
  1526. } else {
  1527. debug_log(DBG_SENSE, "Block Location - %u\n",
  1528. be32_to_cpu(*(u32 *)&readpos[4]));
  1529. tape->partition = readpos[1];
  1530. tape->first_frame =
  1531. be32_to_cpu(*(u32 *)&readpos[4]);
  1532. set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
  1533. idetape_end_request(drive, 1, 0);
  1534. }
  1535. } else {
  1536. idetape_end_request(drive, 0, 0);
  1537. }
  1538. return ide_stopped;
  1539. }
  1540. /*
  1541. * Write a filemark if write_filemark=1. Flush the device buffers without
  1542. * writing a filemark otherwise.
  1543. */
  1544. static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
  1545. struct ide_atapi_pc *pc, int write_filemark)
  1546. {
  1547. idetape_init_pc(pc);
  1548. pc->c[0] = WRITE_FILEMARKS;
  1549. pc->c[4] = write_filemark;
  1550. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1551. pc->idetape_callback = &idetape_pc_callback;
  1552. }
  1553. static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
  1554. {
  1555. idetape_init_pc(pc);
  1556. pc->c[0] = TEST_UNIT_READY;
  1557. pc->idetape_callback = &idetape_pc_callback;
  1558. }
  1559. /*
  1560. * We add a special packet command request to the tail of the request queue, and
  1561. * wait for it to be serviced. This is not to be called from within the request
1562. handling part of the driver! We allocate data on the stack here, and it is
  1563. * valid until the request is finished. This is not the case for the bottom part
  1564. * of the driver, where we are always leaving the functions to wait for an
  1565. * interrupt or a timer event.
  1566. *
  1567. * From the bottom part of the driver, we should allocate safe memory using
  1568. * idetape_next_pc_storage() and ide_tape_next_rq_storage(), and add the request
  1569. * to the request list without waiting for it to be serviced! In that case, we
  1570. * usually use idetape_queue_pc_head().
  1571. */
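/*
 * A minimal sketch of the pattern described above, as used by the
 * higher level helpers further down (e.g. idetape_rewind_tape()):
 *
 *	struct ide_atapi_pc pc;		(lives on the caller's stack)
 *
 *	idetape_create_rewind_cmd(drive, &pc);
 *	if (idetape_queue_pc_tail(drive, &pc))
 *		return -EIO;
 *
 * The stack allocation is safe only because ide_do_drive_cmd() is
 * invoked with ide_wait, i.e. the caller sleeps until the request has
 * been fully serviced.
 */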
  1572. static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
  1573. {
  1574. struct ide_tape_obj *tape = drive->driver_data;
  1575. struct request rq;
  1576. idetape_init_rq(&rq, REQ_IDETAPE_PC1);
  1577. rq.buffer = (char *) pc;
  1578. rq.rq_disk = tape->disk;
  1579. return ide_do_drive_cmd(drive, &rq, ide_wait);
  1580. }
  1581. static void idetape_create_load_unload_cmd(ide_drive_t *drive,
  1582. struct ide_atapi_pc *pc, int cmd)
  1583. {
  1584. idetape_init_pc(pc);
  1585. pc->c[0] = START_STOP;
  1586. pc->c[4] = cmd;
  1587. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1588. pc->idetape_callback = &idetape_pc_callback;
  1589. }
  1590. static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
  1591. {
  1592. idetape_tape_t *tape = drive->driver_data;
  1593. struct ide_atapi_pc pc;
  1594. int load_attempted = 0;
  1595. /* Wait for the tape to become ready */
  1596. set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
  1597. timeout += jiffies;
  1598. while (time_before(jiffies, timeout)) {
  1599. idetape_create_test_unit_ready_cmd(&pc);
  1600. if (!__idetape_queue_pc_tail(drive, &pc))
  1601. return 0;
  1602. if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
  1603. || (tape->asc == 0x3A)) {
  1604. /* no media */
  1605. if (load_attempted)
  1606. return -ENOMEDIUM;
  1607. idetape_create_load_unload_cmd(drive, &pc,
  1608. IDETAPE_LU_LOAD_MASK);
  1609. __idetape_queue_pc_tail(drive, &pc);
  1610. load_attempted = 1;
  1611. /* not about to be ready */
  1612. } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
  1613. (tape->ascq == 1 || tape->ascq == 8)))
  1614. return -EIO;
  1615. msleep(100);
  1616. }
  1617. return -EIO;
  1618. }
  1619. static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
  1620. {
  1621. return __idetape_queue_pc_tail(drive, pc);
  1622. }
  1623. static int idetape_flush_tape_buffers(ide_drive_t *drive)
  1624. {
  1625. struct ide_atapi_pc pc;
  1626. int rc;
  1627. idetape_create_write_filemark_cmd(drive, &pc, 0);
  1628. rc = idetape_queue_pc_tail(drive, &pc);
  1629. if (rc)
  1630. return rc;
  1631. idetape_wait_ready(drive, 60 * 5 * HZ);
  1632. return 0;
  1633. }
  1634. static void idetape_create_read_position_cmd(struct ide_atapi_pc *pc)
  1635. {
  1636. idetape_init_pc(pc);
  1637. pc->c[0] = READ_POSITION;
  1638. pc->req_xfer = 20;
  1639. pc->idetape_callback = &idetape_read_position_callback;
  1640. }
  1641. static int idetape_read_position(ide_drive_t *drive)
  1642. {
  1643. idetape_tape_t *tape = drive->driver_data;
  1644. struct ide_atapi_pc pc;
  1645. int position;
  1646. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1647. idetape_create_read_position_cmd(&pc);
  1648. if (idetape_queue_pc_tail(drive, &pc))
  1649. return -1;
  1650. position = tape->first_frame;
  1651. return position;
  1652. }
  1653. static void idetape_create_locate_cmd(ide_drive_t *drive,
  1654. struct ide_atapi_pc *pc,
  1655. unsigned int block, u8 partition, int skip)
  1656. {
  1657. idetape_init_pc(pc);
  1658. pc->c[0] = POSITION_TO_ELEMENT;
  1659. pc->c[1] = 2;
  1660. put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
  1661. pc->c[8] = partition;
  1662. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1663. pc->idetape_callback = &idetape_pc_callback;
  1664. }
  1665. static int idetape_create_prevent_cmd(ide_drive_t *drive,
  1666. struct ide_atapi_pc *pc, int prevent)
  1667. {
  1668. idetape_tape_t *tape = drive->driver_data;
  1669. /* device supports locking according to capabilities page */
  1670. if (!(tape->caps[6] & 0x01))
  1671. return 0;
  1672. idetape_init_pc(pc);
  1673. pc->c[0] = ALLOW_MEDIUM_REMOVAL;
  1674. pc->c[4] = prevent;
  1675. pc->idetape_callback = &idetape_pc_callback;
  1676. return 1;
  1677. }
  1678. static int __idetape_discard_read_pipeline(ide_drive_t *drive)
  1679. {
  1680. idetape_tape_t *tape = drive->driver_data;
  1681. unsigned long flags;
  1682. int cnt;
  1683. if (tape->chrdev_dir != IDETAPE_DIR_READ)
  1684. return 0;
  1685. /* Remove merge stage. */
  1686. cnt = tape->merge_stage_size / tape->blk_size;
  1687. if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
  1688. ++cnt; /* Filemarks count as 1 sector */
  1689. tape->merge_stage_size = 0;
  1690. if (tape->merge_stage != NULL) {
  1691. __idetape_kfree_stage(tape->merge_stage);
  1692. tape->merge_stage = NULL;
  1693. }
  1694. /* Clear pipeline flags. */
  1695. clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
  1696. tape->chrdev_dir = IDETAPE_DIR_NONE;
  1697. /* Remove pipeline stages. */
  1698. if (tape->first_stage == NULL)
  1699. return 0;
  1700. spin_lock_irqsave(&tape->lock, flags);
  1701. tape->next_stage = NULL;
  1702. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
  1703. idetape_wait_for_request(drive, tape->active_data_rq);
  1704. spin_unlock_irqrestore(&tape->lock, flags);
  1705. while (tape->first_stage != NULL) {
  1706. struct request *rq_ptr = &tape->first_stage->rq;
  1707. cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
  1708. if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
  1709. ++cnt;
  1710. idetape_remove_stage_head(drive);
  1711. }
  1712. tape->nr_pending_stages = 0;
  1713. tape->max_stages = tape->min_pipeline;
  1714. return cnt;
  1715. }
  1716. /*
  1717. * Position the tape to the requested block using the LOCATE packet command.
  1718. * A READ POSITION command is then issued to check where we are positioned. Like
  1719. * all higher level operations, we queue the commands at the tail of the request
  1720. * queue and wait for their completion.
  1721. */
  1722. static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
  1723. u8 partition, int skip)
  1724. {
  1725. idetape_tape_t *tape = drive->driver_data;
  1726. int retval;
  1727. struct ide_atapi_pc pc;
  1728. if (tape->chrdev_dir == IDETAPE_DIR_READ)
  1729. __idetape_discard_read_pipeline(drive);
  1730. idetape_wait_ready(drive, 60 * 5 * HZ);
  1731. idetape_create_locate_cmd(drive, &pc, block, partition, skip);
  1732. retval = idetape_queue_pc_tail(drive, &pc);
  1733. if (retval)
  1734. return (retval);
  1735. idetape_create_read_position_cmd(&pc);
  1736. return (idetape_queue_pc_tail(drive, &pc));
  1737. }
  1738. static void idetape_discard_read_pipeline(ide_drive_t *drive,
  1739. int restore_position)
  1740. {
  1741. idetape_tape_t *tape = drive->driver_data;
  1742. int cnt;
  1743. int seek, position;
  1744. cnt = __idetape_discard_read_pipeline(drive);
  1745. if (restore_position) {
  1746. position = idetape_read_position(drive);
  1747. seek = position > cnt ? position - cnt : 0;
  1748. if (idetape_position_tape(drive, seek, 0, 0)) {
  1749. printk(KERN_INFO "ide-tape: %s: position_tape failed in"
  1750. " discard_pipeline()\n", tape->name);
  1751. return;
  1752. }
  1753. }
  1754. }
  1755. /*
  1756. * Generate a read/write request for the block device interface and wait for it
  1757. * to be serviced.
  1758. */
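/*
 * On success this returns the number of bytes actually transferred
 * (tape->blk_size * completed blocks); a general error yields -EIO and
 * a non-read/write command yields 0.  Callers further down follow the
 * pattern
 *
 *	bytes_read = idetape_queue_rw_tail(drive, REQ_IDETAPE_READ,
 *					   blocks, tape->merge_stage->bh);
 *	if (bytes_read <= 0)
 *		... bail out or report end of data ...
 *
 * as in idetape_chrdev_read().
 */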
  1759. static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
  1760. struct idetape_bh *bh)
  1761. {
  1762. idetape_tape_t *tape = drive->driver_data;
  1763. struct request rq;
  1764. debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
  1765. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  1766. printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
  1767. __func__);
  1768. return (0);
  1769. }
  1770. idetape_init_rq(&rq, cmd);
  1771. rq.rq_disk = tape->disk;
  1772. rq.special = (void *)bh;
  1773. rq.sector = tape->first_frame;
  1774. rq.nr_sectors = blocks;
  1775. rq.current_nr_sectors = blocks;
  1776. (void) ide_do_drive_cmd(drive, &rq, ide_wait);
  1777. if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
  1778. return 0;
  1779. if (tape->merge_stage)
  1780. idetape_init_merge_stage(tape);
  1781. if (rq.errors == IDETAPE_ERROR_GENERAL)
  1782. return -EIO;
  1783. return (tape->blk_size * (blocks-rq.current_nr_sectors));
  1784. }
  1785. /* start servicing the pipeline stages, starting from tape->next_stage. */
  1786. static void idetape_plug_pipeline(ide_drive_t *drive)
  1787. {
  1788. idetape_tape_t *tape = drive->driver_data;
  1789. if (tape->next_stage == NULL)
  1790. return;
  1791. if (!test_and_set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  1792. idetape_activate_next_stage(drive);
  1793. (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
  1794. }
  1795. }
  1796. static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
  1797. {
  1798. idetape_init_pc(pc);
  1799. pc->c[0] = INQUIRY;
  1800. pc->c[4] = 254;
  1801. pc->req_xfer = 254;
  1802. pc->idetape_callback = &idetape_pc_callback;
  1803. }
  1804. static void idetape_create_rewind_cmd(ide_drive_t *drive,
  1805. struct ide_atapi_pc *pc)
  1806. {
  1807. idetape_init_pc(pc);
  1808. pc->c[0] = REZERO_UNIT;
  1809. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1810. pc->idetape_callback = &idetape_pc_callback;
  1811. }
  1812. static void idetape_create_erase_cmd(struct ide_atapi_pc *pc)
  1813. {
  1814. idetape_init_pc(pc);
  1815. pc->c[0] = ERASE;
  1816. pc->c[1] = 1;
  1817. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1818. pc->idetape_callback = &idetape_pc_callback;
  1819. }
  1820. static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
  1821. {
  1822. idetape_init_pc(pc);
  1823. pc->c[0] = SPACE;
  1824. put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
  1825. pc->c[1] = cmd;
  1826. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1827. pc->idetape_callback = &idetape_pc_callback;
  1828. }
  1829. static void idetape_wait_first_stage(ide_drive_t *drive)
  1830. {
  1831. idetape_tape_t *tape = drive->driver_data;
  1832. unsigned long flags;
  1833. if (tape->first_stage == NULL)
  1834. return;
  1835. spin_lock_irqsave(&tape->lock, flags);
  1836. if (tape->active_stage == tape->first_stage)
  1837. idetape_wait_for_request(drive, tape->active_data_rq);
  1838. spin_unlock_irqrestore(&tape->lock, flags);
  1839. }
  1840. /* Queue up a character device originated write request. */
  1841. static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
  1842. {
  1843. idetape_tape_t *tape = drive->driver_data;
  1844. unsigned long flags;
  1845. debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
1846. /* Wait until the pipeline is idle, then queue our write synchronously. */
  1847. while (1) {
  1848. spin_lock_irqsave(&tape->lock, flags);
  1849. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  1850. idetape_wait_for_request(drive, tape->active_data_rq);
  1851. spin_unlock_irqrestore(&tape->lock, flags);
  1852. } else {
  1853. spin_unlock_irqrestore(&tape->lock, flags);
  1854. idetape_plug_pipeline(drive);
  1855. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
  1856. &tape->flags))
  1857. continue;
  1858. return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
  1859. blocks, tape->merge_stage->bh);
  1860. }
  1861. }
  1862. }
  1863. /*
  1864. * Wait until all pending pipeline requests are serviced. Typically called on
  1865. * device close.
  1866. */
  1867. static void idetape_wait_for_pipeline(ide_drive_t *drive)
  1868. {
  1869. idetape_tape_t *tape = drive->driver_data;
  1870. unsigned long flags;
  1871. while (tape->next_stage || test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
  1872. &tape->flags)) {
  1873. idetape_plug_pipeline(drive);
  1874. spin_lock_irqsave(&tape->lock, flags);
  1875. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
  1876. idetape_wait_for_request(drive, tape->active_data_rq);
  1877. spin_unlock_irqrestore(&tape->lock, flags);
  1878. }
  1879. }
  1880. static void idetape_empty_write_pipeline(ide_drive_t *drive)
  1881. {
  1882. idetape_tape_t *tape = drive->driver_data;
  1883. int blocks, min;
  1884. struct idetape_bh *bh;
  1885. if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
  1886. printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
  1887. " but we are not writing.\n");
  1888. return;
  1889. }
  1890. if (tape->merge_stage_size > tape->stage_size) {
  1891. printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
  1892. tape->merge_stage_size = tape->stage_size;
  1893. }
  1894. if (tape->merge_stage_size) {
  1895. blocks = tape->merge_stage_size / tape->blk_size;
  1896. if (tape->merge_stage_size % tape->blk_size) {
  1897. unsigned int i;
  1898. blocks++;
  1899. i = tape->blk_size - tape->merge_stage_size %
  1900. tape->blk_size;
  1901. bh = tape->bh->b_reqnext;
  1902. while (bh) {
  1903. atomic_set(&bh->b_count, 0);
  1904. bh = bh->b_reqnext;
  1905. }
  1906. bh = tape->bh;
  1907. while (i) {
  1908. if (bh == NULL) {
  1909. printk(KERN_INFO "ide-tape: bug,"
  1910. " bh NULL\n");
  1911. break;
  1912. }
  1913. min = min(i, (unsigned int)(bh->b_size -
  1914. atomic_read(&bh->b_count)));
  1915. memset(bh->b_data + atomic_read(&bh->b_count),
  1916. 0, min);
  1917. atomic_add(min, &bh->b_count);
  1918. i -= min;
  1919. bh = bh->b_reqnext;
  1920. }
  1921. }
  1922. (void) idetape_add_chrdev_write_request(drive, blocks);
  1923. tape->merge_stage_size = 0;
  1924. }
  1925. idetape_wait_for_pipeline(drive);
  1926. if (tape->merge_stage != NULL) {
  1927. __idetape_kfree_stage(tape->merge_stage);
  1928. tape->merge_stage = NULL;
  1929. }
  1930. clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
  1931. tape->chrdev_dir = IDETAPE_DIR_NONE;
  1932. /*
  1933. * On the next backup, perform the feedback loop again. (I don't want to
  1934. * keep sense information between backups, as some systems are
  1935. * constantly on, and the system load can be totally different on the
  1936. * next backup).
  1937. */
  1938. tape->max_stages = tape->min_pipeline;
  1939. if (tape->first_stage != NULL ||
  1940. tape->next_stage != NULL ||
  1941. tape->last_stage != NULL ||
  1942. tape->nr_stages != 0) {
  1943. printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
  1944. "first_stage %p, next_stage %p, "
  1945. "last_stage %p, nr_stages %d\n",
  1946. tape->first_stage, tape->next_stage,
  1947. tape->last_stage, tape->nr_stages);
  1948. }
  1949. }
  1950. static int idetape_init_read(ide_drive_t *drive, int max_stages)
  1951. {
  1952. idetape_tape_t *tape = drive->driver_data;
  1953. int bytes_read;
  1954. /* Initialize read operation */
  1955. if (tape->chrdev_dir != IDETAPE_DIR_READ) {
  1956. if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
  1957. idetape_empty_write_pipeline(drive);
  1958. idetape_flush_tape_buffers(drive);
  1959. }
  1960. if (tape->merge_stage || tape->merge_stage_size) {
  1961. printk(KERN_ERR "ide-tape: merge_stage_size should be"
  1962. " 0 now\n");
  1963. tape->merge_stage_size = 0;
  1964. }
  1965. tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
  1966. if (!tape->merge_stage)
  1967. return -ENOMEM;
  1968. tape->chrdev_dir = IDETAPE_DIR_READ;
  1969. /*
  1970. * Issue a read 0 command to ensure that DSC handshake is
  1971. * switched from completion mode to buffer available mode.
  1972. * No point in issuing this if DSC overlap isn't supported, some
  1973. * drives (Seagate STT3401A) will return an error.
  1974. */
  1975. if (drive->dsc_overlap) {
  1976. bytes_read = idetape_queue_rw_tail(drive,
  1977. REQ_IDETAPE_READ, 0,
  1978. tape->merge_stage->bh);
  1979. if (bytes_read < 0) {
  1980. __idetape_kfree_stage(tape->merge_stage);
  1981. tape->merge_stage = NULL;
  1982. tape->chrdev_dir = IDETAPE_DIR_NONE;
  1983. return bytes_read;
  1984. }
  1985. }
  1986. }
  1987. if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  1988. if (tape->nr_pending_stages >= 3 * max_stages / 4) {
  1989. tape->measure_insert_time = 1;
  1990. tape->insert_time = jiffies;
  1991. tape->insert_size = 0;
  1992. tape->insert_speed = 0;
  1993. idetape_plug_pipeline(drive);
  1994. }
  1995. }
  1996. return 0;
  1997. }
  1998. /*
  1999. * Called from idetape_chrdev_read() to service a character device read request
  2000. * and add read-ahead requests to our pipeline.
  2001. */
  2002. static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
  2003. {
  2004. idetape_tape_t *tape = drive->driver_data;
  2005. debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
  2006. /* If we are at a filemark, return a read length of 0 */
  2007. if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
  2008. return 0;
  2009. idetape_init_read(drive, tape->max_stages);
  2010. if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
  2011. return 0;
  2012. return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
  2013. tape->merge_stage->bh);
  2014. }
  2015. static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
  2016. {
  2017. idetape_tape_t *tape = drive->driver_data;
  2018. struct idetape_bh *bh;
  2019. int blocks;
  2020. while (bcount) {
  2021. unsigned int count;
  2022. bh = tape->merge_stage->bh;
  2023. count = min(tape->stage_size, bcount);
  2024. bcount -= count;
  2025. blocks = count / tape->blk_size;
  2026. while (count) {
  2027. atomic_set(&bh->b_count,
  2028. min(count, (unsigned int)bh->b_size));
  2029. memset(bh->b_data, 0, atomic_read(&bh->b_count));
  2030. count -= atomic_read(&bh->b_count);
  2031. bh = bh->b_reqnext;
  2032. }
  2033. idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
  2034. tape->merge_stage->bh);
  2035. }
  2036. }
  2037. static int idetape_pipeline_size(ide_drive_t *drive)
  2038. {
  2039. idetape_tape_t *tape = drive->driver_data;
  2040. idetape_stage_t *stage;
  2041. struct request *rq;
  2042. int size = 0;
  2043. idetape_wait_for_pipeline(drive);
  2044. stage = tape->first_stage;
  2045. while (stage != NULL) {
  2046. rq = &stage->rq;
  2047. size += tape->blk_size * (rq->nr_sectors -
  2048. rq->current_nr_sectors);
  2049. if (rq->errors == IDETAPE_ERROR_FILEMARK)
  2050. size += tape->blk_size;
  2051. stage = stage->next;
  2052. }
  2053. size += tape->merge_stage_size;
  2054. return size;
  2055. }
  2056. /*
  2057. * Rewinds the tape to the Beginning Of the current Partition (BOP). We
  2058. * currently support only one partition.
  2059. */
  2060. static int idetape_rewind_tape(ide_drive_t *drive)
  2061. {
  2062. int retval;
  2063. struct ide_atapi_pc pc;
  2064. idetape_tape_t *tape;
  2065. tape = drive->driver_data;
  2066. debug_log(DBG_SENSE, "Enter %s\n", __func__);
  2067. idetape_create_rewind_cmd(drive, &pc);
  2068. retval = idetape_queue_pc_tail(drive, &pc);
  2069. if (retval)
  2070. return retval;
  2071. idetape_create_read_position_cmd(&pc);
  2072. retval = idetape_queue_pc_tail(drive, &pc);
  2073. if (retval)
  2074. return retval;
  2075. return 0;
  2076. }
  2077. /* mtio.h compatible commands should be issued to the chrdev interface. */
  2078. static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
  2079. unsigned long arg)
  2080. {
  2081. idetape_tape_t *tape = drive->driver_data;
  2082. void __user *argp = (void __user *)arg;
  2083. struct idetape_config {
  2084. int dsc_rw_frequency;
  2085. int dsc_media_access_frequency;
  2086. int nr_stages;
  2087. } config;
  2088. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  2089. switch (cmd) {
  2090. case 0x0340:
  2091. if (copy_from_user(&config, argp, sizeof(config)))
  2092. return -EFAULT;
  2093. tape->best_dsc_rw_freq = config.dsc_rw_frequency;
  2094. tape->max_stages = config.nr_stages;
  2095. break;
  2096. case 0x0350:
  2097. config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
  2098. config.nr_stages = tape->max_stages;
  2099. if (copy_to_user(argp, &config, sizeof(config)))
  2100. return -EFAULT;
  2101. break;
  2102. default:
  2103. return -EIO;
  2104. }
  2105. return 0;
  2106. }
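/*
 * A user-space sketch of the two private ioctls handled above
 * (0x0340 == set, 0x0350 == get).  The structure layout must match
 * struct idetape_config; the device path is only an example:
 *
 *	struct {
 *		int dsc_rw_frequency;
 *		int dsc_media_access_frequency;
 *		int nr_stages;
 *	} cfg;
 *
 *	int fd = open("/dev/ht0", O_RDONLY);
 *	if (ioctl(fd, 0x0350, &cfg) == 0) {
 *		cfg.nr_stages *= 2;
 *		ioctl(fd, 0x0340, &cfg);
 *	}
 *
 * Note that the set path only consumes dsc_rw_frequency and nr_stages;
 * dsc_media_access_frequency is currently ignored.
 */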
  2107. /*
  2108. * The function below is now a bit more complicated than just passing the
  2109. * command to the tape since we may have crossed some filemarks during our
  2110. * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to
  2111. * support MTFSFM when the filemark is in our internal pipeline even if the tape
  2112. * doesn't support spacing over filemarks in the reverse direction.
  2113. */
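/*
 * Example of the interplay described above: for MTFSF with mt_count == 2
 * when exactly one filemark has already been read into the pipeline, the
 * scan below consumes that filemark from the staged requests (count == 1)
 * and only a single SPACE-over-filemark command is then sent to the drive
 * for the remaining mt_count - count == 1 filemark.
 */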
  2114. static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
  2115. int mt_count)
  2116. {
  2117. idetape_tape_t *tape = drive->driver_data;
  2118. struct ide_atapi_pc pc;
  2119. unsigned long flags;
  2120. int retval, count = 0;
  2121. int sprev = !!(tape->caps[4] & 0x20);
  2122. if (mt_count == 0)
  2123. return 0;
  2124. if (MTBSF == mt_op || MTBSFM == mt_op) {
  2125. if (!sprev)
  2126. return -EIO;
  2127. mt_count = -mt_count;
  2128. }
  2129. if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2130. /* It's a read-ahead buffer; scan it for crossed filemarks. */
  2131. tape->merge_stage_size = 0;
  2132. if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
  2133. ++count;
  2134. while (tape->first_stage != NULL) {
  2135. if (count == mt_count) {
  2136. if (mt_op == MTFSFM)
  2137. set_bit(IDETAPE_FLAG_FILEMARK,
  2138. &tape->flags);
  2139. return 0;
  2140. }
  2141. spin_lock_irqsave(&tape->lock, flags);
  2142. if (tape->first_stage == tape->active_stage) {
  2143. /*
  2144. * We have reached the active stage in the read
  2145. * pipeline. There is no point in allowing the
  2146. * drive to continue reading any farther, so we
  2147. * stop the pipeline.
  2148. *
  2149. * This section should be moved to a separate
  2150. * subroutine because similar operations are
  2151. * done in __idetape_discard_read_pipeline(),
  2152. * for example.
  2153. */
  2154. tape->next_stage = NULL;
  2155. spin_unlock_irqrestore(&tape->lock, flags);
  2156. idetape_wait_first_stage(drive);
  2157. tape->next_stage = tape->first_stage->next;
  2158. } else
  2159. spin_unlock_irqrestore(&tape->lock, flags);
  2160. if (tape->first_stage->rq.errors ==
  2161. IDETAPE_ERROR_FILEMARK)
  2162. ++count;
  2163. idetape_remove_stage_head(drive);
  2164. }
  2165. idetape_discard_read_pipeline(drive, 0);
  2166. }
  2167. /*
  2168. * The filemark was not found in our internal pipeline; now we can issue
  2169. * the space command.
  2170. */
  2171. switch (mt_op) {
  2172. case MTFSF:
  2173. case MTBSF:
  2174. idetape_create_space_cmd(&pc, mt_count - count,
  2175. IDETAPE_SPACE_OVER_FILEMARK);
  2176. return idetape_queue_pc_tail(drive, &pc);
  2177. case MTFSFM:
  2178. case MTBSFM:
  2179. if (!sprev)
  2180. return -EIO;
  2181. retval = idetape_space_over_filemarks(drive, MTFSF,
  2182. mt_count - count);
  2183. if (retval)
  2184. return retval;
  2185. count = (MTBSFM == mt_op ? 1 : -1);
  2186. return idetape_space_over_filemarks(drive, MTFSF, count);
  2187. default:
  2188. printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
  2189. mt_op);
  2190. return -EIO;
  2191. }
  2192. }
  2193. /*
  2194. * Our character device read / write functions.
  2195. *
  2196. * The tape is optimized to maximize throughput when it is transferring an
  2197. * integral number of the "continuous transfer limit", which is a parameter of
  2198. * the specific tape (26kB on my particular tape, 32kB for Onstream).
  2199. *
  2200. * As of version 1.3 of the driver, the character device provides an abstract
  2201. * continuous view of the media - any mix of block sizes (even 1 byte) on the
  2202. * same backup/restore procedure is supported. The driver will internally
2203. * convert the requests to the recommended transfer unit, so that a mismatch
2204. * between the user's block size and the recommended size will only result in
2205. * (slightly) increased driver overhead, but will no longer hurt performance.
  2206. * This is not applicable to Onstream.
  2207. */
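/*
 * In practice this means user space may read with whatever granularity
 * it likes; the driver regroups the I/O into "ctl"-sized requests (see
 * the caps[12] usage below).  A minimal, hypothetical consumer:
 *
 *	int fd = open("/dev/nht0", O_RDONLY);	(non-rewinding node, example name)
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		... consume n bytes ...
 *
 * A read() return value of 0 at a filemark is how the filemark is
 * reported to user space (see the FILEMARK handling in
 * idetape_chrdev_read() below).
 */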
  2208. static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
  2209. size_t count, loff_t *ppos)
  2210. {
  2211. struct ide_tape_obj *tape = ide_tape_f(file);
  2212. ide_drive_t *drive = tape->drive;
  2213. ssize_t bytes_read, temp, actually_read = 0, rc;
  2214. ssize_t ret = 0;
  2215. u16 ctl = *(u16 *)&tape->caps[12];
  2216. debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
  2217. if (tape->chrdev_dir != IDETAPE_DIR_READ) {
  2218. if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags))
  2219. if (count > tape->blk_size &&
  2220. (count % tape->blk_size) == 0)
  2221. tape->user_bs_factor = count / tape->blk_size;
  2222. }
  2223. rc = idetape_init_read(drive, tape->max_stages);
  2224. if (rc < 0)
  2225. return rc;
  2226. if (count == 0)
  2227. return (0);
  2228. if (tape->merge_stage_size) {
  2229. actually_read = min((unsigned int)(tape->merge_stage_size),
  2230. (unsigned int)count);
  2231. if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
  2232. actually_read))
  2233. ret = -EFAULT;
  2234. buf += actually_read;
  2235. tape->merge_stage_size -= actually_read;
  2236. count -= actually_read;
  2237. }
  2238. while (count >= tape->stage_size) {
  2239. bytes_read = idetape_add_chrdev_read_request(drive, ctl);
  2240. if (bytes_read <= 0)
  2241. goto finish;
  2242. if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
  2243. bytes_read))
  2244. ret = -EFAULT;
  2245. buf += bytes_read;
  2246. count -= bytes_read;
  2247. actually_read += bytes_read;
  2248. }
  2249. if (count) {
  2250. bytes_read = idetape_add_chrdev_read_request(drive, ctl);
  2251. if (bytes_read <= 0)
  2252. goto finish;
  2253. temp = min((unsigned long)count, (unsigned long)bytes_read);
  2254. if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
  2255. temp))
  2256. ret = -EFAULT;
  2257. actually_read += temp;
  2258. tape->merge_stage_size = bytes_read-temp;
  2259. }
  2260. finish:
  2261. if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
  2262. debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
  2263. idetape_space_over_filemarks(drive, MTFSF, 1);
  2264. return 0;
  2265. }
  2266. return ret ? ret : actually_read;
  2267. }
  2268. static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
  2269. size_t count, loff_t *ppos)
  2270. {
  2271. struct ide_tape_obj *tape = ide_tape_f(file);
  2272. ide_drive_t *drive = tape->drive;
  2273. ssize_t actually_written = 0;
  2274. ssize_t ret = 0;
  2275. u16 ctl = *(u16 *)&tape->caps[12];
  2276. /* The drive is write protected. */
  2277. if (tape->write_prot)
  2278. return -EACCES;
  2279. debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
  2280. /* Initialize write operation */
  2281. if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
  2282. if (tape->chrdev_dir == IDETAPE_DIR_READ)
  2283. idetape_discard_read_pipeline(drive, 1);
  2284. if (tape->merge_stage || tape->merge_stage_size) {
  2285. printk(KERN_ERR "ide-tape: merge_stage_size "
  2286. "should be 0 now\n");
  2287. tape->merge_stage_size = 0;
  2288. }
  2289. tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
  2290. if (!tape->merge_stage)
  2291. return -ENOMEM;
  2292. tape->chrdev_dir = IDETAPE_DIR_WRITE;
  2293. idetape_init_merge_stage(tape);
  2294. /*
  2295. * Issue a write 0 command to ensure that DSC handshake is
  2296. * switched from completion mode to buffer available mode. No
  2297. * point in issuing this if DSC overlap isn't supported, some
  2298. * drives (Seagate STT3401A) will return an error.
  2299. */
  2300. if (drive->dsc_overlap) {
  2301. ssize_t retval = idetape_queue_rw_tail(drive,
  2302. REQ_IDETAPE_WRITE, 0,
  2303. tape->merge_stage->bh);
  2304. if (retval < 0) {
  2305. __idetape_kfree_stage(tape->merge_stage);
  2306. tape->merge_stage = NULL;
  2307. tape->chrdev_dir = IDETAPE_DIR_NONE;
  2308. return retval;
  2309. }
  2310. }
  2311. }
  2312. if (count == 0)
  2313. return (0);
  2314. if (tape->merge_stage_size) {
  2315. if (tape->merge_stage_size >= tape->stage_size) {
  2316. printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
  2317. tape->merge_stage_size = 0;
  2318. }
  2319. actually_written = min((unsigned int)
  2320. (tape->stage_size - tape->merge_stage_size),
  2321. (unsigned int)count);
  2322. if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
  2323. actually_written))
  2324. ret = -EFAULT;
  2325. buf += actually_written;
  2326. tape->merge_stage_size += actually_written;
  2327. count -= actually_written;
  2328. if (tape->merge_stage_size == tape->stage_size) {
  2329. ssize_t retval;
  2330. tape->merge_stage_size = 0;
  2331. retval = idetape_add_chrdev_write_request(drive, ctl);
  2332. if (retval <= 0)
  2333. return (retval);
  2334. }
  2335. }
  2336. while (count >= tape->stage_size) {
  2337. ssize_t retval;
  2338. if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
  2339. tape->stage_size))
  2340. ret = -EFAULT;
  2341. buf += tape->stage_size;
  2342. count -= tape->stage_size;
  2343. retval = idetape_add_chrdev_write_request(drive, ctl);
  2344. actually_written += tape->stage_size;
  2345. if (retval <= 0)
  2346. return (retval);
  2347. }
  2348. if (count) {
  2349. actually_written += count;
  2350. if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
  2351. count))
  2352. ret = -EFAULT;
  2353. tape->merge_stage_size += count;
  2354. }
  2355. return ret ? ret : actually_written;
  2356. }
  2357. static int idetape_write_filemark(ide_drive_t *drive)
  2358. {
  2359. struct ide_atapi_pc pc;
  2360. /* Write a filemark */
  2361. idetape_create_write_filemark_cmd(drive, &pc, 1);
  2362. if (idetape_queue_pc_tail(drive, &pc)) {
  2363. printk(KERN_ERR "ide-tape: Couldn't write a filemark\n");
  2364. return -EIO;
  2365. }
  2366. return 0;
  2367. }
  2368. /*
  2369. * Called from idetape_chrdev_ioctl when the general mtio MTIOCTOP ioctl is
  2370. * requested.
  2371. *
  2372. * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
  2373. * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
  2374. * usually not supported (it is supported in the rare case in which we crossed
  2375. * the filemark during our read-ahead pipelined operation mode).
  2376. *
  2377. * The following commands are currently not supported:
  2378. *
  2379. * MTFSS, MTBSS, MTWSM, MTSETDENSITY, MTSETDRVBUFFER, MT_ST_BOOLEANS,
  2380. * MT_ST_WRITE_THRESHOLD.
  2381. */
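/*
 * A minimal sketch of how these operations are reached from user space
 * (standard mtio interface; the device name is just an example):
 *
 *	#include <sys/mtio.h>
 *
 *	struct mtop op = { .mt_op = MTWEOF, .mt_count = 1 };
 *	int fd = open("/dev/nht0", O_WRONLY);
 *
 *	if (ioctl(fd, MTIOCTOP, &op) < 0)
 *		perror("MTIOCTOP");
 *
 * idetape_chrdev_ioctl() further down decodes MTIOCTOP and hands mt_op
 * and mt_count to this function.
 */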
  2382. static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
  2383. {
  2384. idetape_tape_t *tape = drive->driver_data;
  2385. struct ide_atapi_pc pc;
  2386. int i, retval;
  2387. debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
  2388. mt_op, mt_count);
  2389. /* Commands which need our pipelined read-ahead stages. */
  2390. switch (mt_op) {
  2391. case MTFSF:
  2392. case MTFSFM:
  2393. case MTBSF:
  2394. case MTBSFM:
  2395. if (!mt_count)
  2396. return 0;
  2397. return idetape_space_over_filemarks(drive, mt_op, mt_count);
  2398. default:
  2399. break;
  2400. }
  2401. switch (mt_op) {
  2402. case MTWEOF:
  2403. if (tape->write_prot)
  2404. return -EACCES;
  2405. idetape_discard_read_pipeline(drive, 1);
  2406. for (i = 0; i < mt_count; i++) {
  2407. retval = idetape_write_filemark(drive);
  2408. if (retval)
  2409. return retval;
  2410. }
  2411. return 0;
  2412. case MTREW:
  2413. idetape_discard_read_pipeline(drive, 0);
  2414. if (idetape_rewind_tape(drive))
  2415. return -EIO;
  2416. return 0;
  2417. case MTLOAD:
  2418. idetape_discard_read_pipeline(drive, 0);
  2419. idetape_create_load_unload_cmd(drive, &pc,
  2420. IDETAPE_LU_LOAD_MASK);
  2421. return idetape_queue_pc_tail(drive, &pc);
  2422. case MTUNLOAD:
  2423. case MTOFFL:
  2424. /*
  2425. * If door is locked, attempt to unlock before
  2426. * attempting to eject.
  2427. */
  2428. if (tape->door_locked) {
  2429. if (idetape_create_prevent_cmd(drive, &pc, 0))
  2430. if (!idetape_queue_pc_tail(drive, &pc))
  2431. tape->door_locked = DOOR_UNLOCKED;
  2432. }
  2433. idetape_discard_read_pipeline(drive, 0);
  2434. idetape_create_load_unload_cmd(drive, &pc,
  2435. !IDETAPE_LU_LOAD_MASK);
  2436. retval = idetape_queue_pc_tail(drive, &pc);
  2437. if (!retval)
  2438. clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
  2439. return retval;
  2440. case MTNOP:
  2441. idetape_discard_read_pipeline(drive, 0);
  2442. return idetape_flush_tape_buffers(drive);
  2443. case MTRETEN:
  2444. idetape_discard_read_pipeline(drive, 0);
  2445. idetape_create_load_unload_cmd(drive, &pc,
  2446. IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
  2447. return idetape_queue_pc_tail(drive, &pc);
  2448. case MTEOM:
  2449. idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
  2450. return idetape_queue_pc_tail(drive, &pc);
  2451. case MTERASE:
  2452. (void)idetape_rewind_tape(drive);
  2453. idetape_create_erase_cmd(&pc);
  2454. return idetape_queue_pc_tail(drive, &pc);
  2455. case MTSETBLK:
  2456. if (mt_count) {
  2457. if (mt_count < tape->blk_size ||
  2458. mt_count % tape->blk_size)
  2459. return -EIO;
  2460. tape->user_bs_factor = mt_count / tape->blk_size;
  2461. clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
  2462. } else
  2463. set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
  2464. return 0;
  2465. case MTSEEK:
  2466. idetape_discard_read_pipeline(drive, 0);
  2467. return idetape_position_tape(drive,
  2468. mt_count * tape->user_bs_factor, tape->partition, 0);
  2469. case MTSETPART:
  2470. idetape_discard_read_pipeline(drive, 0);
  2471. return idetape_position_tape(drive, 0, mt_count, 0);
  2472. case MTFSR:
  2473. case MTBSR:
  2474. case MTLOCK:
  2475. if (!idetape_create_prevent_cmd(drive, &pc, 1))
  2476. return 0;
  2477. retval = idetape_queue_pc_tail(drive, &pc);
  2478. if (retval)
  2479. return retval;
  2480. tape->door_locked = DOOR_EXPLICITLY_LOCKED;
  2481. return 0;
  2482. case MTUNLOCK:
  2483. if (!idetape_create_prevent_cmd(drive, &pc, 0))
  2484. return 0;
  2485. retval = idetape_queue_pc_tail(drive, &pc);
  2486. if (retval)
  2487. return retval;
  2488. tape->door_locked = DOOR_UNLOCKED;
  2489. return 0;
  2490. default:
  2491. printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
  2492. mt_op);
  2493. return -EIO;
  2494. }
  2495. }
  2496. /*
  2497. * Our character device ioctls. General mtio.h magnetic io commands are
  2498. * supported here, and not in the corresponding block interface. Our own
  2499. * ide-tape ioctls are supported on both interfaces.
  2500. */
  2501. static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
  2502. unsigned int cmd, unsigned long arg)
  2503. {
  2504. struct ide_tape_obj *tape = ide_tape_f(file);
  2505. ide_drive_t *drive = tape->drive;
  2506. struct mtop mtop;
  2507. struct mtget mtget;
  2508. struct mtpos mtpos;
  2509. int block_offset = 0, position = tape->first_frame;
  2510. void __user *argp = (void __user *)arg;
  2511. debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
  2512. if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
  2513. idetape_empty_write_pipeline(drive);
  2514. idetape_flush_tape_buffers(drive);
  2515. }
  2516. if (cmd == MTIOCGET || cmd == MTIOCPOS) {
  2517. block_offset = idetape_pipeline_size(drive) /
  2518. (tape->blk_size * tape->user_bs_factor);
  2519. position = idetape_read_position(drive);
  2520. if (position < 0)
  2521. return -EIO;
  2522. }
  2523. switch (cmd) {
  2524. case MTIOCTOP:
  2525. if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
  2526. return -EFAULT;
  2527. return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
  2528. case MTIOCGET:
  2529. memset(&mtget, 0, sizeof(struct mtget));
  2530. mtget.mt_type = MT_ISSCSI2;
  2531. mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
  2532. mtget.mt_dsreg =
  2533. ((tape->blk_size * tape->user_bs_factor)
  2534. << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
  2535. if (tape->drv_write_prot)
  2536. mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
  2537. if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
  2538. return -EFAULT;
  2539. return 0;
  2540. case MTIOCPOS:
  2541. mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
  2542. if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
  2543. return -EFAULT;
  2544. return 0;
  2545. default:
  2546. if (tape->chrdev_dir == IDETAPE_DIR_READ)
  2547. idetape_discard_read_pipeline(drive, 1);
  2548. return idetape_blkdev_ioctl(drive, cmd, arg);
  2549. }
  2550. }
  2551. /*
  2552. * Do a mode sense page 0 with block descriptor and if it succeeds set the tape
  2553. * block size with the reported value.
  2554. */
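/*
 * Layout of the data parsed below (MODE SENSE(6) with a block
 * descriptor): a 4-byte mode parameter header, whose byte 2 carries the
 * write-protect bit in bit 7, followed by an 8-byte block descriptor
 * whose bytes 5..7 hold the block length - hence the buf[2] and
 * buf[4 + 5..7] accesses in the function below.
 */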
  2555. static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
  2556. {
  2557. idetape_tape_t *tape = drive->driver_data;
  2558. struct ide_atapi_pc pc;
  2559. idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
  2560. if (idetape_queue_pc_tail(drive, &pc)) {
  2561. printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
  2562. if (tape->blk_size == 0) {
  2563. printk(KERN_WARNING "ide-tape: Cannot deal with zero "
  2564. "block size, assuming 32k\n");
  2565. tape->blk_size = 32768;
  2566. }
  2567. return;
  2568. }
  2569. tape->blk_size = (pc.buf[4 + 5] << 16) +
  2570. (pc.buf[4 + 6] << 8) +
  2571. pc.buf[4 + 7];
  2572. tape->drv_write_prot = (pc.buf[2] & 0x80) >> 7;
  2573. }
  2574. static int idetape_chrdev_open(struct inode *inode, struct file *filp)
  2575. {
  2576. unsigned int minor = iminor(inode), i = minor & ~0xc0;
  2577. ide_drive_t *drive;
  2578. idetape_tape_t *tape;
  2579. struct ide_atapi_pc pc;
  2580. int retval;
  2581. if (i >= MAX_HWIFS * MAX_DRIVES)
  2582. return -ENXIO;
  2583. tape = ide_tape_chrdev_get(i);
  2584. if (!tape)
  2585. return -ENXIO;
  2586. debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
  2587. /*
  2588. * We really want to do nonseekable_open(inode, filp); here, but some
  2589. * versions of tar incorrectly call lseek on tapes and bail out if that
  2590. * fails. So we disallow pread() and pwrite(), but permit lseeks.
  2591. */
  2592. filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
  2593. drive = tape->drive;
  2594. filp->private_data = tape;
  2595. if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) {
  2596. retval = -EBUSY;
  2597. goto out_put_tape;
  2598. }
  2599. retval = idetape_wait_ready(drive, 60 * HZ);
  2600. if (retval) {
  2601. clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
  2602. printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
  2603. goto out_put_tape;
  2604. }
  2605. idetape_read_position(drive);
  2606. if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
  2607. (void)idetape_rewind_tape(drive);
  2608. if (tape->chrdev_dir != IDETAPE_DIR_READ)
  2609. clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
  2610. /* Read block size and write protect status from drive. */
  2611. ide_tape_get_bsize_from_bdesc(drive);
  2612. /* Set write protect flag if device is opened as read-only. */
  2613. if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
  2614. tape->write_prot = 1;
  2615. else
  2616. tape->write_prot = tape->drv_write_prot;
  2617. /* Make sure drive isn't write protected if user wants to write. */
  2618. if (tape->write_prot) {
  2619. if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
  2620. (filp->f_flags & O_ACCMODE) == O_RDWR) {
  2621. clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
  2622. retval = -EROFS;
  2623. goto out_put_tape;
  2624. }
  2625. }
  2626. /* Lock the tape drive door so user can't eject. */
  2627. if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
  2628. if (idetape_create_prevent_cmd(drive, &pc, 1)) {
  2629. if (!idetape_queue_pc_tail(drive, &pc)) {
  2630. if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
  2631. tape->door_locked = DOOR_LOCKED;
  2632. }
  2633. }
  2634. }
  2635. return 0;
  2636. out_put_tape:
  2637. ide_tape_put(tape);
  2638. return retval;
  2639. }

static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
{
	idetape_tape_t *tape = drive->driver_data;

	idetape_empty_write_pipeline(drive);
	tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
	if (tape->merge_stage != NULL) {
		idetape_pad_zeros(drive, tape->blk_size *
				(tape->user_bs_factor - 1));
		__idetape_kfree_stage(tape->merge_stage);
		tape->merge_stage = NULL;
	}
	idetape_write_filemark(drive);
	idetape_flush_tape_buffers(drive);
	idetape_flush_tape_buffers(drive);
}

static int idetape_chrdev_release(struct inode *inode, struct file *filp)
{
	struct ide_tape_obj *tape = ide_tape_f(filp);
	ide_drive_t *drive = tape->drive;
	struct ide_atapi_pc pc;
	unsigned int minor = iminor(inode);

	lock_kernel();
	tape = drive->driver_data;

	debug_log(DBG_CHRDEV, "Enter %s\n", __func__);

	if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
		idetape_write_release(drive, minor);
	if (tape->chrdev_dir == IDETAPE_DIR_READ) {
		if (minor < 128)
			idetape_discard_read_pipeline(drive, 1);
		else
			idetape_wait_for_pipeline(drive);
	}
	if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
		(void) idetape_rewind_tape(drive);
	if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
		if (tape->door_locked == DOOR_LOCKED) {
			if (idetape_create_prevent_cmd(drive, &pc, 0)) {
				if (!idetape_queue_pc_tail(drive, &pc))
					tape->door_locked = DOOR_UNLOCKED;
			}
		}
	}
	clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
	ide_tape_put(tape);
	unlock_kernel();
	return 0;
}
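
/*
 * Note on the release path above: only the rewind nodes (minor < 128) rewind
 * the medium on close; the no-rewind nodes merely drain the read pipeline
 * instead of discarding it.  The door is unlocked again only once no read or
 * write direction is pending, i.e. when chrdev_dir is back to
 * IDETAPE_DIR_NONE.
 */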

/*
 * Check the contents of the ATAPI IDENTIFY command results.  We return:
 *
 * 1 - If the tape can be supported by us, based on the information we have so
 *     far.
 *
 * 0 - If this tape drive is not currently supported by us.
 */
static int idetape_identify_device(ide_drive_t *drive)
{
	u8 gcw[2], protocol, device_type, removable, packet_size;

	if (drive->id_read == 0)
		return 1;

	*((unsigned short *) &gcw) = drive->id->config;

	protocol = (gcw[1] & 0xC0) >> 6;
	device_type = gcw[1] & 0x1F;
	removable = !!(gcw[0] & 0x80);
	packet_size = gcw[0] & 0x3;

	/* Check that we can support this device */
	if (protocol != 2)
		printk(KERN_ERR "ide-tape: Protocol (0x%02x) is not ATAPI\n",
				protocol);
	else if (device_type != 1)
		printk(KERN_ERR "ide-tape: Device type (0x%02x) is not set "
				"to tape\n", device_type);
	else if (!removable)
		printk(KERN_ERR "ide-tape: The removable flag is not set\n");
	else if (packet_size != 0) {
		printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12"
				" bytes\n", packet_size);
	} else
		return 1;
	return 0;
}
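
/*
 * Layout assumed above for IDENTIFY word 0 ("general configuration"), read
 * here as two little-endian bytes: bits 14-15 protocol (2 = ATAPI), bits 8-12
 * device type (1 = sequential access, i.e. tape), bit 7 removable medium, and
 * bits 0-1 the command packet size (0 = 12 bytes).
 */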

static void idetape_get_inquiry_results(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc pc;
	char fw_rev[6], vendor_id[10], product_id[18];

	idetape_create_inquiry_cmd(&pc);
	if (idetape_queue_pc_tail(drive, &pc)) {
		printk(KERN_ERR "ide-tape: %s: can't get INQUIRY results\n",
				tape->name);
		return;
	}
	memcpy(vendor_id, &pc.buf[8], 8);
	memcpy(product_id, &pc.buf[16], 16);
	memcpy(fw_rev, &pc.buf[32], 4);

	/* only touch the bytes actually copied; the strings are not
	 * NUL-terminated, hence the precision specifiers below */
	ide_fixstring(vendor_id, 8, 0);
	ide_fixstring(product_id, 16, 0);
	ide_fixstring(fw_rev, 4, 0);

	printk(KERN_INFO "ide-tape: %s <-> %s: %.8s %.16s rev %.4s\n",
			drive->name, tape->name, vendor_id, product_id, fw_rev);
}
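
/*
 * The offsets above follow the standard INQUIRY data format: bytes 8-15
 * vendor identification, bytes 16-31 product identification, bytes 32-35
 * product revision level.
 */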

/*
 * Ask the tape about its various parameters.  In particular, we will adjust
 * our data transfer buffer size to the recommended value as returned by the
 * tape.
 */
static void idetape_get_mode_sense_results(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc pc;
	u8 *caps;
	u16 speed, max_speed;

	idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
	if (idetape_queue_pc_tail(drive, &pc)) {
		printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
				" some default values\n");
		tape->blk_size = 512;
		put_unaligned(52, (u16 *)&tape->caps[12]);
		put_unaligned(540, (u16 *)&tape->caps[14]);
		put_unaligned(6*52, (u16 *)&tape->caps[16]);
		return;
	}
	caps = pc.buf + 4 + pc.buf[3];

	/* convert to host order and save for later use */
	speed = be16_to_cpu(*(u16 *)&caps[14]);
	max_speed = be16_to_cpu(*(u16 *)&caps[8]);

	put_unaligned(max_speed, (u16 *)&caps[8]);
	put_unaligned(be16_to_cpu(*(u16 *)&caps[12]), (u16 *)&caps[12]);
	put_unaligned(speed, (u16 *)&caps[14]);
	put_unaligned(be16_to_cpu(*(u16 *)&caps[16]), (u16 *)&caps[16]);

	if (!speed) {
		printk(KERN_INFO "ide-tape: %s: invalid tape speed "
				"(assuming 650KB/sec)\n", drive->name);
		put_unaligned(650, (u16 *)&caps[14]);
	}
	if (!max_speed) {
		printk(KERN_INFO "ide-tape: %s: invalid max_speed "
				"(assuming 650KB/sec)\n", drive->name);
		put_unaligned(650, (u16 *)&caps[8]);
	}

	memcpy(&tape->caps, caps, 20);
	if (caps[7] & 0x02)
		tape->blk_size = 512;
	else if (caps[7] & 0x04)
		tape->blk_size = 1024;
}
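
/*
 * The capabilities-page bytes stashed in tape->caps above are used later with
 * this (assumed) layout: caps[8] maximum speed, caps[12] recommended transfer
 * size in blocks (the "ctl" value), caps[14] current speed in KB/s, and
 * caps[16] the drive buffer size in 512-byte units -- which is why the
 * fallback path stores 52, 540 and 6*52 at those offsets.
 */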

#ifdef CONFIG_IDE_PROC_FS
static void idetape_add_settings(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
			1, 2, (u16 *)&tape->caps[16], NULL);
	ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
			tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
	ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
			tape->stage_size / 1024, 1, &tape->max_stages, NULL);
	ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
			tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
	ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
			0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
			NULL);
	ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
			0xffff, tape->stage_size / 1024, 1,
			&tape->nr_pending_stages, NULL);
	ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
			1, 1, (u16 *)&tape->caps[14], NULL);
	ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
			1024, &tape->stage_size, NULL);
	ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
			IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
			NULL);
	ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
			1, &drive->dsc_overlap, NULL);
	ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
			1, 1, &tape->avg_speed, NULL);
	ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
			1, &tape->debug_mask, NULL);
}
#else
static inline void idetape_add_settings(ide_drive_t *drive) { ; }
#endif
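
/*
 * With CONFIG_IDE_PROC_FS enabled, these settings are exported through the
 * generic IDE settings interface under /proc/ide/<drive>/settings; the
 * SETTING_RW entries (pipeline sizes, tdsc, dsc_overlap, debug_mask) can be
 * tuned at run time through that file, while the SETTING_READ entries are
 * read-only.
 */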

/*
 * The function below is called to:
 *
 * 1. Initialize our various state variables.
 * 2. Ask the tape for its capabilities.
 * 3. Allocate a buffer which will be used for data transfer.  The buffer size
 *    is chosen based on the recommendation which we received in step 2.
 *
 * Note that at this point ide.c already assigned us an irq, so that we can
 * queue requests here and wait for their completion.
 */
static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
{
	unsigned long t1, tmid, tn, t;
	int speed;
	int stage_size;
	u8 gcw[2];
	struct sysinfo si;
	u16 *ctl = (u16 *)&tape->caps[12];

	spin_lock_init(&tape->lock);
	drive->dsc_overlap = 1;
	if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
		printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
				tape->name);
		drive->dsc_overlap = 0;
	}
	/* Seagate Travan drives do not support DSC overlap. */
	if (strstr(drive->id->model, "Seagate STT3401"))
		drive->dsc_overlap = 0;

	tape->minor = minor;
	tape->name[0] = 'h';
	tape->name[1] = 't';
	tape->name[2] = '0' + minor;
	tape->chrdev_dir = IDETAPE_DIR_NONE;
	tape->pc = tape->pc_stack;
	*((unsigned short *) &gcw) = drive->id->config;

	/* Command packet DRQ type */
	if (((gcw[0] & 0x60) >> 5) == 1)
		set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);

	tape->min_pipeline = 10;
	tape->max_pipeline = 10;
	tape->max_stages = 10;

	idetape_get_inquiry_results(drive);
	idetape_get_mode_sense_results(drive);
	ide_tape_get_bsize_from_bdesc(drive);
	tape->user_bs_factor = 1;
	tape->stage_size = *ctl * tape->blk_size;
	while (tape->stage_size > 0xffff) {
		printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
		*ctl /= 2;
		tape->stage_size = *ctl * tape->blk_size;
	}
	stage_size = tape->stage_size;
	tape->pages_per_stage = stage_size / PAGE_SIZE;
	if (stage_size % PAGE_SIZE) {
		tape->pages_per_stage++;
		tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;
	}

	/* Select the "best" DSC read/write polling freq and pipeline size. */
	speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);

	tape->max_stages = speed * 1000 * 10 / tape->stage_size;

	/* Limit memory use for pipeline to 10% of physical memory */
	si_meminfo(&si);
	if (tape->max_stages * tape->stage_size >
			si.totalram * si.mem_unit / 10)
		tape->max_stages =
			si.totalram * si.mem_unit / (10 * tape->stage_size);

	tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
	tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
	tape->max_pipeline =
		min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
	if (tape->max_stages == 0) {
		tape->max_stages = 1;
		tape->min_pipeline = 1;
		tape->max_pipeline = 1;
	}

	t1 = (tape->stage_size * HZ) / (speed * 1000);
	tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
	tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);

	if (tape->max_stages)
		t = tn;
	else
		t = t1;

	/*
	 * Ensure that the number we got makes sense; limit it within
	 * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
	 */
	tape->best_dsc_rw_freq = max_t(unsigned long,
				min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
				IDETAPE_DSC_RW_MIN);
	printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
		"%dkB pipeline, %lums tDSC%s\n",
		drive->name, tape->name, *(u16 *)&tape->caps[14],
		(*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
		tape->stage_size / 1024,
		tape->max_stages * tape->stage_size / 1024,
		tape->best_dsc_rw_freq * 1000 / HZ,
		drive->using_dma ? ", DMA" : "");

	idetape_add_settings(drive);
}
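
/*
 * Illustrative numbers, using the fallback values from
 * idetape_get_mode_sense_results() (blk_size 512, ctl 52, speed 540 KB/s):
 * stage_size = 52 * 512 = 26624 bytes, so the initial pipeline estimate is
 * 540 * 1000 * 10 / 26624 =~ 202 stages before the 10%-of-RAM and
 * IDETAPE_MAX_PIPELINE_STAGES clamps, and t1 = 26624 * HZ / 540000 =~ HZ/20,
 * i.e. about 50 ms per stage; the value actually chosen, tn, scales t1 by
 * IDETAPE_FIFO_THRESHOLD before the IDETAPE_DSC_RW_MIN/MAX clamp is applied.
 */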

static void ide_tape_remove(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	ide_proc_unregister_driver(drive, tape->driver);
	ide_unregister_region(tape->disk);
	ide_tape_put(tape);
}

static void ide_tape_release(struct kref *kref)
{
	struct ide_tape_obj *tape = to_ide_tape(kref);
	ide_drive_t *drive = tape->drive;
	struct gendisk *g = tape->disk;

	BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);

	drive->dsc_overlap = 0;
	drive->driver_data = NULL;
	device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
	device_destroy(idetape_sysfs_class,
			MKDEV(IDETAPE_MAJOR, tape->minor + 128));
	idetape_devs[tape->minor] = NULL;
	g->private_data = NULL;
	put_disk(g);
	kfree(tape);
}

#ifdef CONFIG_IDE_PROC_FS
static int proc_idetape_read_name
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	ide_drive_t *drive = (ide_drive_t *) data;
	idetape_tape_t *tape = drive->driver_data;
	char *out = page;
	int len;

	len = sprintf(out, "%s\n", tape->name);
	PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}

static ide_proc_entry_t idetape_proc[] = {
	{ "capacity",	S_IFREG|S_IRUGO, proc_ide_read_capacity, NULL },
	{ "name",	S_IFREG|S_IRUGO, proc_idetape_read_name, NULL },
	{ NULL, 0, NULL, NULL }
};
#endif

static int ide_tape_probe(ide_drive_t *);

static ide_driver_t idetape_driver = {
	.gen_driver = {
		.owner		= THIS_MODULE,
		.name		= "ide-tape",
		.bus		= &ide_bus_type,
	},
	.probe			= ide_tape_probe,
	.remove			= ide_tape_remove,
	.version		= IDETAPE_VERSION,
	.media			= ide_tape,
	.supports_dsc_overlap	= 1,
	.do_request		= idetape_do_request,
	.end_request		= idetape_end_request,
	.error			= __ide_error,
	.abort			= __ide_abort,
#ifdef CONFIG_IDE_PROC_FS
	.proc			= idetape_proc,
#endif
};

/* Our character device supporting functions, passed to register_chrdev. */
static const struct file_operations idetape_fops = {
	.owner		= THIS_MODULE,
	.read		= idetape_chrdev_read,
	.write		= idetape_chrdev_write,
	.ioctl		= idetape_chrdev_ioctl,
	.open		= idetape_chrdev_open,
	.release	= idetape_chrdev_release,
};

static int idetape_open(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ide_tape_obj *tape;

	tape = ide_tape_get(disk);
	if (!tape)
		return -ENXIO;

	return 0;
}

static int idetape_release(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ide_tape_obj *tape = ide_tape_g(disk);

	ide_tape_put(tape);

	return 0;
}

static int idetape_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct block_device *bdev = inode->i_bdev;
	struct ide_tape_obj *tape = ide_tape_g(bdev->bd_disk);
	ide_drive_t *drive = tape->drive;
	int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);

	if (err == -EINVAL)
		err = idetape_blkdev_ioctl(drive, cmd, arg);

	return err;
}

static struct block_device_operations idetape_block_ops = {
	.owner		= THIS_MODULE,
	.open		= idetape_open,
	.release	= idetape_release,
	.ioctl		= idetape_ioctl,
};

static int ide_tape_probe(ide_drive_t *drive)
{
	idetape_tape_t *tape;
	struct gendisk *g;
	int minor;

	if (!strstr("ide-tape", drive->driver_req))
		goto failed;
	if (!drive->present)
		goto failed;
	if (drive->media != ide_tape)
		goto failed;
	if (!idetape_identify_device(drive)) {
		printk(KERN_ERR "ide-tape: %s: not supported by this version of"
				" the driver\n", drive->name);
		goto failed;
	}
	if (drive->scsi) {
		printk(KERN_INFO "ide-tape: passing drive %s to ide-scsi"
				" emulation.\n", drive->name);
		goto failed;
	}
	tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
	if (tape == NULL) {
		printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
				drive->name);
		goto failed;
	}

	g = alloc_disk(1 << PARTN_BITS);
	if (!g)
		goto out_free_tape;

	ide_init_disk(g, drive);

	ide_proc_register_driver(drive, &idetape_driver);

	kref_init(&tape->kref);

	tape->drive = drive;
	tape->driver = &idetape_driver;
	tape->disk = g;

	g->private_data = &tape->driver;

	drive->driver_data = tape;

	mutex_lock(&idetape_ref_mutex);
	for (minor = 0; idetape_devs[minor]; minor++)
		;
	idetape_devs[minor] = tape;
	mutex_unlock(&idetape_ref_mutex);

	idetape_setup(drive, tape, minor);

	device_create(idetape_sysfs_class, &drive->gendev,
		      MKDEV(IDETAPE_MAJOR, minor), "%s", tape->name);
	device_create(idetape_sysfs_class, &drive->gendev,
		      MKDEV(IDETAPE_MAJOR, minor + 128), "n%s", tape->name);

	g->fops = &idetape_block_ops;
	ide_register_region(g);

	return 0;

out_free_tape:
	kfree(tape);
failed:
	return -ENODEV;
}
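
/*
 * A successful probe thus registers two character devices per drive -- the
 * rewind-on-close node at minor N and the "n"-prefixed no-rewind node at
 * minor N + 128 (typically surfacing as /dev/ht0 and /dev/nht0 via udev) --
 * plus a block-device interface whose ioctl handler only forwards generic
 * IDE ioctls.
 */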

static void __exit idetape_exit(void)
{
	driver_unregister(&idetape_driver.gen_driver);
	class_destroy(idetape_sysfs_class);
	unregister_chrdev(IDETAPE_MAJOR, "ht");
}

static int __init idetape_init(void)
{
	int error = 1;

	idetape_sysfs_class = class_create(THIS_MODULE, "ide_tape");
	if (IS_ERR(idetape_sysfs_class)) {
		idetape_sysfs_class = NULL;
		printk(KERN_ERR "Unable to create sysfs class for ide tapes\n");
		error = -EBUSY;
		goto out;
	}

	if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
		printk(KERN_ERR "ide-tape: Failed to register chrdev"
				" interface\n");
		error = -EBUSY;
		goto out_free_class;
	}

	error = driver_register(&idetape_driver.gen_driver);
	if (error)
		goto out_free_chrdev;

	return 0;

out_free_chrdev:
	/* undo the chrdev registration, not a driver_register() that failed */
	unregister_chrdev(IDETAPE_MAJOR, "ht");
out_free_class:
	class_destroy(idetape_sysfs_class);
out:
	return error;
}

MODULE_ALIAS("ide:*m-tape*");
module_init(idetape_init);
module_exit(idetape_exit);
MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);

MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
MODULE_LICENSE("GPL");