/*
 * IDE ATAPI streaming tape driver.
 *
 * Copyright (C) 1995-1999 Gadi Oxman <gadio@netvision.net.il>
 * Copyright (C) 2003-2005 Bartlomiej Zolnierkiewicz
 *
 * This driver was constructed as a student project in the software laboratory
 * of the faculty of electrical engineering at the Technion - Israel Institute
 * of Technology, under the guidance of Avner Lottem and Dr. Ilana David.
 *
 * It is hereby placed under the terms of the GNU general public license.
 * (See linux/COPYING).
 *
 * For a historical changelog see
 * Documentation/ide/ChangeLog.ide-tape.1995-2002
 */
#define IDETAPE_VERSION "1.20"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <scsi/scsi.h>

#include <asm/byteorder.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/unaligned.h>
#include <linux/mtio.h>
enum {
	/* output errors only */
	DBG_ERR		= (1 << 0),
	/* output all sense key/asc */
	DBG_SENSE	= (1 << 1),
	/* info regarding all chrdev-related procedures */
	DBG_CHRDEV	= (1 << 2),
	/* all remaining procedures */
	DBG_PROCS	= (1 << 3),
	/* buffer alloc info (pc_stack & rq_stack) */
	DBG_PCRQ_STACK	= (1 << 4),
};
/* define to see debug info */
#define IDETAPE_DEBUG_LOG	0

#if IDETAPE_DEBUG_LOG
#define debug_log(lvl, fmt, args...)				\
{								\
	if (tape->debug_mask & lvl)				\
		printk(KERN_INFO "ide-tape: " fmt, ## args);	\
}
#else
#define debug_log(lvl, fmt, args...) do {} while (0)
#endif
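
/*
 * Typical call sites below follow this pattern (sketch only): the first
 * argument is one of the DBG_* bits above and is tested against
 * tape->debug_mask when IDETAPE_DEBUG_LOG is enabled, e.g.
 *
 *	debug_log(DBG_PROCS, "Enter %s\n", __func__);
 */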
/**************************** Tunable parameters *****************************/

/*
 * Pipelined mode parameters.
 *
 * We try to use the minimum number of stages which is enough to keep the tape
 * constantly streaming. To accomplish that, we implement a feedback loop around
 * the maximum number of stages:
 *
 * We start from MIN maximum stages (we will not even use MIN stages if we don't
 * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
 * pipeline is empty, until we reach the optimum value or until we reach MAX.
 *
 * Setting the following parameter to 0 is illegal: the pipelined mode cannot be
 * disabled (idetape_calculate_speeds() divides by tape->max_stages.)
 */
#define IDETAPE_MIN_PIPELINE_STAGES	1
#define IDETAPE_MAX_PIPELINE_STAGES	400
#define IDETAPE_INCREASE_STAGES_RATE	20
/*
 * After each failed packet command we issue a request sense command and retry
 * the packet command IDETAPE_MAX_PC_RETRIES times.
 *
 * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
 */
#define IDETAPE_MAX_PC_RETRIES		3

/*
 * With each packet command, we allocate a buffer of IDETAPE_PC_BUFFER_SIZE
 * bytes. This is used for several packet commands (not for READ/WRITE commands)
 */
#define IDETAPE_PC_BUFFER_SIZE		256
/*
 * In various places in the driver, we need to allocate storage
 * for packet commands and requests, which will remain valid while
 * we leave the driver to wait for an interrupt or a timeout event.
 */
#define IDETAPE_PC_STACK	(10 + IDETAPE_MAX_PC_RETRIES)

/*
 * Some drives (for example, Seagate STT3401A Travan) require a very long
 * timeout, because they don't return an interrupt or clear their busy bit
 * until after the command completes (even retension commands).
 */
#define IDETAPE_WAIT_CMD	(900*HZ)
/*
 * The following parameter is used to select the point in the internal tape fifo
 * at which we will start to refill the buffer. Decreasing the following
 * parameter will improve the system's latency and interactive response, while
 * using a high value might improve system throughput.
 */
#define IDETAPE_FIFO_THRESHOLD		2
/*
 * DSC polling parameters.
 *
 * Polling for DSC (a single bit in the status register) is a very important
 * function in ide-tape. There are two cases in which we poll for DSC:
 *
 * 1. Before a read/write packet command, to ensure that we can transfer data
 * from/to the tape's data buffers, without causing an actual media access.
 * In case the tape is not ready yet, we take out our request from the device
 * request queue, so that ide.c could service requests from the other device
 * on the same interface in the meantime.
 *
 * 2. After the successful initialization of a "media access packet command",
 * which is a command that can take a long time to complete (the interval can
 * range from several seconds to even an hour). Again, we postpone our request
 * in the middle to free the bus for the other device. The polling frequency
 * here should be lower than the read/write frequency since those media access
 * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
 * (2 seconds), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
 * (5 min), we switch it to a lower frequency - IDETAPE_DSC_MA_SLOW (30 sec).
 *
 * We also set a timeout for the timer, in case something goes wrong. The
 * timeout should be longer than the maximum execution time of a tape operation.
 */
/* DSC timings. */
#define IDETAPE_DSC_RW_MIN		5*HZ/100	/* 50 msec */
#define IDETAPE_DSC_RW_MAX		40*HZ/100	/* 400 msec */
#define IDETAPE_DSC_RW_TIMEOUT		2*60*HZ		/* 2 minutes */
#define IDETAPE_DSC_MA_FAST		2*HZ		/* 2 seconds */
#define IDETAPE_DSC_MA_THRESHOLD	5*60*HZ		/* 5 minutes */
#define IDETAPE_DSC_MA_SLOW		30*HZ		/* 30 seconds */
#define IDETAPE_DSC_MA_TIMEOUT		2*60*60*HZ	/* 2 hours */
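
/*
 * Summary of the schedule implied by the values above: read/write DSC polling
 * starts at 50 msec and is capped at 400 msec with a 2 minute timeout; media
 * access commands are polled every 2 seconds until IDETAPE_DSC_MA_THRESHOLD
 * (5 minutes) elapses, then every 30 seconds, with an overall 2 hour timeout.
 */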
/*************************** End of tunable parameters ***********************/

/* Read/Write error simulation */
#define SIMULATE_ERRORS			0

/* tape directions */
enum {
	IDETAPE_DIR_NONE  = (1 << 0),
	IDETAPE_DIR_READ  = (1 << 1),
	IDETAPE_DIR_WRITE = (1 << 2),
};

struct idetape_bh {
	u32 b_size;
	atomic_t b_count;
	struct idetape_bh *b_reqnext;
	char *b_data;
};
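
/*
 * A stage's data lives in a singly linked chain of the buffer heads above:
 * b_size is the buffer capacity, b_count the number of valid bytes, b_reqnext
 * the next buffer in the chain and b_data the page-backed storage (see
 * idetape_input_buffers(), idetape_update_buffers() and __idetape_kfree_stage()
 * below).
 */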
/* Tape door status */
#define DOOR_UNLOCKED			0
#define DOOR_LOCKED			1
#define DOOR_EXPLICITLY_LOCKED		2

/* Some defines for the SPACE command */
#define IDETAPE_SPACE_OVER_FILEMARK	1
#define IDETAPE_SPACE_TO_EOD		3

/* Some defines for the LOAD UNLOAD command */
#define IDETAPE_LU_LOAD_MASK		1
#define IDETAPE_LU_RETENSION_MASK	2
#define IDETAPE_LU_EOT_MASK		4

/*
 * Special requests for our block device strategy routine.
 *
 * In order to service a character device command, we add special requests to
 * the tail of our block device request queue and wait for their completion.
 */
enum {
	REQ_IDETAPE_PC1		= (1 << 0), /* packet command (first stage) */
	REQ_IDETAPE_PC2		= (1 << 1), /* packet command (second stage) */
	REQ_IDETAPE_READ	= (1 << 2),
	REQ_IDETAPE_WRITE	= (1 << 3),
};
/* Error codes returned in rq->errors to the higher part of the driver. */
#define IDETAPE_ERROR_GENERAL		101
#define IDETAPE_ERROR_FILEMARK		102
#define IDETAPE_ERROR_EOD		103

/* Structures related to the SELECT SENSE / MODE SENSE packet commands. */
#define IDETAPE_BLOCK_DESCRIPTOR	0
#define IDETAPE_CAPABILITIES_PAGE	0x2a

/* Tape flag bits values. */
enum {
	IDETAPE_FLAG_IGNORE_DSC		= (1 << 0),
	/* 0 when the tape position is unknown */
	IDETAPE_FLAG_ADDRESS_VALID	= (1 << 1),
	/* Device already opened */
	IDETAPE_FLAG_BUSY		= (1 << 2),
	/* Error detected in a pipeline stage */
	IDETAPE_FLAG_PIPELINE_ERR	= (1 << 3),
	/* Attempt to auto-detect the current user block size */
	IDETAPE_FLAG_DETECT_BS		= (1 << 4),
	/* Currently on a filemark */
	IDETAPE_FLAG_FILEMARK		= (1 << 5),
	/* DRQ interrupt device */
	IDETAPE_FLAG_DRQ_INTERRUPT	= (1 << 6),
	/* pipeline active */
	IDETAPE_FLAG_PIPELINE_ACTIVE	= (1 << 7),
	/* 0 = no tape is loaded, so we don't rewind after ejecting */
	IDETAPE_FLAG_MEDIUM_PRESENT	= (1 << 8),
};
/* A pipeline stage. */
typedef struct idetape_stage_s {
	struct request rq;		/* The corresponding request */
	struct idetape_bh *bh;		/* The data buffers */
	struct idetape_stage_s *next;	/* Pointer to the next stage */
} idetape_stage_t;
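
/*
 * Stages are chained through ->next into a singly linked list; the list head,
 * the next stage to be issued and the tail are tracked by first_stage,
 * next_stage and last_stage in struct ide_tape_obj below.
 */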
/*
 * Most of our global data which we need to save even as we leave the driver due
 * to an interrupt or a timer event is stored in the struct defined below.
 */
typedef struct ide_tape_obj {
	ide_drive_t	*drive;
	ide_driver_t	*driver;
	struct gendisk	*disk;
	struct kref	kref;

	/*
	 * Since a typical character device operation requires more
	 * than one packet command, we provide here enough memory
	 * for the maximum of interconnected packet commands.
	 * The packet commands are stored in the circular array pc_stack.
	 * pc_stack_index points to the last used entry, and wraps around
	 * to the start when we get to the last array entry.
	 *
	 * pc points to the current processed packet command.
	 *
	 * failed_pc points to the last failed packet command, or contains
	 * NULL if we do not need to retry any packet command. This is
	 * required since an additional packet command is needed before the
	 * retry, to get detailed information on what went wrong.
	 */
	/* Current packet command */
	struct ide_atapi_pc *pc;
	/* Last failed packet command */
	struct ide_atapi_pc *failed_pc;
	/* Packet command stack */
	struct ide_atapi_pc pc_stack[IDETAPE_PC_STACK];
	/* Next free packet command storage space */
	int pc_stack_index;
	struct request rq_stack[IDETAPE_PC_STACK];
	/* We implement a circular array */
	int rq_stack_index;

	/*
	 * DSC polling variables.
	 *
	 * While polling for DSC we use postponed_rq to postpone the current
	 * request so that ide.c will be able to service pending requests on the
	 * other device. Note that at most we will have only one DSC (usually
	 * data transfer) request in the device request queue. Additional
	 * requests can be queued in our internal pipeline, but they will be
	 * visible to ide.c only one at a time.
	 */
	struct request *postponed_rq;
	/* The time in which we started polling for DSC */
	unsigned long dsc_polling_start;
	/* Timer used to poll for dsc */
	struct timer_list dsc_timer;
	/* Read/Write dsc polling frequency */
	unsigned long best_dsc_rw_freq;
	unsigned long dsc_poll_freq;
	unsigned long dsc_timeout;

	/* Read position information */
	u8 partition;
	/* Current block */
	unsigned int first_frame;

	/* Last error information */
	u8 sense_key, asc, ascq;

	/* Character device operation */
	unsigned int minor;
	/* device name */
	char name[4];
	/* Current character device data transfer direction */
	u8 chrdev_dir;

	/* tape block size, usually 512 or 1024 bytes */
	unsigned short blk_size;
	int user_bs_factor;

	/* Copy of the tape's Capabilities and Mechanical Page */
	u8 caps[20];

	/*
	 * Active data transfer request parameters.
	 *
	 * At most, there is only one ide-tape originated data transfer request
	 * in the device request queue. This allows ide.c to easily service
	 * requests from the other device when we postpone our active request.
	 * In the pipelined operation mode, we use our internal pipeline
	 * structure to hold more data requests. The data buffer size is chosen
	 * based on the tape's recommendation.
	 */
	/* ptr to the request which is waiting in the device request queue */
	struct request *active_data_rq;
	/* Data buffer size chosen based on the tape's recommendation */
	int stage_size;
	idetape_stage_t *merge_stage;
	int merge_stage_size;
	struct idetape_bh *bh;
	char *b_data;
	int b_count;

	/*
	 * Pipeline parameters.
	 *
	 * To accomplish non-pipelined mode, we simply set the following
	 * variables to zero (or NULL, where appropriate).
	 */
	/* Number of currently used stages */
	int nr_stages;
	/* Number of pending stages */
	int nr_pending_stages;
	/* We will not allocate more than this number of stages */
	int max_stages, min_pipeline, max_pipeline;
	/* The first stage which will be removed from the pipeline */
	idetape_stage_t *first_stage;
	/* The currently active stage */
	idetape_stage_t *active_stage;
	/* Will be serviced after the currently active request */
	idetape_stage_t *next_stage;
	/* New requests will be added to the pipeline here */
	idetape_stage_t *last_stage;
	/* Optional free stage which we can use */
	idetape_stage_t *cache_stage;
	int pages_per_stage;
	/* Wasted space in each stage */
	int excess_bh_size;

	/* Status/Action flags: long for set_bit */
	unsigned long flags;
	/* protects the ide-tape queue */
	spinlock_t lock;

	/* Measures average tape speed */
	unsigned long avg_time;
	int avg_size;
	int avg_speed;

	/* the door is currently locked */
	int door_locked;
	/* the tape hardware is write protected */
	char drv_write_prot;
	/* the tape is write protected (hardware or opened as read-only) */
	char write_prot;

	/*
	 * Limit the number of times a request can be postponed, to avoid an
	 * infinite postpone deadlock.
	 */
	int postpone_cnt;

	/*
	 * Measures number of frames:
	 *
	 * 1. written/read to/from the driver pipeline (pipeline_head).
	 * 2. written/read to/from the tape buffers (idetape_bh).
	 * 3. written/read by the tape to/from the media (tape_head).
	 */
	int pipeline_head;
	int buffer_head;
	int tape_head;
	int last_tape_head;

	/* Speed control at the tape buffers input/output */
	unsigned long insert_time;
	int insert_size;
	int insert_speed;
	int max_insert_speed;
	int measure_insert_time;

	/* Speed regulation negative feedback loop */
	int speed_control;
	int pipeline_head_speed;
	int controlled_pipeline_head_speed;
	int uncontrolled_pipeline_head_speed;
	int controlled_last_pipeline_head;
	unsigned long uncontrolled_pipeline_head_time;
	unsigned long controlled_pipeline_head_time;
	int controlled_previous_pipeline_head;
	int uncontrolled_previous_pipeline_head;
	unsigned long controlled_previous_head_time;
	unsigned long uncontrolled_previous_head_time;
	int restart_speed_control_req;

	u32 debug_mask;
} idetape_tape_t;
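
/*
 * Throughout the driver the per-drive object above is reached either via
 * drive->driver_data (used as "tape" in the functions below; it is set up
 * elsewhere in the driver) or via the gendisk/file private_data helpers that
 * follow.
 */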
static DEFINE_MUTEX(idetape_ref_mutex);

static struct class *idetape_sysfs_class;

#define to_ide_tape(obj) container_of(obj, struct ide_tape_obj, kref)

#define ide_tape_g(disk) \
	container_of((disk)->private_data, struct ide_tape_obj, driver)

static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
{
	struct ide_tape_obj *tape = NULL;

	mutex_lock(&idetape_ref_mutex);
	tape = ide_tape_g(disk);
	if (tape)
		kref_get(&tape->kref);
	mutex_unlock(&idetape_ref_mutex);

	return tape;
}

static void ide_tape_release(struct kref *);

static void ide_tape_put(struct ide_tape_obj *tape)
{
	mutex_lock(&idetape_ref_mutex);
	kref_put(&tape->kref, ide_tape_release);
	mutex_unlock(&idetape_ref_mutex);
}
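
/*
 * Reference counting sketch (the open/release paths that pair these calls are
 * further down in the driver; this only illustrates the intended usage):
 *
 *	tape = ide_tape_get(disk);	[or ide_tape_chrdev_get(minor)]
 *	if (tape == NULL)
 *		return -ENXIO;
 *	...
 *	ide_tape_put(tape);
 */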
/*
 * The variables below are used for the character device interface. Additional
 * state variables are defined in our ide_drive_t structure.
 */
static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];

#define ide_tape_f(file) ((file)->private_data)

static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
{
	struct ide_tape_obj *tape = NULL;

	mutex_lock(&idetape_ref_mutex);
	tape = idetape_devs[i];
	if (tape)
		kref_get(&tape->kref);
	mutex_unlock(&idetape_ref_mutex);

	return tape;
}

static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
				  unsigned int bcount)
{
	struct idetape_bh *bh = pc->bh;
	int count;

	while (bcount) {
		if (bh == NULL) {
			printk(KERN_ERR "ide-tape: bh == NULL in "
				"idetape_input_buffers\n");
			ide_atapi_discard_data(drive, bcount);
			return;
		}
		count = min(
			(unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
			bcount);
		HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
					atomic_read(&bh->b_count), count);
		bcount -= count;
		atomic_add(count, &bh->b_count);
		if (atomic_read(&bh->b_count) == bh->b_size) {
			bh = bh->b_reqnext;
			if (bh)
				atomic_set(&bh->b_count, 0);
		}
	}

	pc->bh = bh;
}
static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
				   unsigned int bcount)
{
	struct idetape_bh *bh = pc->bh;
	int count;

	while (bcount) {
		if (bh == NULL) {
			printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
					__func__);
			return;
		}
		count = min((unsigned int)pc->b_count, (unsigned int)bcount);
		HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count);
		bcount -= count;
		pc->b_data += count;
		pc->b_count -= count;
		if (!pc->b_count) {
			bh = bh->b_reqnext;
			pc->bh = bh;
			if (bh) {
				pc->b_data = bh->b_data;
				pc->b_count = atomic_read(&bh->b_count);
			}
		}
	}
}
static void idetape_update_buffers(struct ide_atapi_pc *pc)
{
	struct idetape_bh *bh = pc->bh;
	int count;
	unsigned int bcount = pc->xferred;

	if (pc->flags & PC_FLAG_WRITING)
		return;
	while (bcount) {
		if (bh == NULL) {
			printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
					__func__);
			return;
		}
		count = min((unsigned int)bh->b_size, (unsigned int)bcount);
		atomic_set(&bh->b_count, count);
		if (atomic_read(&bh->b_count) == bh->b_size)
			bh = bh->b_reqnext;
		bcount -= count;
	}
	pc->bh = bh;
}
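
/*
 * Note: idetape_update_buffers() only walks the buffer chain for reads; for
 * writes it returns early above. Its callers are idetape_analyze_error()
 * (after correcting pc->xferred from the sense data) and idetape_pc_intr()
 * (after a completed DMA transfer), both below.
 */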
/*
 * idetape_next_pc_storage returns a pointer to a place in which we can
 * safely store a packet command, even though we intend to leave the
 * driver. A storage space for a maximum of IDETAPE_PC_STACK packet
 * commands is allocated at initialization time.
 */
static struct ide_atapi_pc *idetape_next_pc_storage(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	debug_log(DBG_PCRQ_STACK, "pc_stack_index=%d\n", tape->pc_stack_index);

	if (tape->pc_stack_index == IDETAPE_PC_STACK)
		tape->pc_stack_index = 0;
	return (&tape->pc_stack[tape->pc_stack_index++]);
}

/*
 * idetape_next_rq_storage is used along with idetape_next_pc_storage.
 * Since we queue packet commands in the request queue, we need to
 * allocate a request, along with the allocation of a packet command.
 */

/**************************************************************
 *                                                            *
 *  This should get fixed to use kmalloc(.., GFP_ATOMIC)      *
 *  followed later on by kfree(). -ml                         *
 *                                                            *
 **************************************************************/

static struct request *idetape_next_rq_storage(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	debug_log(DBG_PCRQ_STACK, "rq_stack_index=%d\n", tape->rq_stack_index);

	if (tape->rq_stack_index == IDETAPE_PC_STACK)
		tape->rq_stack_index = 0;
	return (&tape->rq_stack[tape->rq_stack_index++]);
}
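
/*
 * The two storage helpers above are always used as a pair; see
 * idetape_retry_pc() below for the canonical sequence:
 *
 *	pc = idetape_next_pc_storage(drive);
 *	rq = idetape_next_rq_storage(drive);
 *	idetape_create_request_sense_cmd(pc);
 *	idetape_queue_pc_head(drive, pc, rq);
 */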
static void idetape_init_pc(struct ide_atapi_pc *pc)
{
	memset(pc->c, 0, 12);
	pc->retries = 0;
	pc->flags = 0;
	pc->req_xfer = 0;
	pc->buf = pc->pc_buf;
	pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
	pc->bh = NULL;
	pc->b_data = NULL;
}

/*
 * called on each failed packet command retry to analyze the request sense. We
 * currently do not utilize this information.
 */
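
/*
 * Fixed-format sense data layout assumed below (standard ATAPI/SCSI request
 * sense): byte 2 bits 0-3 hold the sense key, bytes 3-6 the information field
 * (here, the residue in blocks), byte 12 the ASC and byte 13 the ASCQ.
 */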
static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc *pc = tape->failed_pc;

	tape->sense_key = sense[2] & 0xF;
	tape->asc = sense[12];
	tape->ascq = sense[13];

	debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
		 pc->c[0], tape->sense_key, tape->asc, tape->ascq);

	/* Correct pc->xferred by asking the tape. */
	if (pc->flags & PC_FLAG_DMA_ERROR) {
		pc->xferred = pc->req_xfer -
			tape->blk_size *
			be32_to_cpu(get_unaligned((u32 *)&sense[3]));
		idetape_update_buffers(pc);
	}

	/*
	 * If error was the result of a zero-length read or write command,
	 * with sense key=5, asc=0x22, ascq=0, let it slide. Some drives
	 * (i.e. Seagate STT3401A Travan) don't support 0-length read/writes.
	 */
	if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6)
	    /* length == 0 */
	    && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) {
		if (tape->sense_key == 5) {
			/* don't report an error, everything's ok */
			pc->error = 0;
			/* don't retry read/write */
			pc->flags |= PC_FLAG_ABORT;
		}
	}
	if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
		pc->error = IDETAPE_ERROR_FILEMARK;
		pc->flags |= PC_FLAG_ABORT;
	}
	if (pc->c[0] == WRITE_6) {
		if ((sense[2] & 0x40) || (tape->sense_key == 0xd
		     && tape->asc == 0x0 && tape->ascq == 0x2)) {
			pc->error = IDETAPE_ERROR_EOD;
			pc->flags |= PC_FLAG_ABORT;
		}
	}
	if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
		if (tape->sense_key == 8) {
			pc->error = IDETAPE_ERROR_EOD;
			pc->flags |= PC_FLAG_ABORT;
		}
		if (!(pc->flags & PC_FLAG_ABORT) &&
		    pc->xferred)
			pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
	}
}
static void idetape_activate_next_stage(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	idetape_stage_t *stage = tape->next_stage;
	struct request *rq;

	debug_log(DBG_PROCS, "Enter %s\n", __func__);

	if (stage == NULL) {
		printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
				" existing stage\n");
		return;
	}

	/* Only touch the stage once we know it exists. */
	rq = &stage->rq;
	rq->rq_disk = tape->disk;
	rq->buffer = NULL;
	rq->special = (void *)stage->bh;
	tape->active_data_rq = rq;
	tape->active_stage = stage;
	tape->next_stage = stage->next;
}
/* Free a stage along with its related buffers completely. */
static void __idetape_kfree_stage(idetape_stage_t *stage)
{
	struct idetape_bh *prev_bh, *bh = stage->bh;
	int size;

	while (bh != NULL) {
		if (bh->b_data != NULL) {
			size = (int) bh->b_size;
			while (size > 0) {
				free_page((unsigned long) bh->b_data);
				size -= PAGE_SIZE;
				bh->b_data += PAGE_SIZE;
			}
		}
		prev_bh = bh;
		bh = bh->b_reqnext;
		kfree(prev_bh);
	}
	kfree(stage);
}

static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
{
	__idetape_kfree_stage(stage);
}

/*
 * Remove tape->first_stage from the pipeline. The caller should avoid race
 * conditions.
 */
static void idetape_remove_stage_head(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	idetape_stage_t *stage;

	debug_log(DBG_PROCS, "Enter %s\n", __func__);

	if (tape->first_stage == NULL) {
		printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
		return;
	}
	if (tape->active_stage == tape->first_stage) {
		printk(KERN_ERR "ide-tape: bug: Trying to free our active "
				"pipeline stage\n");
		return;
	}
	stage = tape->first_stage;
	tape->first_stage = stage->next;
	idetape_kfree_stage(tape, stage);
	tape->nr_stages--;
	if (tape->first_stage == NULL) {
		tape->last_stage = NULL;
		if (tape->next_stage != NULL)
			printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
					" NULL\n");
		if (tape->nr_stages)
			printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
					"now\n");
	}
}

/*
 * This will free all the pipeline stages starting from new_last_stage->next
 * to the end of the list, and point tape->last_stage to new_last_stage.
 */
static void idetape_abort_pipeline(ide_drive_t *drive,
				   idetape_stage_t *new_last_stage)
{
	idetape_tape_t *tape = drive->driver_data;
	idetape_stage_t *stage = new_last_stage->next;
	idetape_stage_t *nstage;

	debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);

	while (stage) {
		nstage = stage->next;
		idetape_kfree_stage(tape, stage);
		--tape->nr_stages;
		--tape->nr_pending_stages;
		stage = nstage;
	}
	if (new_last_stage)
		new_last_stage->next = NULL;
	tape->last_stage = new_last_stage;
	tape->next_stage = NULL;
}
/*
 * Finish servicing a request and insert a pending pipeline request into the
 * main device queue.
 */
static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
{
	struct request *rq = HWGROUP(drive)->rq;
	idetape_tape_t *tape = drive->driver_data;
	unsigned long flags;
	int error;
	int remove_stage = 0;
	idetape_stage_t *active_stage;

	debug_log(DBG_PROCS, "Enter %s\n", __func__);

	switch (uptodate) {
	case 0:	error = IDETAPE_ERROR_GENERAL; break;
	case 1: error = 0; break;
	default: error = uptodate;
	}
	rq->errors = error;
	if (error)
		tape->failed_pc = NULL;

	if (!blk_special_request(rq)) {
		ide_end_request(drive, uptodate, nr_sects);
		return 0;
	}

	spin_lock_irqsave(&tape->lock, flags);

	/* The request was a pipelined data transfer request */
	if (tape->active_data_rq == rq) {
		active_stage = tape->active_stage;
		tape->active_stage = NULL;
		tape->active_data_rq = NULL;
		tape->nr_pending_stages--;
		if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
			remove_stage = 1;
			if (error) {
				set_bit(IDETAPE_FLAG_PIPELINE_ERR,
					&tape->flags);
				if (error == IDETAPE_ERROR_EOD)
					idetape_abort_pipeline(drive,
								active_stage);
			}
		} else if (rq->cmd[0] & REQ_IDETAPE_READ) {
			if (error == IDETAPE_ERROR_EOD) {
				set_bit(IDETAPE_FLAG_PIPELINE_ERR,
					&tape->flags);
				idetape_abort_pipeline(drive, active_stage);
			}
		}
		if (tape->next_stage != NULL) {
			idetape_activate_next_stage(drive);

			/* Insert the next request into the request queue. */
			(void)ide_do_drive_cmd(drive, tape->active_data_rq,
						ide_end);
		} else if (!error) {
			/*
			 * This is a part of the feedback loop which tries to
			 * find the optimum number of stages. We are starting
			 * from a minimum maximum number of stages, and if we
			 * sense that the pipeline is empty, we try to increase
			 * it, until we reach the user compile time memory
			 * limit.
			 */
			int i = (tape->max_pipeline - tape->min_pipeline) / 10;

			tape->max_stages += max(i, 1);
			tape->max_stages = max(tape->max_stages,
						tape->min_pipeline);
			tape->max_stages = min(tape->max_stages,
						tape->max_pipeline);
		}
	}
	ide_end_drive_cmd(drive, 0, 0);

	if (remove_stage)
		idetape_remove_stage_head(drive);
	if (tape->active_data_rq == NULL)
		clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);

	spin_unlock_irqrestore(&tape->lock, flags);
	return 0;
}
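
/*
 * Growth step used above: when a data request completes without error and the
 * pipeline has run dry, max_stages is raised by one tenth of the
 * (max_pipeline - min_pipeline) range (at least 1) and then clamped to the
 * [min_pipeline, max_pipeline] interval.
 */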
static ide_startstop_t idetape_request_sense_callback(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	debug_log(DBG_PROCS, "Enter %s\n", __func__);

	if (!tape->pc->error) {
		idetape_analyze_error(drive, tape->pc->buf);
		idetape_end_request(drive, 1, 0);
	} else {
		printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - "
			"Aborting request!\n");
		idetape_end_request(drive, 0, 0);
	}
	return ide_stopped;
}

static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
{
	idetape_init_pc(pc);
	pc->c[0] = REQUEST_SENSE;
	pc->c[4] = 20;
	pc->req_xfer = 20;
	pc->idetape_callback = &idetape_request_sense_callback;
}

static void idetape_init_rq(struct request *rq, u8 cmd)
{
	memset(rq, 0, sizeof(*rq));
	rq->cmd_type = REQ_TYPE_SPECIAL;
	rq->cmd[0] = cmd;
}
/*
 * Generate a new packet command request in front of the request queue, before
 * the current request, so that it will be processed immediately, on the next
 * pass through the driver. The function below is called from the request
 * handling part of the driver (the "bottom" part). Safe storage for the request
 * should be allocated with idetape_next_{pc,rq}_storage() prior to that.
 *
 * Memory for those requests is pre-allocated at initialization time, and is
 * limited to IDETAPE_PC_STACK requests. We assume that we have enough space for
 * the maximum possible number of inter-dependent packet commands.
 *
 * The higher level of the driver - the ioctl handler and the character device
 * handling functions - should queue requests to the lower level part and wait
 * for their completion using idetape_queue_pc_tail or idetape_queue_rw_tail.
 */
static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
				  struct request *rq)
{
	struct ide_tape_obj *tape = drive->driver_data;

	idetape_init_rq(rq, REQ_IDETAPE_PC1);
	rq->buffer = (char *) pc;
	rq->rq_disk = tape->disk;
	(void) ide_do_drive_cmd(drive, rq, ide_preempt);
}
/*
 * idetape_retry_pc is called when an error was detected during the
 * last packet command. We queue a request sense packet command in
 * the head of the request list.
 */
static ide_startstop_t idetape_retry_pc(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc *pc;
	struct request *rq;

	(void)ide_read_error(drive);
	pc = idetape_next_pc_storage(drive);
	rq = idetape_next_rq_storage(drive);
	idetape_create_request_sense_cmd(pc);
	set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
	idetape_queue_pc_head(drive, pc, rq);
	return ide_stopped;
}

/*
 * Postpone the current request so that ide.c will be able to service requests
 * from another device on the same hwgroup while we are polling for DSC.
 */
static void idetape_postpone_request(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	debug_log(DBG_PROCS, "Enter %s\n", __func__);

	tape->postponed_rq = HWGROUP(drive)->rq;
	ide_stall_queue(drive, tape->dsc_poll_freq);
}
typedef void idetape_io_buf(ide_drive_t *, struct ide_atapi_pc *, unsigned int);

/*
 * This is the usual interrupt handler which will be called during a packet
 * command. We will transfer some of the data (as requested by the drive) and
 * will re-point the interrupt handler to us. When data transfer is finished, we
 * will act according to the algorithm described before idetape_issue_pc.
 */
static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc *pc = tape->pc;
	xfer_func_t *xferfunc;
	idetape_io_buf *iobuf;
	unsigned int temp;
#if SIMULATE_ERRORS
	static int error_sim_count;
#endif
	u16 bcount;
	u8 stat, ireason;

	debug_log(DBG_PROCS, "Enter %s - interrupt handler\n", __func__);

	/* Clear the interrupt */
	stat = ide_read_status(drive);

	if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
		if (hwif->dma_ops->dma_end(drive) || (stat & ERR_STAT)) {
			/*
			 * A DMA error is sometimes expected. For example,
			 * if the tape is crossing a filemark during a
			 * READ command, it will issue an irq and position
			 * itself before the filemark, so that only a partial
			 * data transfer will occur (which causes the DMA
			 * error). In that case, we will later ask the tape
			 * how many bytes of the original request were
			 * actually transferred (we can't receive that
			 * information from the DMA engine on most chipsets).
			 */

			/*
			 * On the contrary, a DMA error is never expected;
			 * it usually indicates a hardware error or abort.
			 * If the tape crosses a filemark during a READ
			 * command, it will issue an irq and position itself
			 * after the filemark (not before). Only a partial
			 * data transfer will occur, but no DMA error.
			 * (AS, 19 Apr 2001)
			 */
			pc->flags |= PC_FLAG_DMA_ERROR;
		} else {
			pc->xferred = pc->req_xfer;
			idetape_update_buffers(pc);
		}
		debug_log(DBG_PROCS, "DMA finished\n");
	}
	/* No more interrupts */
	if ((stat & DRQ_STAT) == 0) {
		debug_log(DBG_SENSE, "Packet command completed, %d bytes"
				" transferred\n", pc->xferred);

		pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
		local_irq_enable();

#if SIMULATE_ERRORS
		if ((pc->c[0] == WRITE_6 || pc->c[0] == READ_6) &&
		    (++error_sim_count % 100) == 0) {
			printk(KERN_INFO "ide-tape: %s: simulating error\n",
				tape->name);
			stat |= ERR_STAT;
		}
#endif
		if ((stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
			stat &= ~ERR_STAT;
		if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
			/* Error detected */
			debug_log(DBG_ERR, "%s: I/O error\n", tape->name);

			if (pc->c[0] == REQUEST_SENSE) {
				printk(KERN_ERR "ide-tape: I/O error in request"
						" sense command\n");
				return ide_do_reset(drive);
			}
			debug_log(DBG_ERR, "[cmd %x]: check condition\n",
					pc->c[0]);

			/* Retry operation */
			return idetape_retry_pc(drive);
		}
		pc->error = 0;
		if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) &&
		    (stat & SEEK_STAT) == 0) {
			/* Media access command */
			tape->dsc_polling_start = jiffies;
			tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
			tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
			/* Allow ide.c to handle other requests */
			idetape_postpone_request(drive);
			return ide_stopped;
		}
		if (tape->failed_pc == pc)
			tape->failed_pc = NULL;
		/* Command finished - Call the callback function */
		return pc->idetape_callback(drive);
	}

	if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
		pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
		printk(KERN_ERR "ide-tape: The tape wants to issue more "
				"interrupts in DMA mode\n");
		printk(KERN_ERR "ide-tape: DMA disabled, reverting to PIO\n");
		ide_dma_off(drive);
		return ide_do_reset(drive);
	}
	/* Get the number of bytes to transfer on this interrupt. */
	bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
		  hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);

	ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);

	if (ireason & CD) {
		printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
		return ide_do_reset(drive);
	}
	if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
		/* Hopefully, we will never get here */
		printk(KERN_ERR "ide-tape: We wanted to %s, ",
				(ireason & IO) ? "Write" : "Read");
		printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n",
				(ireason & IO) ? "Read" : "Write");
		return ide_do_reset(drive);
	}
	if (!(pc->flags & PC_FLAG_WRITING)) {
		/* Reading - Check that we have enough space */
		temp = pc->xferred + bcount;
		if (temp > pc->req_xfer) {
			if (temp > pc->buf_size) {
				printk(KERN_ERR "ide-tape: The tape wants to "
					"send us more data than expected "
					"- discarding data\n");
				ide_atapi_discard_data(drive, bcount);
				ide_set_handler(drive, &idetape_pc_intr,
						IDETAPE_WAIT_CMD, NULL);
				return ide_started;
			}
			debug_log(DBG_SENSE, "The tape wants to send us more "
				"data than expected - allowing transfer\n");
		}
		iobuf = &idetape_input_buffers;
		xferfunc = hwif->atapi_input_bytes;
	} else {
		iobuf = &idetape_output_buffers;
		xferfunc = hwif->atapi_output_bytes;
	}

	if (pc->bh)
		iobuf(drive, pc, bcount);
	else
		xferfunc(drive, pc->cur_pos, bcount);

	/* Update the current position */
	pc->xferred += bcount;
	pc->cur_pos += bcount;

	debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
		  pc->c[0], bcount);

	/* And set the interrupt handler again */
	ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
	return ide_started;
}
/*
 * Packet Command Interface
 *
 * The current Packet Command is available in tape->pc, and will not change
 * until we finish handling it. Each packet command is associated with a
 * callback function that will be called when the command is finished.
 *
 * The handling will be done in several stages:
 *
 * 1. idetape_issue_pc will send the packet command to the drive, and will set
 * the interrupt handler to idetape_pc_intr.
 *
 * 2. On each interrupt, idetape_pc_intr will be called. This step will be
 * repeated until the device signals us that no more interrupts will be issued.
 *
 * 3. ATAPI Tape media access commands have immediate status with a delayed
 * process. In case of a successful initiation of a media access packet command,
 * the DSC bit will be set when the actual execution of the command is finished.
 * Since the tape drive will not issue an interrupt, we have to poll for this
 * event. In this case, we define the request as a "low priority request" by
 * setting rq_status to IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and
 * exit the driver.
 *
 * ide.c will then give higher priority to requests which originate from the
 * other device, until we change rq_status to RQ_ACTIVE.
 *
 * 4. When the packet command is finished, it will be checked for errors.
 *
 * 5. In case an error was found, we queue a request sense packet command in
 * front of the request queue and retry the operation up to
 * IDETAPE_MAX_PC_RETRIES times.
 *
 * 6. In case no error was found, or we decided to give up and not to retry
 * again, the callback function will be called and then we will handle the next
 * request.
 */
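
/*
 * Rough control flow implied by the steps above (sketch of the functions in
 * this file only):
 *
 *	idetape_issue_pc()      - writes WIN_PACKETCMD, then idetape_transfer_pc()
 *	idetape_transfer_pc()   - sends pc->c[], arms idetape_pc_intr()
 *	idetape_pc_intr()       - repeats per interrupt; transfers data,
 *	                          postpones for DSC, or calls idetape_retry_pc()
 *	                          on errors (which queues a REQUEST SENSE)
 *	pc->idetape_callback()  - invoked once the command has completed
 */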
static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc *pc = tape->pc;
	int retries = 100;
	ide_startstop_t startstop;
	u8 ireason;

	if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
		printk(KERN_ERR "ide-tape: Strange, packet command initiated "
				"yet DRQ isn't asserted\n");
		return startstop;
	}
	ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
	while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
		printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
				"a packet command, retrying\n");
		udelay(100);
		ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
		if (retries == 0) {
			printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while "
					"issuing a packet command, ignoring\n");
			ireason |= CD;
			ireason &= ~IO;
		}
	}
	if ((ireason & CD) == 0 || (ireason & IO)) {
		printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
				"a packet command\n");
		return ide_do_reset(drive);
	}
	/* Set the interrupt routine */
	ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
#ifdef CONFIG_BLK_DEV_IDEDMA
	/* Begin DMA, if necessary */
	if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
		hwif->dma_ops->dma_start(drive);
#endif
	/* Send the actual packet */
	HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
	return ide_started;
}
  1088. static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
  1089. struct ide_atapi_pc *pc)
  1090. {
  1091. ide_hwif_t *hwif = drive->hwif;
  1092. idetape_tape_t *tape = drive->driver_data;
  1093. int dma_ok = 0;
  1094. u16 bcount;
	if (tape->pc->c[0] == REQUEST_SENSE &&
	    pc->c[0] == REQUEST_SENSE) {
		printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
			"two consecutive request sense commands were issued\n");
	}
  1100. if (tape->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
  1101. tape->failed_pc = pc;
  1102. /* Set the current packet command */
  1103. tape->pc = pc;
  1104. if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
  1105. (pc->flags & PC_FLAG_ABORT)) {
		/*
		 * We will "abort" retrying a packet command in case a
		 * legitimate error code was received (crossing a filemark,
		 * or reaching the end of the media, for example).
		 */
  1111. if (!(pc->flags & PC_FLAG_ABORT)) {
  1112. if (!(pc->c[0] == TEST_UNIT_READY &&
  1113. tape->sense_key == 2 && tape->asc == 4 &&
  1114. (tape->ascq == 1 || tape->ascq == 8))) {
  1115. printk(KERN_ERR "ide-tape: %s: I/O error, "
  1116. "pc = %2x, key = %2x, "
  1117. "asc = %2x, ascq = %2x\n",
  1118. tape->name, pc->c[0],
  1119. tape->sense_key, tape->asc,
  1120. tape->ascq);
  1121. }
  1122. /* Giving up */
  1123. pc->error = IDETAPE_ERROR_GENERAL;
  1124. }
  1125. tape->failed_pc = NULL;
  1126. return pc->idetape_callback(drive);
  1127. }
  1128. debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
  1129. pc->retries++;
  1130. /* We haven't transferred any data yet */
  1131. pc->xferred = 0;
  1132. pc->cur_pos = pc->buf;
  1133. /* Request to transfer the entire buffer at once */
  1134. bcount = pc->req_xfer;
  1135. if (pc->flags & PC_FLAG_DMA_ERROR) {
  1136. pc->flags &= ~PC_FLAG_DMA_ERROR;
  1137. printk(KERN_WARNING "ide-tape: DMA disabled, "
  1138. "reverting to PIO\n");
  1139. ide_dma_off(drive);
  1140. }
  1141. if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
  1142. dma_ok = !hwif->dma_ops->dma_setup(drive);
  1143. ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
  1144. IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
  1145. if (dma_ok)
  1146. /* Will begin DMA later */
  1147. pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
  1148. if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags)) {
  1149. ide_execute_command(drive, WIN_PACKETCMD, &idetape_transfer_pc,
  1150. IDETAPE_WAIT_CMD, NULL);
  1151. return ide_started;
  1152. } else {
  1153. hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
  1154. return idetape_transfer_pc(drive);
  1155. }
  1156. }
  1157. static ide_startstop_t idetape_pc_callback(ide_drive_t *drive)
  1158. {
  1159. idetape_tape_t *tape = drive->driver_data;
  1160. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1161. idetape_end_request(drive, tape->pc->error ? 0 : 1, 0);
  1162. return ide_stopped;
  1163. }
  1164. /* A mode sense command is used to "sense" tape parameters. */
  1165. static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
  1166. {
  1167. idetape_init_pc(pc);
  1168. pc->c[0] = MODE_SENSE;
  1169. if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
  1170. /* DBD = 1 - Don't return block descriptors */
  1171. pc->c[1] = 8;
  1172. pc->c[2] = page_code;
  1173. /*
  1174. * Changed pc->c[3] to 0 (255 will at best return unused info).
  1175. *
  1176. * For SCSI this byte is defined as subpage instead of high byte
  1177. * of length and some IDE drives seem to interpret it this way
  1178. * and return an error when 255 is used.
  1179. */
  1180. pc->c[3] = 0;
  1181. /* We will just discard data in that case */
  1182. pc->c[4] = 255;
  1183. if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
  1184. pc->req_xfer = 12;
  1185. else if (page_code == IDETAPE_CAPABILITIES_PAGE)
  1186. pc->req_xfer = 24;
  1187. else
  1188. pc->req_xfer = 50;
  1189. pc->idetape_callback = &idetape_pc_callback;
  1190. }
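/*
 * Descriptive note: the function below estimates how quickly the tape consumes
 * pipeline stages. It keeps a long-term "controlled" measurement (its window
 * is restarted every ~2 minutes and only trusted once it spans at least a
 * minute) and a short-term "uncontrolled" one that is updated only while the
 * pipeline is not full. The larger of the two becomes pipeline_head_speed, and
 * when speed_control == 1 it is used to derive max_insert_speed, which other
 * parts of the driver use to pace how quickly user space may fill the pipeline
 * (effectively unthrottled when the pipeline is ~99% full, and never below
 * 500). The "* 32" factor apparently assumes roughly 32 KB per stage, so the
 * resulting speeds are approximate KB/s figures.
 */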
  1191. static void idetape_calculate_speeds(ide_drive_t *drive)
  1192. {
  1193. idetape_tape_t *tape = drive->driver_data;
  1194. if (time_after(jiffies,
  1195. tape->controlled_pipeline_head_time + 120 * HZ)) {
  1196. tape->controlled_previous_pipeline_head =
  1197. tape->controlled_last_pipeline_head;
  1198. tape->controlled_previous_head_time =
  1199. tape->controlled_pipeline_head_time;
  1200. tape->controlled_last_pipeline_head = tape->pipeline_head;
  1201. tape->controlled_pipeline_head_time = jiffies;
  1202. }
  1203. if (time_after(jiffies, tape->controlled_pipeline_head_time + 60 * HZ))
  1204. tape->controlled_pipeline_head_speed = (tape->pipeline_head -
  1205. tape->controlled_last_pipeline_head) * 32 * HZ /
  1206. (jiffies - tape->controlled_pipeline_head_time);
  1207. else if (time_after(jiffies, tape->controlled_previous_head_time))
  1208. tape->controlled_pipeline_head_speed = (tape->pipeline_head -
  1209. tape->controlled_previous_pipeline_head) * 32 *
  1210. HZ / (jiffies - tape->controlled_previous_head_time);
  1211. if (tape->nr_pending_stages < tape->max_stages/*- 1 */) {
  1212. /* -1 for read mode error recovery */
  1213. if (time_after(jiffies, tape->uncontrolled_previous_head_time +
  1214. 10 * HZ)) {
  1215. tape->uncontrolled_pipeline_head_time = jiffies;
  1216. tape->uncontrolled_pipeline_head_speed =
  1217. (tape->pipeline_head -
  1218. tape->uncontrolled_previous_pipeline_head) *
  1219. 32 * HZ / (jiffies -
  1220. tape->uncontrolled_previous_head_time);
  1221. }
  1222. } else {
  1223. tape->uncontrolled_previous_head_time = jiffies;
  1224. tape->uncontrolled_previous_pipeline_head = tape->pipeline_head;
  1225. if (time_after(jiffies, tape->uncontrolled_pipeline_head_time +
  1226. 30 * HZ))
  1227. tape->uncontrolled_pipeline_head_time = jiffies;
  1228. }
  1229. tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed,
  1230. tape->controlled_pipeline_head_speed);
  1231. if (tape->speed_control == 1) {
  1232. if (tape->nr_pending_stages >= tape->max_stages / 2)
  1233. tape->max_insert_speed = tape->pipeline_head_speed +
  1234. (1100 - tape->pipeline_head_speed) * 2 *
  1235. (tape->nr_pending_stages - tape->max_stages / 2)
  1236. / tape->max_stages;
  1237. else
  1238. tape->max_insert_speed = 500 +
  1239. (tape->pipeline_head_speed - 500) * 2 *
  1240. tape->nr_pending_stages / tape->max_stages;
  1241. if (tape->nr_pending_stages >= tape->max_stages * 99 / 100)
  1242. tape->max_insert_speed = 5000;
  1243. } else
  1244. tape->max_insert_speed = tape->speed_control;
  1245. tape->max_insert_speed = max(tape->max_insert_speed, 500);
  1246. }
  1247. static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
  1248. {
  1249. idetape_tape_t *tape = drive->driver_data;
  1250. struct ide_atapi_pc *pc = tape->pc;
  1251. u8 stat;
  1252. stat = ide_read_status(drive);
  1253. if (stat & SEEK_STAT) {
  1254. if (stat & ERR_STAT) {
  1255. /* Error detected */
  1256. if (pc->c[0] != TEST_UNIT_READY)
  1257. printk(KERN_ERR "ide-tape: %s: I/O error, ",
  1258. tape->name);
  1259. /* Retry operation */
  1260. return idetape_retry_pc(drive);
  1261. }
  1262. pc->error = 0;
  1263. if (tape->failed_pc == pc)
  1264. tape->failed_pc = NULL;
  1265. } else {
  1266. pc->error = IDETAPE_ERROR_GENERAL;
  1267. tape->failed_pc = NULL;
  1268. }
  1269. return pc->idetape_callback(drive);
  1270. }
  1271. static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
  1272. {
  1273. idetape_tape_t *tape = drive->driver_data;
  1274. struct request *rq = HWGROUP(drive)->rq;
  1275. int blocks = tape->pc->xferred / tape->blk_size;
  1276. tape->avg_size += blocks * tape->blk_size;
  1277. tape->insert_size += blocks * tape->blk_size;
  1278. if (tape->insert_size > 1024 * 1024)
  1279. tape->measure_insert_time = 1;
  1280. if (tape->measure_insert_time) {
  1281. tape->measure_insert_time = 0;
  1282. tape->insert_time = jiffies;
  1283. tape->insert_size = 0;
  1284. }
  1285. if (time_after(jiffies, tape->insert_time))
  1286. tape->insert_speed = tape->insert_size / 1024 * HZ /
  1287. (jiffies - tape->insert_time);
  1288. if (time_after_eq(jiffies, tape->avg_time + HZ)) {
  1289. tape->avg_speed = tape->avg_size * HZ /
  1290. (jiffies - tape->avg_time) / 1024;
  1291. tape->avg_size = 0;
  1292. tape->avg_time = jiffies;
  1293. }
  1294. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1295. tape->first_frame += blocks;
  1296. rq->current_nr_sectors -= blocks;
  1297. if (!tape->pc->error)
  1298. idetape_end_request(drive, 1, 0);
  1299. else
  1300. idetape_end_request(drive, tape->pc->error, 0);
  1301. return ide_stopped;
  1302. }
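/*
 * Note on the two command builders below: put_unaligned() stores the 32-bit
 * big-endian block count at &pc->c[1] (i.e. into c[1]..c[4]), and the
 * following "pc->c[1] = 1" then overwrites the most significant byte (zero
 * for any sane length) with the fixed-block bit, leaving the 24-bit transfer
 * length in c[2]..c[4] as READ(6)/WRITE(6) expect. DMA is only recommended
 * when the request covers a whole stage.
 */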
  1303. static void idetape_create_read_cmd(idetape_tape_t *tape,
  1304. struct ide_atapi_pc *pc,
  1305. unsigned int length, struct idetape_bh *bh)
  1306. {
  1307. idetape_init_pc(pc);
  1308. pc->c[0] = READ_6;
  1309. put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
  1310. pc->c[1] = 1;
  1311. pc->idetape_callback = &idetape_rw_callback;
  1312. pc->bh = bh;
  1313. atomic_set(&bh->b_count, 0);
  1314. pc->buf = NULL;
  1315. pc->buf_size = length * tape->blk_size;
  1316. pc->req_xfer = pc->buf_size;
  1317. if (pc->req_xfer == tape->stage_size)
  1318. pc->flags |= PC_FLAG_DMA_RECOMMENDED;
  1319. }
  1320. static void idetape_create_write_cmd(idetape_tape_t *tape,
  1321. struct ide_atapi_pc *pc,
  1322. unsigned int length, struct idetape_bh *bh)
  1323. {
  1324. idetape_init_pc(pc);
  1325. pc->c[0] = WRITE_6;
  1326. put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
  1327. pc->c[1] = 1;
  1328. pc->idetape_callback = &idetape_rw_callback;
  1329. pc->flags |= PC_FLAG_WRITING;
  1330. pc->bh = bh;
  1331. pc->b_data = bh->b_data;
  1332. pc->b_count = atomic_read(&bh->b_count);
  1333. pc->buf = NULL;
  1334. pc->buf_size = length * tape->blk_size;
  1335. pc->req_xfer = pc->buf_size;
  1336. if (pc->req_xfer == tape->stage_size)
  1337. pc->flags |= PC_FLAG_DMA_RECOMMENDED;
  1338. }
  1339. static ide_startstop_t idetape_do_request(ide_drive_t *drive,
  1340. struct request *rq, sector_t block)
  1341. {
  1342. idetape_tape_t *tape = drive->driver_data;
  1343. struct ide_atapi_pc *pc = NULL;
  1344. struct request *postponed_rq = tape->postponed_rq;
  1345. u8 stat;
  1346. debug_log(DBG_SENSE, "sector: %ld, nr_sectors: %ld,"
  1347. " current_nr_sectors: %d\n",
  1348. rq->sector, rq->nr_sectors, rq->current_nr_sectors);
  1349. if (!blk_special_request(rq)) {
  1350. /* We do not support buffer cache originated requests. */
  1351. printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
  1352. "request queue (%d)\n", drive->name, rq->cmd_type);
  1353. ide_end_request(drive, 0, 0);
  1354. return ide_stopped;
  1355. }
  1356. /* Retry a failed packet command */
  1357. if (tape->failed_pc && tape->pc->c[0] == REQUEST_SENSE)
  1358. return idetape_issue_pc(drive, tape->failed_pc);
  1359. if (postponed_rq != NULL)
  1360. if (rq != postponed_rq) {
  1361. printk(KERN_ERR "ide-tape: ide-tape.c bug - "
  1362. "Two DSC requests were queued\n");
  1363. idetape_end_request(drive, 0, 0);
  1364. return ide_stopped;
  1365. }
  1366. tape->postponed_rq = NULL;
  1367. /*
  1368. * If the tape is still busy, postpone our request and service
  1369. * the other device meanwhile.
  1370. */
  1371. stat = ide_read_status(drive);
  1372. if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
  1373. set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
  1374. if (drive->post_reset == 1) {
  1375. set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
  1376. drive->post_reset = 0;
  1377. }
  1378. if (time_after(jiffies, tape->insert_time))
  1379. tape->insert_speed = tape->insert_size / 1024 * HZ /
  1380. (jiffies - tape->insert_time);
  1381. idetape_calculate_speeds(drive);
  1382. if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
  1383. (stat & SEEK_STAT) == 0) {
  1384. if (postponed_rq == NULL) {
  1385. tape->dsc_polling_start = jiffies;
  1386. tape->dsc_poll_freq = tape->best_dsc_rw_freq;
  1387. tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
  1388. } else if (time_after(jiffies, tape->dsc_timeout)) {
  1389. printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
  1390. tape->name);
  1391. if (rq->cmd[0] & REQ_IDETAPE_PC2) {
  1392. idetape_media_access_finished(drive);
  1393. return ide_stopped;
  1394. } else {
  1395. return ide_do_reset(drive);
  1396. }
  1397. } else if (time_after(jiffies,
  1398. tape->dsc_polling_start +
  1399. IDETAPE_DSC_MA_THRESHOLD))
  1400. tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
  1401. idetape_postpone_request(drive);
  1402. return ide_stopped;
  1403. }
  1404. if (rq->cmd[0] & REQ_IDETAPE_READ) {
  1405. tape->buffer_head++;
  1406. tape->postpone_cnt = 0;
  1407. pc = idetape_next_pc_storage(drive);
  1408. idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
  1409. (struct idetape_bh *)rq->special);
  1410. goto out;
  1411. }
  1412. if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
  1413. tape->buffer_head++;
  1414. tape->postpone_cnt = 0;
  1415. pc = idetape_next_pc_storage(drive);
  1416. idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
  1417. (struct idetape_bh *)rq->special);
  1418. goto out;
  1419. }
  1420. if (rq->cmd[0] & REQ_IDETAPE_PC1) {
  1421. pc = (struct ide_atapi_pc *) rq->buffer;
  1422. rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
  1423. rq->cmd[0] |= REQ_IDETAPE_PC2;
  1424. goto out;
  1425. }
  1426. if (rq->cmd[0] & REQ_IDETAPE_PC2) {
  1427. idetape_media_access_finished(drive);
  1428. return ide_stopped;
  1429. }
  1430. BUG();
  1431. out:
  1432. return idetape_issue_pc(drive, pc);
  1433. }
  1434. /* Pipeline related functions */
/*
 * The function below uses __get_free_page to allocate a pipeline stage, along
 * with all the necessary small buffers which together make a buffer of size
 * tape->stage_size (or a bit more). We attempt to merge physically adjacent
 * pages as much as possible.
 *
 * It returns a pointer to the newly allocated stage, or NULL if we can't (or
 * don't want to) allocate a stage.
 *
 * Pipeline stages are optional and are used to increase performance. If we
 * can't allocate them, we'll manage without them.
 */
  1447. static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
  1448. int clear)
  1449. {
  1450. idetape_stage_t *stage;
  1451. struct idetape_bh *prev_bh, *bh;
  1452. int pages = tape->pages_per_stage;
  1453. char *b_data = NULL;
  1454. stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
  1455. if (!stage)
  1456. return NULL;
  1457. stage->next = NULL;
  1458. stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
  1459. bh = stage->bh;
  1460. if (bh == NULL)
  1461. goto abort;
  1462. bh->b_reqnext = NULL;
  1463. bh->b_data = (char *) __get_free_page(GFP_KERNEL);
  1464. if (!bh->b_data)
  1465. goto abort;
  1466. if (clear)
  1467. memset(bh->b_data, 0, PAGE_SIZE);
  1468. bh->b_size = PAGE_SIZE;
  1469. atomic_set(&bh->b_count, full ? bh->b_size : 0);
  1470. while (--pages) {
  1471. b_data = (char *) __get_free_page(GFP_KERNEL);
  1472. if (!b_data)
  1473. goto abort;
  1474. if (clear)
  1475. memset(b_data, 0, PAGE_SIZE);
  1476. if (bh->b_data == b_data + PAGE_SIZE) {
  1477. bh->b_size += PAGE_SIZE;
  1478. bh->b_data -= PAGE_SIZE;
  1479. if (full)
  1480. atomic_add(PAGE_SIZE, &bh->b_count);
  1481. continue;
  1482. }
  1483. if (b_data == bh->b_data + bh->b_size) {
  1484. bh->b_size += PAGE_SIZE;
  1485. if (full)
  1486. atomic_add(PAGE_SIZE, &bh->b_count);
  1487. continue;
  1488. }
  1489. prev_bh = bh;
  1490. bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
  1491. if (!bh) {
  1492. free_page((unsigned long) b_data);
  1493. goto abort;
  1494. }
  1495. bh->b_reqnext = NULL;
  1496. bh->b_data = b_data;
  1497. bh->b_size = PAGE_SIZE;
  1498. atomic_set(&bh->b_count, full ? bh->b_size : 0);
  1499. prev_bh->b_reqnext = bh;
  1500. }
  1501. bh->b_size -= tape->excess_bh_size;
  1502. if (full)
  1503. atomic_sub(tape->excess_bh_size, &bh->b_count);
  1504. return stage;
  1505. abort:
  1506. __idetape_kfree_stage(stage);
  1507. return NULL;
  1508. }
  1509. static idetape_stage_t *idetape_kmalloc_stage(idetape_tape_t *tape)
  1510. {
  1511. idetape_stage_t *cache_stage = tape->cache_stage;
  1512. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1513. if (tape->nr_stages >= tape->max_stages)
  1514. return NULL;
  1515. if (cache_stage != NULL) {
  1516. tape->cache_stage = NULL;
  1517. return cache_stage;
  1518. }
  1519. return __idetape_kmalloc_stage(tape, 0, 0);
  1520. }
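/*
 * The two helpers below shuttle data between a user buffer and a stage's chain
 * of idetape_bh buffers. The "from_user" variant fills buffers through the
 * tape->bh cursor, using b_count as the fill level; the "to_user" variant
 * drains them through the tape->b_data/tape->b_count cursors. Both move to the
 * next buffer via b_reqnext once the current one is exhausted, and return
 * nonzero if the user-space copy faulted (or a buffer was missing), 0
 * otherwise.
 */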
  1521. static int idetape_copy_stage_from_user(idetape_tape_t *tape,
  1522. idetape_stage_t *stage, const char __user *buf, int n)
  1523. {
  1524. struct idetape_bh *bh = tape->bh;
  1525. int count;
  1526. int ret = 0;
  1527. while (n) {
  1528. if (bh == NULL) {
  1529. printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
  1530. __func__);
  1531. return 1;
  1532. }
  1533. count = min((unsigned int)
  1534. (bh->b_size - atomic_read(&bh->b_count)),
  1535. (unsigned int)n);
  1536. if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
  1537. count))
  1538. ret = 1;
  1539. n -= count;
  1540. atomic_add(count, &bh->b_count);
  1541. buf += count;
  1542. if (atomic_read(&bh->b_count) == bh->b_size) {
  1543. bh = bh->b_reqnext;
  1544. if (bh)
  1545. atomic_set(&bh->b_count, 0);
  1546. }
  1547. }
  1548. tape->bh = bh;
  1549. return ret;
  1550. }
  1551. static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
  1552. idetape_stage_t *stage, int n)
  1553. {
  1554. struct idetape_bh *bh = tape->bh;
  1555. int count;
  1556. int ret = 0;
  1557. while (n) {
  1558. if (bh == NULL) {
  1559. printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
  1560. __func__);
  1561. return 1;
  1562. }
  1563. count = min(tape->b_count, n);
  1564. if (copy_to_user(buf, tape->b_data, count))
  1565. ret = 1;
  1566. n -= count;
  1567. tape->b_data += count;
  1568. tape->b_count -= count;
  1569. buf += count;
  1570. if (!tape->b_count) {
  1571. bh = bh->b_reqnext;
  1572. tape->bh = bh;
  1573. if (bh) {
  1574. tape->b_data = bh->b_data;
  1575. tape->b_count = atomic_read(&bh->b_count);
  1576. }
  1577. }
  1578. }
  1579. return ret;
  1580. }
  1581. static void idetape_init_merge_stage(idetape_tape_t *tape)
  1582. {
  1583. struct idetape_bh *bh = tape->merge_stage->bh;
  1584. tape->bh = bh;
  1585. if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
  1586. atomic_set(&bh->b_count, 0);
  1587. else {
  1588. tape->b_data = bh->b_data;
  1589. tape->b_count = atomic_read(&bh->b_count);
  1590. }
  1591. }
  1592. static void idetape_switch_buffers(idetape_tape_t *tape, idetape_stage_t *stage)
  1593. {
  1594. struct idetape_bh *tmp;
  1595. tmp = stage->bh;
  1596. stage->bh = tape->merge_stage->bh;
  1597. tape->merge_stage->bh = tmp;
  1598. idetape_init_merge_stage(tape);
  1599. }
  1600. /* Add a new stage at the end of the pipeline. */
  1601. static void idetape_add_stage_tail(ide_drive_t *drive, idetape_stage_t *stage)
  1602. {
  1603. idetape_tape_t *tape = drive->driver_data;
  1604. unsigned long flags;
  1605. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1606. spin_lock_irqsave(&tape->lock, flags);
  1607. stage->next = NULL;
  1608. if (tape->last_stage != NULL)
  1609. tape->last_stage->next = stage;
  1610. else
  1611. tape->first_stage = stage;
  1612. tape->next_stage = stage;
  1613. tape->last_stage = stage;
  1614. if (tape->next_stage == NULL)
  1615. tape->next_stage = tape->last_stage;
  1616. tape->nr_stages++;
  1617. tape->nr_pending_stages++;
  1618. spin_unlock_irqrestore(&tape->lock, flags);
  1619. }
  1620. /* Install a completion in a pending request and sleep until it is serviced. The
  1621. * caller should ensure that the request will not be serviced before we install
  1622. * the completion (usually by disabling interrupts).
  1623. */
  1624. static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
  1625. {
  1626. DECLARE_COMPLETION_ONSTACK(wait);
  1627. idetape_tape_t *tape = drive->driver_data;
  1628. if (rq == NULL || !blk_special_request(rq)) {
  1629. printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
  1630. " request\n");
  1631. return;
  1632. }
  1633. rq->end_io_data = &wait;
  1634. rq->end_io = blk_end_sync_rq;
  1635. spin_unlock_irq(&tape->lock);
  1636. wait_for_completion(&wait);
  1637. /* The stage and its struct request have been deallocated */
  1638. spin_lock_irq(&tape->lock);
  1639. }
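/*
 * The callback below parses the READ POSITION data returned by the drive:
 * readpos[0] carries the BOP (0x80), EOP (0x40) and "block position unknown"
 * (0x04) flags, readpos[1] is the partition number, and readpos[4..7] hold
 * the current frame address as a big-endian 32-bit value.
 */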
  1640. static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
  1641. {
  1642. idetape_tape_t *tape = drive->driver_data;
  1643. u8 *readpos = tape->pc->buf;
  1644. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1645. if (!tape->pc->error) {
  1646. debug_log(DBG_SENSE, "BOP - %s\n",
  1647. (readpos[0] & 0x80) ? "Yes" : "No");
  1648. debug_log(DBG_SENSE, "EOP - %s\n",
  1649. (readpos[0] & 0x40) ? "Yes" : "No");
  1650. if (readpos[0] & 0x4) {
			printk(KERN_INFO "ide-tape: Block location is unknown "
					 "to the tape\n");
  1653. clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
  1654. idetape_end_request(drive, 0, 0);
  1655. } else {
  1656. debug_log(DBG_SENSE, "Block Location - %u\n",
  1657. be32_to_cpu(*(u32 *)&readpos[4]));
  1658. tape->partition = readpos[1];
  1659. tape->first_frame =
  1660. be32_to_cpu(*(u32 *)&readpos[4]);
  1661. set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
  1662. idetape_end_request(drive, 1, 0);
  1663. }
  1664. } else {
  1665. idetape_end_request(drive, 0, 0);
  1666. }
  1667. return ide_stopped;
  1668. }
  1669. /*
  1670. * Write a filemark if write_filemark=1. Flush the device buffers without
  1671. * writing a filemark otherwise.
  1672. */
  1673. static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
  1674. struct ide_atapi_pc *pc, int write_filemark)
  1675. {
  1676. idetape_init_pc(pc);
  1677. pc->c[0] = WRITE_FILEMARKS;
  1678. pc->c[4] = write_filemark;
  1679. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1680. pc->idetape_callback = &idetape_pc_callback;
  1681. }
  1682. static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
  1683. {
  1684. idetape_init_pc(pc);
  1685. pc->c[0] = TEST_UNIT_READY;
  1686. pc->idetape_callback = &idetape_pc_callback;
  1687. }
/*
 * We add a special packet command request to the tail of the request queue and
 * wait for it to be serviced. This is not to be called from within the request
 * handling part of the driver! The data we allocate on the stack here remains
 * valid until the request is finished. This is not the case for the bottom
 * part of the driver, where we always leave the functions in order to wait for
 * an interrupt or a timer event.
 *
 * From the bottom part of the driver, we should allocate safe memory using
 * idetape_next_pc_storage() and ide_tape_next_rq_storage(), and add the request
 * to the request list without waiting for it to be serviced! In that case, we
 * usually use idetape_queue_pc_head().
 */
  1701. static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
  1702. {
  1703. struct ide_tape_obj *tape = drive->driver_data;
  1704. struct request rq;
  1705. idetape_init_rq(&rq, REQ_IDETAPE_PC1);
  1706. rq.buffer = (char *) pc;
  1707. rq.rq_disk = tape->disk;
  1708. return ide_do_drive_cmd(drive, &rq, ide_wait);
  1709. }
  1710. static void idetape_create_load_unload_cmd(ide_drive_t *drive,
  1711. struct ide_atapi_pc *pc, int cmd)
  1712. {
  1713. idetape_init_pc(pc);
  1714. pc->c[0] = START_STOP;
  1715. pc->c[4] = cmd;
  1716. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1717. pc->idetape_callback = &idetape_pc_callback;
  1718. }
  1719. static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
  1720. {
  1721. idetape_tape_t *tape = drive->driver_data;
  1722. struct ide_atapi_pc pc;
  1723. int load_attempted = 0;
  1724. /* Wait for the tape to become ready */
  1725. set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
  1726. timeout += jiffies;
  1727. while (time_before(jiffies, timeout)) {
  1728. idetape_create_test_unit_ready_cmd(&pc);
  1729. if (!__idetape_queue_pc_tail(drive, &pc))
  1730. return 0;
  1731. if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
  1732. || (tape->asc == 0x3A)) {
  1733. /* no media */
  1734. if (load_attempted)
  1735. return -ENOMEDIUM;
  1736. idetape_create_load_unload_cmd(drive, &pc,
  1737. IDETAPE_LU_LOAD_MASK);
  1738. __idetape_queue_pc_tail(drive, &pc);
  1739. load_attempted = 1;
  1740. /* not about to be ready */
  1741. } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
  1742. (tape->ascq == 1 || tape->ascq == 8)))
  1743. return -EIO;
  1744. msleep(100);
  1745. }
  1746. return -EIO;
  1747. }
  1748. static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
  1749. {
  1750. return __idetape_queue_pc_tail(drive, pc);
  1751. }
  1752. static int idetape_flush_tape_buffers(ide_drive_t *drive)
  1753. {
  1754. struct ide_atapi_pc pc;
  1755. int rc;
  1756. idetape_create_write_filemark_cmd(drive, &pc, 0);
  1757. rc = idetape_queue_pc_tail(drive, &pc);
  1758. if (rc)
  1759. return rc;
  1760. idetape_wait_ready(drive, 60 * 5 * HZ);
  1761. return 0;
  1762. }
  1763. static void idetape_create_read_position_cmd(struct ide_atapi_pc *pc)
  1764. {
  1765. idetape_init_pc(pc);
  1766. pc->c[0] = READ_POSITION;
  1767. pc->req_xfer = 20;
  1768. pc->idetape_callback = &idetape_read_position_callback;
  1769. }
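/*
 * Queue a READ POSITION command at the tail of the request queue and wait for
 * it to complete; the callback above updates tape->first_frame and the
 * ADDRESS_VALID flag. Returns the current frame number, or -1 on failure.
 */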
  1770. static int idetape_read_position(ide_drive_t *drive)
  1771. {
  1772. idetape_tape_t *tape = drive->driver_data;
  1773. struct ide_atapi_pc pc;
  1774. int position;
  1775. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1776. idetape_create_read_position_cmd(&pc);
  1777. if (idetape_queue_pc_tail(drive, &pc))
  1778. return -1;
  1779. position = tape->first_frame;
  1780. return position;
  1781. }
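/*
 * Build a LOCATE command: c[1] is set to 2, the target block goes big-endian
 * into c[3]..c[6], and c[8] selects the partition. The "skip" argument is
 * currently unused.
 */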
  1782. static void idetape_create_locate_cmd(ide_drive_t *drive,
  1783. struct ide_atapi_pc *pc,
  1784. unsigned int block, u8 partition, int skip)
  1785. {
  1786. idetape_init_pc(pc);
  1787. pc->c[0] = POSITION_TO_ELEMENT;
  1788. pc->c[1] = 2;
  1789. put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
  1790. pc->c[8] = partition;
  1791. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1792. pc->idetape_callback = &idetape_pc_callback;
  1793. }
  1794. static int idetape_create_prevent_cmd(ide_drive_t *drive,
  1795. struct ide_atapi_pc *pc, int prevent)
  1796. {
  1797. idetape_tape_t *tape = drive->driver_data;
  1798. /* device supports locking according to capabilities page */
  1799. if (!(tape->caps[6] & 0x01))
  1800. return 0;
  1801. idetape_init_pc(pc);
  1802. pc->c[0] = ALLOW_MEDIUM_REMOVAL;
  1803. pc->c[4] = prevent;
  1804. pc->idetape_callback = &idetape_pc_callback;
  1805. return 1;
  1806. }
  1807. static int __idetape_discard_read_pipeline(ide_drive_t *drive)
  1808. {
  1809. idetape_tape_t *tape = drive->driver_data;
  1810. unsigned long flags;
  1811. int cnt;
  1812. if (tape->chrdev_dir != IDETAPE_DIR_READ)
  1813. return 0;
  1814. /* Remove merge stage. */
  1815. cnt = tape->merge_stage_size / tape->blk_size;
  1816. if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
  1817. ++cnt; /* Filemarks count as 1 sector */
  1818. tape->merge_stage_size = 0;
  1819. if (tape->merge_stage != NULL) {
  1820. __idetape_kfree_stage(tape->merge_stage);
  1821. tape->merge_stage = NULL;
  1822. }
  1823. /* Clear pipeline flags. */
  1824. clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
  1825. tape->chrdev_dir = IDETAPE_DIR_NONE;
  1826. /* Remove pipeline stages. */
  1827. if (tape->first_stage == NULL)
  1828. return 0;
  1829. spin_lock_irqsave(&tape->lock, flags);
  1830. tape->next_stage = NULL;
  1831. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
  1832. idetape_wait_for_request(drive, tape->active_data_rq);
  1833. spin_unlock_irqrestore(&tape->lock, flags);
  1834. while (tape->first_stage != NULL) {
  1835. struct request *rq_ptr = &tape->first_stage->rq;
  1836. cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
  1837. if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
  1838. ++cnt;
  1839. idetape_remove_stage_head(drive);
  1840. }
  1841. tape->nr_pending_stages = 0;
  1842. tape->max_stages = tape->min_pipeline;
  1843. return cnt;
  1844. }
  1845. /*
  1846. * Position the tape to the requested block using the LOCATE packet command.
  1847. * A READ POSITION command is then issued to check where we are positioned. Like
  1848. * all higher level operations, we queue the commands at the tail of the request
  1849. * queue and wait for their completion.
  1850. */
  1851. static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
  1852. u8 partition, int skip)
  1853. {
  1854. idetape_tape_t *tape = drive->driver_data;
  1855. int retval;
  1856. struct ide_atapi_pc pc;
  1857. if (tape->chrdev_dir == IDETAPE_DIR_READ)
  1858. __idetape_discard_read_pipeline(drive);
  1859. idetape_wait_ready(drive, 60 * 5 * HZ);
  1860. idetape_create_locate_cmd(drive, &pc, block, partition, skip);
  1861. retval = idetape_queue_pc_tail(drive, &pc);
  1862. if (retval)
  1863. return (retval);
  1864. idetape_create_read_position_cmd(&pc);
  1865. return (idetape_queue_pc_tail(drive, &pc));
  1866. }
  1867. static void idetape_discard_read_pipeline(ide_drive_t *drive,
  1868. int restore_position)
  1869. {
  1870. idetape_tape_t *tape = drive->driver_data;
  1871. int cnt;
  1872. int seek, position;
  1873. cnt = __idetape_discard_read_pipeline(drive);
  1874. if (restore_position) {
  1875. position = idetape_read_position(drive);
  1876. seek = position > cnt ? position - cnt : 0;
  1877. if (idetape_position_tape(drive, seek, 0, 0)) {
  1878. printk(KERN_INFO "ide-tape: %s: position_tape failed in"
  1879. " discard_pipeline()\n", tape->name);
  1880. return;
  1881. }
  1882. }
  1883. }
  1884. /*
  1885. * Generate a read/write request for the block device interface and wait for it
  1886. * to be serviced.
  1887. */
  1888. static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
  1889. struct idetape_bh *bh)
  1890. {
  1891. idetape_tape_t *tape = drive->driver_data;
  1892. struct request rq;
  1893. debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
  1894. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  1895. printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
  1896. __func__);
  1897. return (0);
  1898. }
  1899. idetape_init_rq(&rq, cmd);
  1900. rq.rq_disk = tape->disk;
  1901. rq.special = (void *)bh;
  1902. rq.sector = tape->first_frame;
  1903. rq.nr_sectors = blocks;
  1904. rq.current_nr_sectors = blocks;
  1905. (void) ide_do_drive_cmd(drive, &rq, ide_wait);
  1906. if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
  1907. return 0;
  1908. if (tape->merge_stage)
  1909. idetape_init_merge_stage(tape);
  1910. if (rq.errors == IDETAPE_ERROR_GENERAL)
  1911. return -EIO;
  1912. return (tape->blk_size * (blocks-rq.current_nr_sectors));
  1913. }
  1914. /* start servicing the pipeline stages, starting from tape->next_stage. */
  1915. static void idetape_plug_pipeline(ide_drive_t *drive)
  1916. {
  1917. idetape_tape_t *tape = drive->driver_data;
  1918. if (tape->next_stage == NULL)
  1919. return;
  1920. if (!test_and_set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  1921. idetape_activate_next_stage(drive);
  1922. (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
  1923. }
  1924. }
  1925. static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
  1926. {
  1927. idetape_init_pc(pc);
  1928. pc->c[0] = INQUIRY;
  1929. pc->c[4] = 254;
  1930. pc->req_xfer = 254;
  1931. pc->idetape_callback = &idetape_pc_callback;
  1932. }
  1933. static void idetape_create_rewind_cmd(ide_drive_t *drive,
  1934. struct ide_atapi_pc *pc)
  1935. {
  1936. idetape_init_pc(pc);
  1937. pc->c[0] = REZERO_UNIT;
  1938. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1939. pc->idetape_callback = &idetape_pc_callback;
  1940. }
  1941. static void idetape_create_erase_cmd(struct ide_atapi_pc *pc)
  1942. {
  1943. idetape_init_pc(pc);
  1944. pc->c[0] = ERASE;
  1945. pc->c[1] = 1;
  1946. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1947. pc->idetape_callback = &idetape_pc_callback;
  1948. }
  1949. static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
  1950. {
  1951. idetape_init_pc(pc);
  1952. pc->c[0] = SPACE;
  1953. put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
  1954. pc->c[1] = cmd;
  1955. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1956. pc->idetape_callback = &idetape_pc_callback;
  1957. }
  1958. static void idetape_wait_first_stage(ide_drive_t *drive)
  1959. {
  1960. idetape_tape_t *tape = drive->driver_data;
  1961. unsigned long flags;
  1962. if (tape->first_stage == NULL)
  1963. return;
  1964. spin_lock_irqsave(&tape->lock, flags);
  1965. if (tape->active_stage == tape->first_stage)
  1966. idetape_wait_for_request(drive, tape->active_data_rq);
  1967. spin_unlock_irqrestore(&tape->lock, flags);
  1968. }
  1969. /*
  1970. * Try to add a character device originated write request to our pipeline. In
  1971. * case we don't succeed, we revert to non-pipelined operation mode for this
  1972. * request. In order to accomplish that, we
  1973. *
  1974. * 1. Try to allocate a new pipeline stage.
  1975. * 2. If we can't, wait for more and more requests to be serviced and try again
  1976. * each time.
  1977. * 3. If we still can't allocate a stage, fallback to non-pipelined operation
  1978. * mode for this request.
  1979. */
  1980. static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
  1981. {
  1982. idetape_tape_t *tape = drive->driver_data;
  1983. idetape_stage_t *new_stage;
  1984. unsigned long flags;
  1985. struct request *rq;
  1986. debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
  1987. /* Attempt to allocate a new stage. Beware possible race conditions. */
  1988. while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) {
  1989. spin_lock_irqsave(&tape->lock, flags);
  1990. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  1991. idetape_wait_for_request(drive, tape->active_data_rq);
  1992. spin_unlock_irqrestore(&tape->lock, flags);
  1993. } else {
  1994. spin_unlock_irqrestore(&tape->lock, flags);
  1995. idetape_plug_pipeline(drive);
  1996. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
  1997. &tape->flags))
  1998. continue;
			/*
			 * The machine is short on memory. Fall back to non-
			 * pipelined operation mode for this request.
			 */
  2003. return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
  2004. blocks, tape->merge_stage->bh);
  2005. }
  2006. }
  2007. rq = &new_stage->rq;
  2008. idetape_init_rq(rq, REQ_IDETAPE_WRITE);
  2009. /* Doesn't actually matter - We always assume sequential access */
  2010. rq->sector = tape->first_frame;
  2011. rq->current_nr_sectors = blocks;
  2012. rq->nr_sectors = blocks;
  2013. idetape_switch_buffers(tape, new_stage);
  2014. idetape_add_stage_tail(drive, new_stage);
  2015. tape->pipeline_head++;
  2016. idetape_calculate_speeds(drive);
  2017. /*
  2018. * Estimate whether the tape has stopped writing by checking if our
  2019. * write pipeline is currently empty. If we are not writing anymore,
  2020. * wait for the pipeline to be almost completely full (90%) before
  2021. * starting to service requests, so that we will be able to keep up with
  2022. * the higher speeds of the tape.
  2023. */
  2024. if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  2025. if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
  2026. tape->nr_stages >= tape->max_stages -
  2027. tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
  2028. tape->blk_size) {
  2029. tape->measure_insert_time = 1;
  2030. tape->insert_time = jiffies;
  2031. tape->insert_size = 0;
  2032. tape->insert_speed = 0;
  2033. idetape_plug_pipeline(drive);
  2034. }
  2035. }
  2036. if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
  2037. /* Return a deferred error */
  2038. return -EIO;
  2039. return blocks;
  2040. }
  2041. /*
  2042. * Wait until all pending pipeline requests are serviced. Typically called on
  2043. * device close.
  2044. */
  2045. static void idetape_wait_for_pipeline(ide_drive_t *drive)
  2046. {
  2047. idetape_tape_t *tape = drive->driver_data;
  2048. unsigned long flags;
  2049. while (tape->next_stage || test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
  2050. &tape->flags)) {
  2051. idetape_plug_pipeline(drive);
  2052. spin_lock_irqsave(&tape->lock, flags);
  2053. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
  2054. idetape_wait_for_request(drive, tape->active_data_rq);
  2055. spin_unlock_irqrestore(&tape->lock, flags);
  2056. }
  2057. }
  2058. static void idetape_empty_write_pipeline(ide_drive_t *drive)
  2059. {
  2060. idetape_tape_t *tape = drive->driver_data;
  2061. int blocks, min;
  2062. struct idetape_bh *bh;
  2063. if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
  2064. printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
  2065. " but we are not writing.\n");
  2066. return;
  2067. }
  2068. if (tape->merge_stage_size > tape->stage_size) {
  2069. printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
  2070. tape->merge_stage_size = tape->stage_size;
  2071. }
  2072. if (tape->merge_stage_size) {
  2073. blocks = tape->merge_stage_size / tape->blk_size;
  2074. if (tape->merge_stage_size % tape->blk_size) {
  2075. unsigned int i;
  2076. blocks++;
  2077. i = tape->blk_size - tape->merge_stage_size %
  2078. tape->blk_size;
  2079. bh = tape->bh->b_reqnext;
  2080. while (bh) {
  2081. atomic_set(&bh->b_count, 0);
  2082. bh = bh->b_reqnext;
  2083. }
  2084. bh = tape->bh;
  2085. while (i) {
  2086. if (bh == NULL) {
  2087. printk(KERN_INFO "ide-tape: bug,"
  2088. " bh NULL\n");
  2089. break;
  2090. }
  2091. min = min(i, (unsigned int)(bh->b_size -
  2092. atomic_read(&bh->b_count)));
  2093. memset(bh->b_data + atomic_read(&bh->b_count),
  2094. 0, min);
  2095. atomic_add(min, &bh->b_count);
  2096. i -= min;
  2097. bh = bh->b_reqnext;
  2098. }
  2099. }
  2100. (void) idetape_add_chrdev_write_request(drive, blocks);
  2101. tape->merge_stage_size = 0;
  2102. }
  2103. idetape_wait_for_pipeline(drive);
  2104. if (tape->merge_stage != NULL) {
  2105. __idetape_kfree_stage(tape->merge_stage);
  2106. tape->merge_stage = NULL;
  2107. }
  2108. clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
  2109. tape->chrdev_dir = IDETAPE_DIR_NONE;
  2110. /*
  2111. * On the next backup, perform the feedback loop again. (I don't want to
  2112. * keep sense information between backups, as some systems are
  2113. * constantly on, and the system load can be totally different on the
  2114. * next backup).
  2115. */
  2116. tape->max_stages = tape->min_pipeline;
  2117. if (tape->first_stage != NULL ||
  2118. tape->next_stage != NULL ||
  2119. tape->last_stage != NULL ||
  2120. tape->nr_stages != 0) {
  2121. printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
  2122. "first_stage %p, next_stage %p, "
  2123. "last_stage %p, nr_stages %d\n",
  2124. tape->first_stage, tape->next_stage,
  2125. tape->last_stage, tape->nr_stages);
  2126. }
  2127. }
  2128. static void idetape_restart_speed_control(ide_drive_t *drive)
  2129. {
  2130. idetape_tape_t *tape = drive->driver_data;
  2131. tape->restart_speed_control_req = 0;
  2132. tape->pipeline_head = 0;
  2133. tape->controlled_last_pipeline_head = 0;
  2134. tape->controlled_previous_pipeline_head = 0;
  2135. tape->uncontrolled_previous_pipeline_head = 0;
  2136. tape->controlled_pipeline_head_speed = 5000;
  2137. tape->pipeline_head_speed = 5000;
  2138. tape->uncontrolled_pipeline_head_speed = 0;
  2139. tape->controlled_pipeline_head_time =
  2140. tape->uncontrolled_pipeline_head_time = jiffies;
  2141. tape->controlled_previous_head_time =
  2142. tape->uncontrolled_previous_head_time = jiffies;
  2143. }
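/*
 * Prepare for character-device reads: tear down an active write pipeline if
 * necessary, allocate the merge stage and switch chrdev_dir to reading, issue
 * a zero-length read to move the DSC handshake to buffer-available mode (only
 * when dsc_overlap is enabled), then pre-allocate read-ahead stages up to
 * max_stages and start servicing them once enough of them are pending.
 */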
  2144. static int idetape_init_read(ide_drive_t *drive, int max_stages)
  2145. {
  2146. idetape_tape_t *tape = drive->driver_data;
  2147. idetape_stage_t *new_stage;
  2148. struct request rq;
  2149. int bytes_read;
  2150. u16 blocks = *(u16 *)&tape->caps[12];
  2151. /* Initialize read operation */
  2152. if (tape->chrdev_dir != IDETAPE_DIR_READ) {
  2153. if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
  2154. idetape_empty_write_pipeline(drive);
  2155. idetape_flush_tape_buffers(drive);
  2156. }
  2157. if (tape->merge_stage || tape->merge_stage_size) {
  2158. printk(KERN_ERR "ide-tape: merge_stage_size should be"
  2159. " 0 now\n");
  2160. tape->merge_stage_size = 0;
  2161. }
  2162. tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
  2163. if (!tape->merge_stage)
  2164. return -ENOMEM;
  2165. tape->chrdev_dir = IDETAPE_DIR_READ;
  2166. /*
  2167. * Issue a read 0 command to ensure that DSC handshake is
  2168. * switched from completion mode to buffer available mode.
  2169. * No point in issuing this if DSC overlap isn't supported, some
  2170. * drives (Seagate STT3401A) will return an error.
  2171. */
  2172. if (drive->dsc_overlap) {
  2173. bytes_read = idetape_queue_rw_tail(drive,
  2174. REQ_IDETAPE_READ, 0,
  2175. tape->merge_stage->bh);
  2176. if (bytes_read < 0) {
  2177. __idetape_kfree_stage(tape->merge_stage);
  2178. tape->merge_stage = NULL;
  2179. tape->chrdev_dir = IDETAPE_DIR_NONE;
  2180. return bytes_read;
  2181. }
  2182. }
  2183. }
  2184. if (tape->restart_speed_control_req)
  2185. idetape_restart_speed_control(drive);
  2186. idetape_init_rq(&rq, REQ_IDETAPE_READ);
  2187. rq.sector = tape->first_frame;
  2188. rq.nr_sectors = blocks;
  2189. rq.current_nr_sectors = blocks;
  2190. if (!test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags) &&
  2191. tape->nr_stages < max_stages) {
  2192. new_stage = idetape_kmalloc_stage(tape);
  2193. while (new_stage != NULL) {
  2194. new_stage->rq = rq;
  2195. idetape_add_stage_tail(drive, new_stage);
  2196. if (tape->nr_stages >= max_stages)
  2197. break;
  2198. new_stage = idetape_kmalloc_stage(tape);
  2199. }
  2200. }
  2201. if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  2202. if (tape->nr_pending_stages >= 3 * max_stages / 4) {
  2203. tape->measure_insert_time = 1;
  2204. tape->insert_time = jiffies;
  2205. tape->insert_size = 0;
  2206. tape->insert_speed = 0;
  2207. idetape_plug_pipeline(drive);
  2208. }
  2209. }
  2210. return 0;
  2211. }
  2212. /*
  2213. * Called from idetape_chrdev_read() to service a character device read request
  2214. * and add read-ahead requests to our pipeline.
  2215. */
  2216. static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
  2217. {
  2218. idetape_tape_t *tape = drive->driver_data;
  2219. unsigned long flags;
  2220. struct request *rq_ptr;
  2221. int bytes_read;
  2222. debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
  2223. /* If we are at a filemark, return a read length of 0 */
  2224. if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
  2225. return 0;
  2226. /* Wait for the next block to reach the head of the pipeline. */
  2227. idetape_init_read(drive, tape->max_stages);
  2228. if (tape->first_stage == NULL) {
  2229. if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
  2230. return 0;
  2231. return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
  2232. tape->merge_stage->bh);
  2233. }
  2234. idetape_wait_first_stage(drive);
  2235. rq_ptr = &tape->first_stage->rq;
  2236. bytes_read = tape->blk_size * (rq_ptr->nr_sectors -
  2237. rq_ptr->current_nr_sectors);
  2238. rq_ptr->nr_sectors = 0;
  2239. rq_ptr->current_nr_sectors = 0;
  2240. if (rq_ptr->errors == IDETAPE_ERROR_EOD)
  2241. return 0;
  2242. else {
  2243. idetape_switch_buffers(tape, tape->first_stage);
  2244. if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
  2245. set_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
  2246. spin_lock_irqsave(&tape->lock, flags);
  2247. idetape_remove_stage_head(drive);
  2248. spin_unlock_irqrestore(&tape->lock, flags);
  2249. tape->pipeline_head++;
  2250. idetape_calculate_speeds(drive);
  2251. }
  2252. if (bytes_read > blocks * tape->blk_size) {
  2253. printk(KERN_ERR "ide-tape: bug: trying to return more bytes"
  2254. " than requested\n");
  2255. bytes_read = blocks * tape->blk_size;
  2256. }
  2257. return (bytes_read);
  2258. }
  2259. static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
  2260. {
  2261. idetape_tape_t *tape = drive->driver_data;
  2262. struct idetape_bh *bh;
  2263. int blocks;
  2264. while (bcount) {
  2265. unsigned int count;
  2266. bh = tape->merge_stage->bh;
  2267. count = min(tape->stage_size, bcount);
  2268. bcount -= count;
  2269. blocks = count / tape->blk_size;
  2270. while (count) {
  2271. atomic_set(&bh->b_count,
  2272. min(count, (unsigned int)bh->b_size));
  2273. memset(bh->b_data, 0, atomic_read(&bh->b_count));
  2274. count -= atomic_read(&bh->b_count);
  2275. bh = bh->b_reqnext;
  2276. }
  2277. idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
  2278. tape->merge_stage->bh);
  2279. }
  2280. }
  2281. static int idetape_pipeline_size(ide_drive_t *drive)
  2282. {
  2283. idetape_tape_t *tape = drive->driver_data;
  2284. idetape_stage_t *stage;
  2285. struct request *rq;
  2286. int size = 0;
  2287. idetape_wait_for_pipeline(drive);
  2288. stage = tape->first_stage;
  2289. while (stage != NULL) {
  2290. rq = &stage->rq;
  2291. size += tape->blk_size * (rq->nr_sectors -
  2292. rq->current_nr_sectors);
  2293. if (rq->errors == IDETAPE_ERROR_FILEMARK)
  2294. size += tape->blk_size;
  2295. stage = stage->next;
  2296. }
  2297. size += tape->merge_stage_size;
  2298. return size;
  2299. }
  2300. /*
  2301. * Rewinds the tape to the Beginning Of the current Partition (BOP). We
  2302. * currently support only one partition.
  2303. */
  2304. static int idetape_rewind_tape(ide_drive_t *drive)
  2305. {
  2306. int retval;
  2307. struct ide_atapi_pc pc;
  2308. idetape_tape_t *tape;
  2309. tape = drive->driver_data;
  2310. debug_log(DBG_SENSE, "Enter %s\n", __func__);
  2311. idetape_create_rewind_cmd(drive, &pc);
  2312. retval = idetape_queue_pc_tail(drive, &pc);
  2313. if (retval)
  2314. return retval;
  2315. idetape_create_read_position_cmd(&pc);
  2316. retval = idetape_queue_pc_tail(drive, &pc);
  2317. if (retval)
  2318. return retval;
  2319. return 0;
  2320. }
  2321. /* mtio.h compatible commands should be issued to the chrdev interface. */
  2322. static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
  2323. unsigned long arg)
  2324. {
  2325. idetape_tape_t *tape = drive->driver_data;
  2326. void __user *argp = (void __user *)arg;
  2327. struct idetape_config {
  2328. int dsc_rw_frequency;
  2329. int dsc_media_access_frequency;
  2330. int nr_stages;
  2331. } config;
  2332. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  2333. switch (cmd) {
  2334. case 0x0340:
  2335. if (copy_from_user(&config, argp, sizeof(config)))
  2336. return -EFAULT;
  2337. tape->best_dsc_rw_freq = config.dsc_rw_frequency;
  2338. tape->max_stages = config.nr_stages;
  2339. break;
  2340. case 0x0350:
  2341. config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
  2342. config.nr_stages = tape->max_stages;
  2343. if (copy_to_user(argp, &config, sizeof(config)))
  2344. return -EFAULT;
  2345. break;
  2346. default:
  2347. return -EIO;
  2348. }
  2349. return 0;
  2350. }
  2351. /*
  2352. * The function below is now a bit more complicated than just passing the
  2353. * command to the tape since we may have crossed some filemarks during our
  2354. * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to
  2355. * support MTFSFM when the filemark is in our internal pipeline even if the tape
  2356. * doesn't support spacing over filemarks in the reverse direction.
  2357. */
  2358. static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
  2359. int mt_count)
  2360. {
  2361. idetape_tape_t *tape = drive->driver_data;
  2362. struct ide_atapi_pc pc;
  2363. unsigned long flags;
  2364. int retval, count = 0;
  2365. int sprev = !!(tape->caps[4] & 0x20);
  2366. if (mt_count == 0)
  2367. return 0;
  2368. if (MTBSF == mt_op || MTBSFM == mt_op) {
  2369. if (!sprev)
  2370. return -EIO;
  2371. mt_count = -mt_count;
  2372. }
  2373. if (tape->chrdev_dir == IDETAPE_DIR_READ) {
		/* It's a read-ahead buffer; scan it for crossed filemarks. */
  2375. tape->merge_stage_size = 0;
  2376. if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
  2377. ++count;
  2378. while (tape->first_stage != NULL) {
  2379. if (count == mt_count) {
  2380. if (mt_op == MTFSFM)
  2381. set_bit(IDETAPE_FLAG_FILEMARK,
  2382. &tape->flags);
  2383. return 0;
  2384. }
  2385. spin_lock_irqsave(&tape->lock, flags);
  2386. if (tape->first_stage == tape->active_stage) {
  2387. /*
  2388. * We have reached the active stage in the read
  2389. * pipeline. There is no point in allowing the
  2390. * drive to continue reading any farther, so we
  2391. * stop the pipeline.
  2392. *
  2393. * This section should be moved to a separate
  2394. * subroutine because similar operations are
  2395. * done in __idetape_discard_read_pipeline(),
  2396. * for example.
  2397. */
  2398. tape->next_stage = NULL;
  2399. spin_unlock_irqrestore(&tape->lock, flags);
  2400. idetape_wait_first_stage(drive);
  2401. tape->next_stage = tape->first_stage->next;
  2402. } else
  2403. spin_unlock_irqrestore(&tape->lock, flags);
  2404. if (tape->first_stage->rq.errors ==
  2405. IDETAPE_ERROR_FILEMARK)
  2406. ++count;
  2407. idetape_remove_stage_head(drive);
  2408. }
  2409. idetape_discard_read_pipeline(drive, 0);
  2410. }
  2411. /*
  2412. * The filemark was not found in our internal pipeline; now we can issue
  2413. * the space command.
  2414. */
  2415. switch (mt_op) {
  2416. case MTFSF:
  2417. case MTBSF:
  2418. idetape_create_space_cmd(&pc, mt_count - count,
  2419. IDETAPE_SPACE_OVER_FILEMARK);
  2420. return idetape_queue_pc_tail(drive, &pc);
  2421. case MTFSFM:
  2422. case MTBSFM:
  2423. if (!sprev)
  2424. return -EIO;
  2425. retval = idetape_space_over_filemarks(drive, MTFSF,
  2426. mt_count - count);
  2427. if (retval)
  2428. return retval;
  2429. count = (MTBSFM == mt_op ? 1 : -1);
  2430. return idetape_space_over_filemarks(drive, MTFSF, count);
  2431. default:
  2432. printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
  2433. mt_op);
  2434. return -EIO;
  2435. }
  2436. }
/*
 * Our character device read / write functions.
 *
 * The tape is optimized to maximize throughput when it is transferring an
 * integral multiple of the "continuous transfer limit", which is a parameter
 * of the specific tape (26kB on my particular tape, 32kB for Onstream).
 *
 * As of version 1.3 of the driver, the character device provides an abstract
 * continuous view of the media - any mix of block sizes (even 1 byte) on the
 * same backup/restore procedure is supported. The driver will internally
 * convert the requests to the recommended transfer unit, so a mismatch between
 * the user's block size and the recommended size only results in a (slightly)
 * increased driver overhead, but no longer hurts performance. This is not
 * applicable to Onstream.
 */
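/*
 * Illustrative user-space sketch (not part of the driver; assumes the usual
 * /dev/ht0 character node): thanks to the re-blocking described above, the
 * read size does not have to match the tape block size.
 *
 *	int fd = open("/dev/ht0", O_RDONLY);
 *	char buf[64 * 1024];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		write(STDOUT_FILENO, buf, n);
 *	close(fd);
 *
 * A return value of 0 indicates a filemark or end of data, matching the
 * behaviour of idetape_chrdev_read() below.
 */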
  2452. static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
  2453. size_t count, loff_t *ppos)
  2454. {
  2455. struct ide_tape_obj *tape = ide_tape_f(file);
  2456. ide_drive_t *drive = tape->drive;
  2457. ssize_t bytes_read, temp, actually_read = 0, rc;
  2458. ssize_t ret = 0;
  2459. u16 ctl = *(u16 *)&tape->caps[12];
  2460. debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
  2461. if (tape->chrdev_dir != IDETAPE_DIR_READ) {
  2462. if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags))
  2463. if (count > tape->blk_size &&
  2464. (count % tape->blk_size) == 0)
  2465. tape->user_bs_factor = count / tape->blk_size;
  2466. }
  2467. rc = idetape_init_read(drive, tape->max_stages);
  2468. if (rc < 0)
  2469. return rc;
  2470. if (count == 0)
  2471. return (0);
  2472. if (tape->merge_stage_size) {
  2473. actually_read = min((unsigned int)(tape->merge_stage_size),
  2474. (unsigned int)count);
  2475. if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
  2476. actually_read))
  2477. ret = -EFAULT;
  2478. buf += actually_read;
  2479. tape->merge_stage_size -= actually_read;
  2480. count -= actually_read;
  2481. }
  2482. while (count >= tape->stage_size) {
  2483. bytes_read = idetape_add_chrdev_read_request(drive, ctl);
  2484. if (bytes_read <= 0)
  2485. goto finish;
  2486. if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
  2487. bytes_read))
  2488. ret = -EFAULT;
  2489. buf += bytes_read;
  2490. count -= bytes_read;
  2491. actually_read += bytes_read;
  2492. }
  2493. if (count) {
  2494. bytes_read = idetape_add_chrdev_read_request(drive, ctl);
  2495. if (bytes_read <= 0)
  2496. goto finish;
  2497. temp = min((unsigned long)count, (unsigned long)bytes_read);
  2498. if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
  2499. temp))
  2500. ret = -EFAULT;
  2501. actually_read += temp;
  2502. tape->merge_stage_size = bytes_read-temp;
  2503. }
  2504. finish:
  2505. if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
  2506. debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
  2507. idetape_space_over_filemarks(drive, MTFSF, 1);
  2508. return 0;
  2509. }
  2510. return ret ? ret : actually_read;
  2511. }

static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct ide_tape_obj *tape = ide_tape_f(file);
	ide_drive_t *drive = tape->drive;
	ssize_t actually_written = 0;
	ssize_t ret = 0;
	u16 ctl = *(u16 *)&tape->caps[12];

	/* The drive is write protected. */
	if (tape->write_prot)
		return -EACCES;

	debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);

	/* Initialize write operation */
	if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
		if (tape->chrdev_dir == IDETAPE_DIR_READ)
			idetape_discard_read_pipeline(drive, 1);
		if (tape->merge_stage || tape->merge_stage_size) {
			printk(KERN_ERR "ide-tape: merge_stage_size "
				"should be 0 now\n");
			tape->merge_stage_size = 0;
		}
		tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
		if (!tape->merge_stage)
			return -ENOMEM;
		tape->chrdev_dir = IDETAPE_DIR_WRITE;
		idetape_init_merge_stage(tape);

		/*
		 * Issue a write 0 command to ensure that DSC handshake is
		 * switched from completion mode to buffer available mode. No
		 * point in issuing this if DSC overlap isn't supported, some
		 * drives (Seagate STT3401A) will return an error.
		 */
		if (drive->dsc_overlap) {
			ssize_t retval = idetape_queue_rw_tail(drive,
							REQ_IDETAPE_WRITE, 0,
							tape->merge_stage->bh);
			if (retval < 0) {
				__idetape_kfree_stage(tape->merge_stage);
				tape->merge_stage = NULL;
				tape->chrdev_dir = IDETAPE_DIR_NONE;
				return retval;
			}
		}
	}
	if (count == 0)
		return (0);
	if (tape->restart_speed_control_req)
		idetape_restart_speed_control(drive);
	if (tape->merge_stage_size) {
		if (tape->merge_stage_size >= tape->stage_size) {
			printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
			tape->merge_stage_size = 0;
		}
		actually_written = min((unsigned int)
				(tape->stage_size - tape->merge_stage_size),
				(unsigned int)count);
		if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
						 actually_written))
			ret = -EFAULT;
		buf += actually_written;
		tape->merge_stage_size += actually_written;
		count -= actually_written;

		if (tape->merge_stage_size == tape->stage_size) {
			ssize_t retval;
			tape->merge_stage_size = 0;
			retval = idetape_add_chrdev_write_request(drive, ctl);
			if (retval <= 0)
				return (retval);
		}
	}
	while (count >= tape->stage_size) {
		ssize_t retval;
		if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
						 tape->stage_size))
			ret = -EFAULT;
		buf += tape->stage_size;
		count -= tape->stage_size;
		retval = idetape_add_chrdev_write_request(drive, ctl);
		actually_written += tape->stage_size;
		if (retval <= 0)
			return (retval);
	}
	if (count) {
		actually_written += count;
		if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
						 count))
			ret = -EFAULT;
		tape->merge_stage_size += count;
	}
	return ret ? ret : actually_written;
}
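
/*
 * Write a single filemark at the current position (used by MTWEOF and when
 * closing a write session).
 */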
  2603. static int idetape_write_filemark(ide_drive_t *drive)
  2604. {
  2605. struct ide_atapi_pc pc;
  2606. /* Write a filemark */
  2607. idetape_create_write_filemark_cmd(drive, &pc, 1);
  2608. if (idetape_queue_pc_tail(drive, &pc)) {
  2609. printk(KERN_ERR "ide-tape: Couldn't write a filemark\n");
  2610. return -EIO;
  2611. }
  2612. return 0;
  2613. }
  2614. /*
  2615. * Called from idetape_chrdev_ioctl when the general mtio MTIOCTOP ioctl is
  2616. * requested.
  2617. *
  2618. * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
  2619. * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
  2620. * usually not supported (it is supported in the rare case in which we crossed
  2621. * the filemark during our read-ahead pipelined operation mode).
  2622. *
  2623. * The following commands are currently not supported:
  2624. *
  2625. * MTFSS, MTBSS, MTWSM, MTSETDENSITY, MTSETDRVBUFFER, MT_ST_BOOLEANS,
  2626. * MT_ST_WRITE_THRESHOLD.
  2627. */
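
/*
 * For reference, these operations arrive through the standard mtio(4)
 * interface; a minimal userspace sketch (illustrative only) looks like:
 *
 *	struct mtop op = { .mt_op = MTWEOF, .mt_count = 1 };
 *	ioctl(tape_fd, MTIOCTOP, &op);		-- write one filemark
 */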

static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc pc;
	int i, retval;

	debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
			mt_op, mt_count);

	/* Commands which need our pipelined read-ahead stages. */
	switch (mt_op) {
	case MTFSF:
	case MTFSFM:
	case MTBSF:
	case MTBSFM:
		if (!mt_count)
			return 0;
		return idetape_space_over_filemarks(drive, mt_op, mt_count);
	default:
		break;
	}

	switch (mt_op) {
	case MTWEOF:
		if (tape->write_prot)
			return -EACCES;
		idetape_discard_read_pipeline(drive, 1);
		for (i = 0; i < mt_count; i++) {
			retval = idetape_write_filemark(drive);
			if (retval)
				return retval;
		}
		return 0;
	case MTREW:
		idetape_discard_read_pipeline(drive, 0);
		if (idetape_rewind_tape(drive))
			return -EIO;
		return 0;
	case MTLOAD:
		idetape_discard_read_pipeline(drive, 0);
		idetape_create_load_unload_cmd(drive, &pc,
					       IDETAPE_LU_LOAD_MASK);
		return idetape_queue_pc_tail(drive, &pc);
	case MTUNLOAD:
	case MTOFFL:
		/*
		 * If door is locked, attempt to unlock before
		 * attempting to eject.
		 */
		if (tape->door_locked) {
			if (idetape_create_prevent_cmd(drive, &pc, 0))
				if (!idetape_queue_pc_tail(drive, &pc))
					tape->door_locked = DOOR_UNLOCKED;
		}
		idetape_discard_read_pipeline(drive, 0);
		idetape_create_load_unload_cmd(drive, &pc,
					      !IDETAPE_LU_LOAD_MASK);
		retval = idetape_queue_pc_tail(drive, &pc);
		if (!retval)
			clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
		return retval;
	case MTNOP:
		idetape_discard_read_pipeline(drive, 0);
		return idetape_flush_tape_buffers(drive);
	case MTRETEN:
		idetape_discard_read_pipeline(drive, 0);
		idetape_create_load_unload_cmd(drive, &pc,
			IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
		return idetape_queue_pc_tail(drive, &pc);
	case MTEOM:
		idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
		return idetape_queue_pc_tail(drive, &pc);
	case MTERASE:
		(void)idetape_rewind_tape(drive);
		idetape_create_erase_cmd(&pc);
		return idetape_queue_pc_tail(drive, &pc);
	case MTSETBLK:
		if (mt_count) {
			if (mt_count < tape->blk_size ||
			    mt_count % tape->blk_size)
				return -EIO;
			tape->user_bs_factor = mt_count / tape->blk_size;
			clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
		} else
			set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
		return 0;
	case MTSEEK:
		idetape_discard_read_pipeline(drive, 0);
		return idetape_position_tape(drive,
			mt_count * tape->user_bs_factor, tape->partition, 0);
	case MTSETPART:
		idetape_discard_read_pipeline(drive, 0);
		return idetape_position_tape(drive, 0, mt_count, 0);
	case MTFSR:
	case MTBSR:
	case MTLOCK:
		if (!idetape_create_prevent_cmd(drive, &pc, 1))
			return 0;
		retval = idetape_queue_pc_tail(drive, &pc);
		if (retval)
			return retval;
		tape->door_locked = DOOR_EXPLICITLY_LOCKED;
		return 0;
	case MTUNLOCK:
		if (!idetape_create_prevent_cmd(drive, &pc, 0))
			return 0;
		retval = idetape_queue_pc_tail(drive, &pc);
		if (retval)
			return retval;
		tape->door_locked = DOOR_UNLOCKED;
		return 0;
	default:
		printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
				mt_op);
		return -EIO;
	}
}

/*
 * Our character device ioctls. General mtio.h magnetic io commands are
 * supported here, and not in the corresponding block interface. Our own
 * ide-tape ioctls are supported on both interfaces.
 */
static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct ide_tape_obj *tape = ide_tape_f(file);
	ide_drive_t *drive = tape->drive;
	struct mtop mtop;
	struct mtget mtget;
	struct mtpos mtpos;
	int block_offset = 0, position = tape->first_frame;
	void __user *argp = (void __user *)arg;

	debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);

	tape->restart_speed_control_req = 1;
	if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
		idetape_empty_write_pipeline(drive);
		idetape_flush_tape_buffers(drive);
	}
	if (cmd == MTIOCGET || cmd == MTIOCPOS) {
		block_offset = idetape_pipeline_size(drive) /
			(tape->blk_size * tape->user_bs_factor);
		position = idetape_read_position(drive);
		if (position < 0)
			return -EIO;
	}
	switch (cmd) {
	case MTIOCTOP:
		if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
			return -EFAULT;
		return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
	case MTIOCGET:
		memset(&mtget, 0, sizeof(struct mtget));
		mtget.mt_type = MT_ISSCSI2;
		mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
		mtget.mt_dsreg =
			((tape->blk_size * tape->user_bs_factor)
			 << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;

		if (tape->drv_write_prot)
			mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);

		if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
			return -EFAULT;
		return 0;
	case MTIOCPOS:
		mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
		if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
			return -EFAULT;
		return 0;
	default:
		if (tape->chrdev_dir == IDETAPE_DIR_READ)
			idetape_discard_read_pipeline(drive, 1);
		return idetape_blkdev_ioctl(drive, cmd, arg);
	}
}

/*
 * Do a mode sense page 0 with block descriptor and if it succeeds set the
 * tape block size with the reported value.
 */
static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc pc;

	idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
	if (idetape_queue_pc_tail(drive, &pc)) {
		printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
		if (tape->blk_size == 0) {
			printk(KERN_WARNING "ide-tape: Cannot deal with zero "
					    "block size, assuming 32k\n");
			tape->blk_size = 32768;
		}
		return;
	}
	tape->blk_size = (pc.buf[4 + 5] << 16) +
				(pc.buf[4 + 6] << 8) +
				 pc.buf[4 + 7];
	tape->drv_write_prot = (pc.buf[2] & 0x80) >> 7;
}
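
/*
 * Character device open: the minor number selects the drive (low bits) and
 * the rewind-on-close behaviour (bit 7 set selects the non-rewinding "n%s"
 * node).  We wait up to 60 seconds for the drive to become ready, read the
 * current position, refresh block size / write-protect state and lock the
 * door while the device is in use.
 */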

static int idetape_chrdev_open(struct inode *inode, struct file *filp)
{
	unsigned int minor = iminor(inode), i = minor & ~0xc0;
	ide_drive_t *drive;
	idetape_tape_t *tape;
	struct ide_atapi_pc pc;
	int retval;

	if (i >= MAX_HWIFS * MAX_DRIVES)
		return -ENXIO;

	tape = ide_tape_chrdev_get(i);
	if (!tape)
		return -ENXIO;

	debug_log(DBG_CHRDEV, "Enter %s\n", __func__);

	/*
	 * We really want to do nonseekable_open(inode, filp); here, but some
	 * versions of tar incorrectly call lseek on tapes and bail out if
	 * that fails. So we disallow pread() and pwrite(), but permit lseeks.
	 */
	filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);

	drive = tape->drive;

	filp->private_data = tape;

	if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) {
		retval = -EBUSY;
		goto out_put_tape;
	}

	retval = idetape_wait_ready(drive, 60 * HZ);
	if (retval) {
		clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
		printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
		goto out_put_tape;
	}

	idetape_read_position(drive);
	if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
		(void)idetape_rewind_tape(drive);

	if (tape->chrdev_dir != IDETAPE_DIR_READ)
		clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);

	/* Read block size and write protect status from drive. */
	ide_tape_get_bsize_from_bdesc(drive);

	/* Set write protect flag if device is opened as read-only. */
	if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
		tape->write_prot = 1;
	else
		tape->write_prot = tape->drv_write_prot;

	/* Make sure drive isn't write protected if user wants to write. */
	if (tape->write_prot) {
		if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
		    (filp->f_flags & O_ACCMODE) == O_RDWR) {
			clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
			retval = -EROFS;
			goto out_put_tape;
		}
	}

	/* Lock the tape drive door so user can't eject. */
	if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
		if (idetape_create_prevent_cmd(drive, &pc, 1)) {
			if (!idetape_queue_pc_tail(drive, &pc)) {
				if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
					tape->door_locked = DOOR_LOCKED;
			}
		}
	}
	idetape_restart_speed_control(drive);
	tape->restart_speed_control_req = 0;
	return 0;

out_put_tape:
	ide_tape_put(tape);
	return retval;
}
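
/*
 * Finish a write session: flush the partially filled merge stage, pad the
 * data with zeros so it ends on a block boundary, write a closing filemark
 * and flush the drive's internal buffers.
 */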

static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
{
	idetape_tape_t *tape = drive->driver_data;

	idetape_empty_write_pipeline(drive);
	tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
	if (tape->merge_stage != NULL) {
		idetape_pad_zeros(drive, tape->blk_size *
				(tape->user_bs_factor - 1));
		__idetape_kfree_stage(tape->merge_stage);
		tape->merge_stage = NULL;
	}
	idetape_write_filemark(drive);
	idetape_flush_tape_buffers(drive);
	idetape_flush_tape_buffers(drive);
}
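
/*
 * Character device release: close out any pending write, drain or discard
 * the read pipeline, rewind if this is a rewinding device node (minor < 128)
 * and unlock the door if it was locked on open.
 */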

static int idetape_chrdev_release(struct inode *inode, struct file *filp)
{
	struct ide_tape_obj *tape = ide_tape_f(filp);
	ide_drive_t *drive = tape->drive;
	struct ide_atapi_pc pc;
	unsigned int minor = iminor(inode);

	lock_kernel();
	tape = drive->driver_data;

	debug_log(DBG_CHRDEV, "Enter %s\n", __func__);

	if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
		idetape_write_release(drive, minor);
	if (tape->chrdev_dir == IDETAPE_DIR_READ) {
		if (minor < 128)
			idetape_discard_read_pipeline(drive, 1);
		else
			idetape_wait_for_pipeline(drive);
	}
	if (tape->cache_stage != NULL) {
		__idetape_kfree_stage(tape->cache_stage);
		tape->cache_stage = NULL;
	}
	if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
		(void) idetape_rewind_tape(drive);
	if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
		if (tape->door_locked == DOOR_LOCKED) {
			if (idetape_create_prevent_cmd(drive, &pc, 0)) {
				if (!idetape_queue_pc_tail(drive, &pc))
					tape->door_locked = DOOR_UNLOCKED;
			}
		}
	}
	clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
	ide_tape_put(tape);
	unlock_kernel();
	return 0;
}

/*
 * Check the contents of the ATAPI IDENTIFY command results. We return:
 *
 * 1 - If the tape can be supported by us, based on the information we have
 * so far.
 *
 * 0 - If this tape drive is not currently supported by us.
 */
static int idetape_identify_device(ide_drive_t *drive)
{
	u8 gcw[2], protocol, device_type, removable, packet_size;

	if (drive->id_read == 0)
		return 1;

	*((unsigned short *) &gcw) = drive->id->config;

	protocol = (gcw[1] & 0xC0) >> 6;
	device_type = gcw[1] & 0x1F;
	removable = !!(gcw[0] & 0x80);
	packet_size = gcw[0] & 0x3;

	/* Check that we can support this device */
	if (protocol != 2)
		printk(KERN_ERR "ide-tape: Protocol (0x%02x) is not ATAPI\n",
				protocol);
	else if (device_type != 1)
		printk(KERN_ERR "ide-tape: Device type (0x%02x) is not set "
				"to tape\n", device_type);
	else if (!removable)
		printk(KERN_ERR "ide-tape: The removable flag is not set\n");
	else if (packet_size != 0) {
		printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12"
				" bytes\n", packet_size);
	} else
		return 1;
	return 0;
}
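
/*
 * Issue an INQUIRY packet command and log the drive's vendor, product and
 * firmware revision strings against the ide-tape device name.
 */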

static void idetape_get_inquiry_results(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc pc;
	char fw_rev[6], vendor_id[10], product_id[18];

	idetape_create_inquiry_cmd(&pc);
	if (idetape_queue_pc_tail(drive, &pc)) {
		printk(KERN_ERR "ide-tape: %s: can't get INQUIRY results\n",
				tape->name);
		return;
	}
	memcpy(vendor_id, &pc.buf[8], 8);
	memcpy(product_id, &pc.buf[16], 16);
	memcpy(fw_rev, &pc.buf[32], 4);

	ide_fixstring(vendor_id, 10, 0);
	ide_fixstring(product_id, 18, 0);
	ide_fixstring(fw_rev, 6, 0);

	printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
			drive->name, tape->name, vendor_id, product_id, fw_rev);
}

/*
 * Ask the tape about its various parameters. In particular, we will adjust
 * our data transfer buffer size to the recommended value as returned by the
 * tape.
 */
static void idetape_get_mode_sense_results(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc pc;
	u8 *caps;
	u16 speed, max_speed;

	idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
	if (idetape_queue_pc_tail(drive, &pc)) {
		printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
				" some default values\n");
		tape->blk_size = 512;
		put_unaligned(52, (u16 *)&tape->caps[12]);
		put_unaligned(540, (u16 *)&tape->caps[14]);
		put_unaligned(6*52, (u16 *)&tape->caps[16]);
		return;
	}
	caps = pc.buf + 4 + pc.buf[3];

	/* convert to host order and save for later use */
	speed = be16_to_cpu(*(u16 *)&caps[14]);
	max_speed = be16_to_cpu(*(u16 *)&caps[8]);

	put_unaligned(max_speed, (u16 *)&caps[8]);
	put_unaligned(be16_to_cpu(*(u16 *)&caps[12]), (u16 *)&caps[12]);
	put_unaligned(speed, (u16 *)&caps[14]);
	put_unaligned(be16_to_cpu(*(u16 *)&caps[16]), (u16 *)&caps[16]);

	if (!speed) {
		printk(KERN_INFO "ide-tape: %s: invalid tape speed "
				"(assuming 650KB/sec)\n", drive->name);
		put_unaligned(650, (u16 *)&caps[14]);
	}
	if (!max_speed) {
		printk(KERN_INFO "ide-tape: %s: invalid max_speed "
				"(assuming 650KB/sec)\n", drive->name);
		put_unaligned(650, (u16 *)&caps[8]);
	}

	memcpy(&tape->caps, caps, 20);
	if (caps[7] & 0x02)
		tape->blk_size = 512;
	else if (caps[7] & 0x04)
		tape->blk_size = 1024;
}
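
/*
 * /proc/ide settings exported for this drive: read-only counters (buffer and
 * pipeline statistics, measured speeds) plus a few tunables such as the
 * pipeline sizes, dsc_overlap and the DSC polling frequency.
 */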

#ifdef CONFIG_IDE_PROC_FS
static void idetape_add_settings(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
			1, 2, (u16 *)&tape->caps[16], NULL);
	ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
			tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
	ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
			tape->stage_size / 1024, 1, &tape->max_stages, NULL);
	ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
			tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
	ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
			0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
			NULL);
	ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
			0xffff, tape->stage_size / 1024, 1,
			&tape->nr_pending_stages, NULL);
	ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
			1, 1, (u16 *)&tape->caps[14], NULL);
	ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
			1024, &tape->stage_size, NULL);
	ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
			IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
			NULL);
	ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
			1, &drive->dsc_overlap, NULL);
	ide_add_setting(drive, "pipeline_head_speed_c", SETTING_READ, TYPE_INT,
			0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed,
			NULL);
	ide_add_setting(drive, "pipeline_head_speed_u", SETTING_READ, TYPE_INT,
			0, 0xffff, 1, 1,
			&tape->uncontrolled_pipeline_head_speed, NULL);
	ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
			1, 1, &tape->avg_speed, NULL);
	ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
			1, &tape->debug_mask, NULL);
}
#else
static inline void idetape_add_settings(ide_drive_t *drive) { ; }
#endif

/*
 * The function below is called to:
 *
 * 1. Initialize our various state variables.
 * 2. Ask the tape for its capabilities.
 * 3. Allocate a buffer which will be used for data transfer. The buffer size
 * is chosen based on the recommendation which we received in step 2.
 *
 * Note that at this point ide.c already assigned us an irq, so that we can
 * queue requests here and wait for their completion.
 */
static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
{
	unsigned long t1, tmid, tn, t;
	int speed;
	int stage_size;
	u8 gcw[2];
	struct sysinfo si;
	u16 *ctl = (u16 *)&tape->caps[12];

	spin_lock_init(&tape->lock);
	drive->dsc_overlap = 1;
	if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
		printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
				 tape->name);
		drive->dsc_overlap = 0;
	}
	/* Seagate Travan drives do not support DSC overlap. */
	if (strstr(drive->id->model, "Seagate STT3401"))
		drive->dsc_overlap = 0;

	tape->minor = minor;
	tape->name[0] = 'h';
	tape->name[1] = 't';
	tape->name[2] = '0' + minor;
	tape->chrdev_dir = IDETAPE_DIR_NONE;
	tape->pc = tape->pc_stack;
	tape->max_insert_speed = 10000;
	tape->speed_control = 1;
	*((unsigned short *) &gcw) = drive->id->config;

	/* Command packet DRQ type */
	if (((gcw[0] & 0x60) >> 5) == 1)
		set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);

	tape->min_pipeline = 10;
	tape->max_pipeline = 10;
	tape->max_stages   = 10;

	idetape_get_inquiry_results(drive);
	idetape_get_mode_sense_results(drive);
	ide_tape_get_bsize_from_bdesc(drive);
	tape->user_bs_factor = 1;
	tape->stage_size = *ctl * tape->blk_size;
	while (tape->stage_size > 0xffff) {
		printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
		*ctl /= 2;
		tape->stage_size = *ctl * tape->blk_size;
	}
	stage_size = tape->stage_size;
	tape->pages_per_stage = stage_size / PAGE_SIZE;
	if (stage_size % PAGE_SIZE) {
		tape->pages_per_stage++;
		tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;
	}

	/* Select the "best" DSC read/write polling freq and pipeline size. */
	speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);

	tape->max_stages = speed * 1000 * 10 / tape->stage_size;

	/* Limit memory use for pipeline to 10% of physical memory */
	si_meminfo(&si);
	if (tape->max_stages * tape->stage_size >
			si.totalram * si.mem_unit / 10)
		tape->max_stages =
			si.totalram * si.mem_unit / (10 * tape->stage_size);

	tape->max_stages   = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
	tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
	tape->max_pipeline =
		min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
	if (tape->max_stages == 0) {
		tape->max_stages   = 1;
		tape->min_pipeline = 1;
		tape->max_pipeline = 1;
	}

	t1 = (tape->stage_size * HZ) / (speed * 1000);
	tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
	tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);

	if (tape->max_stages)
		t = tn;
	else
		t = t1;

	/*
	 * Ensure that the number we got makes sense; limit it within
	 * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
	 */
	tape->best_dsc_rw_freq = max_t(unsigned long,
				min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
				IDETAPE_DSC_RW_MIN);
	printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
		"%dkB pipeline, %lums tDSC%s\n",
		drive->name, tape->name, *(u16 *)&tape->caps[14],
		(*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
		tape->stage_size / 1024,
		tape->max_stages * tape->stage_size / 1024,
		tape->best_dsc_rw_freq * 1000 / HZ,
		drive->using_dma ? ", DMA":"");

	idetape_add_settings(drive);
}
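
/*
 * Driver-model teardown: ide_tape_remove() unhooks the drive from the IDE
 * layer and drops our reference; the final kref put lands in
 * ide_tape_release(), which destroys the sysfs nodes and frees the tape
 * object.
 */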

static void ide_tape_remove(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	ide_proc_unregister_driver(drive, tape->driver);

	ide_unregister_region(tape->disk);

	ide_tape_put(tape);
}

static void ide_tape_release(struct kref *kref)
{
	struct ide_tape_obj *tape = to_ide_tape(kref);
	ide_drive_t *drive = tape->drive;
	struct gendisk *g = tape->disk;

	BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);

	drive->dsc_overlap = 0;
	drive->driver_data = NULL;
	device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
	device_destroy(idetape_sysfs_class,
			MKDEV(IDETAPE_MAJOR, tape->minor + 128));
	idetape_devs[tape->minor] = NULL;
	g->private_data = NULL;
	put_disk(g);
	kfree(tape);
}

#ifdef CONFIG_IDE_PROC_FS
static int proc_idetape_read_name
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	ide_drive_t	*drive = (ide_drive_t *) data;
	idetape_tape_t	*tape = drive->driver_data;
	char		*out = page;
	int		len;

	len = sprintf(out, "%s\n", tape->name);
	PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}

static ide_proc_entry_t idetape_proc[] = {
	{ "capacity",	S_IFREG|S_IRUGO,	proc_ide_read_capacity, NULL },
	{ "name",	S_IFREG|S_IRUGO,	proc_idetape_read_name,	NULL },
	{ NULL, 0, NULL, NULL }
};
#endif

static int ide_tape_probe(ide_drive_t *);

static ide_driver_t idetape_driver = {
	.gen_driver = {
		.owner		= THIS_MODULE,
		.name		= "ide-tape",
		.bus		= &ide_bus_type,
	},
	.probe			= ide_tape_probe,
	.remove			= ide_tape_remove,
	.version		= IDETAPE_VERSION,
	.media			= ide_tape,
	.supports_dsc_overlap 	= 1,
	.do_request		= idetape_do_request,
	.end_request		= idetape_end_request,
	.error			= __ide_error,
	.abort			= __ide_abort,
#ifdef CONFIG_IDE_PROC_FS
	.proc			= idetape_proc,
#endif
};

/* Our character device supporting functions, passed to register_chrdev. */
static const struct file_operations idetape_fops = {
	.owner		= THIS_MODULE,
	.read		= idetape_chrdev_read,
	.write		= idetape_chrdev_write,
	.ioctl		= idetape_chrdev_ioctl,
	.open		= idetape_chrdev_open,
	.release	= idetape_chrdev_release,
};
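
/*
 * Block device interface for the ide-tape gendisk node.  It is only used to
 * take a reference on the tape object and to forward generic IDE ioctls;
 * actual data transfer goes through the character device above.
 */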

static int idetape_open(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ide_tape_obj *tape;

	tape = ide_tape_get(disk);
	if (!tape)
		return -ENXIO;

	return 0;
}

static int idetape_release(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ide_tape_obj *tape = ide_tape_g(disk);

	ide_tape_put(tape);

	return 0;
}

static int idetape_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct block_device *bdev = inode->i_bdev;
	struct ide_tape_obj *tape = ide_tape_g(bdev->bd_disk);
	ide_drive_t *drive = tape->drive;
	int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);

	if (err == -EINVAL)
		err = idetape_blkdev_ioctl(drive, cmd, arg);

	return err;
}

static struct block_device_operations idetape_block_ops = {
	.owner		= THIS_MODULE,
	.open		= idetape_open,
	.release	= idetape_release,
	.ioctl		= idetape_ioctl,
};
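
/*
 * Probe: claim ATAPI tape drives that pass idetape_identify_device(), unless
 * the user asked for ide-scsi emulation.  Allocates the tape object and its
 * gendisk, picks the first free minor, runs idetape_setup() and creates the
 * rewinding ("%s") and non-rewinding ("n%s") character device nodes.
 */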

static int ide_tape_probe(ide_drive_t *drive)
{
	idetape_tape_t *tape;
	struct gendisk *g;
	int minor;

	if (!strstr("ide-tape", drive->driver_req))
		goto failed;
	if (!drive->present)
		goto failed;
	if (drive->media != ide_tape)
		goto failed;
	if (!idetape_identify_device(drive)) {
		printk(KERN_ERR "ide-tape: %s: not supported by this version of"
				" the driver\n", drive->name);
		goto failed;
	}
	if (drive->scsi) {
		printk(KERN_INFO "ide-tape: passing drive %s to ide-scsi"
				 " emulation.\n", drive->name);
		goto failed;
	}
	tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
	if (tape == NULL) {
		printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
				drive->name);
		goto failed;
	}

	g = alloc_disk(1 << PARTN_BITS);
	if (!g)
		goto out_free_tape;

	ide_init_disk(g, drive);

	ide_proc_register_driver(drive, &idetape_driver);

	kref_init(&tape->kref);

	tape->drive = drive;
	tape->driver = &idetape_driver;
	tape->disk = g;

	g->private_data = &tape->driver;

	drive->driver_data = tape;

	mutex_lock(&idetape_ref_mutex);
	for (minor = 0; idetape_devs[minor]; minor++)
		;
	idetape_devs[minor] = tape;
	mutex_unlock(&idetape_ref_mutex);

	idetape_setup(drive, tape, minor);

	device_create(idetape_sysfs_class, &drive->gendev,
		      MKDEV(IDETAPE_MAJOR, minor), "%s", tape->name);
	device_create(idetape_sysfs_class, &drive->gendev,
		      MKDEV(IDETAPE_MAJOR, minor + 128), "n%s", tape->name);

	g->fops = &idetape_block_ops;
	ide_register_region(g);

	return 0;

out_free_tape:
	kfree(tape);
failed:
	return -ENODEV;
}
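
/*
 * Module init/exit: idetape_init() creates the "ide_tape" sysfs class,
 * registers the "ht" character device major and finally registers the IDE
 * driver; idetape_exit() undoes those registrations.
 */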

static void __exit idetape_exit(void)
{
	driver_unregister(&idetape_driver.gen_driver);
	class_destroy(idetape_sysfs_class);
	unregister_chrdev(IDETAPE_MAJOR, "ht");
}

static int __init idetape_init(void)
{
	int error = 1;

	idetape_sysfs_class = class_create(THIS_MODULE, "ide_tape");
	if (IS_ERR(idetape_sysfs_class)) {
		idetape_sysfs_class = NULL;
		printk(KERN_ERR "Unable to create sysfs class for ide tapes\n");
		error = -EBUSY;
		goto out;
	}

	if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
		printk(KERN_ERR "ide-tape: Failed to register chrdev"
				" interface\n");
		error = -EBUSY;
		goto out_free_class;
	}

	error = driver_register(&idetape_driver.gen_driver);
	if (error)
		goto out_free_chrdev;

	return 0;

out_free_chrdev:
	/* Undo the chrdev registration if the IDE driver failed to register. */
	unregister_chrdev(IDETAPE_MAJOR, "ht");
out_free_class:
	class_destroy(idetape_sysfs_class);
out:
	return error;
}

MODULE_ALIAS("ide:*m-tape*");
module_init(idetape_init);
module_exit(idetape_exit);
MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);
MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
MODULE_LICENSE("GPL");