ide-tape.c

  1. /*
  2. * IDE ATAPI streaming tape driver.
  3. *
  4. * Copyright (C) 1995-1999 Gadi Oxman <gadio@netvision.net.il>
  5. * Copyright (C) 2003-2005 Bartlomiej Zolnierkiewicz
  6. *
  7. * This driver was constructed as a student project in the software laboratory
  8. * of the faculty of electrical engineering at the Technion - Israel
  9. * Institute of Technology, under the guidance of Avner Lottem and Dr. Ilana David.
  10. *
  11. * It is hereby placed under the terms of the GNU general public license.
  12. * (See linux/COPYING).
  13. *
  14. * For a historical changelog see
  15. * Documentation/ide/ChangeLog.ide-tape.1995-2002
  16. */
  17. #define IDETAPE_VERSION "1.20"
  18. #include <linux/module.h>
  19. #include <linux/types.h>
  20. #include <linux/string.h>
  21. #include <linux/kernel.h>
  22. #include <linux/delay.h>
  23. #include <linux/timer.h>
  24. #include <linux/mm.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/jiffies.h>
  27. #include <linux/major.h>
  28. #include <linux/errno.h>
  29. #include <linux/genhd.h>
  30. #include <linux/slab.h>
  31. #include <linux/pci.h>
  32. #include <linux/ide.h>
  33. #include <linux/smp_lock.h>
  34. #include <linux/completion.h>
  35. #include <linux/bitops.h>
  36. #include <linux/mutex.h>
  37. #include <scsi/scsi.h>
  38. #include <asm/byteorder.h>
  39. #include <linux/irq.h>
  40. #include <linux/uaccess.h>
  41. #include <linux/io.h>
  42. #include <asm/unaligned.h>
  43. #include <linux/mtio.h>
  44. enum {
  45. /* output errors only */
  46. DBG_ERR = (1 << 0),
  47. /* output all sense key/asc */
  48. DBG_SENSE = (1 << 1),
  49. /* info regarding all chrdev-related procedures */
  50. DBG_CHRDEV = (1 << 2),
  51. /* all remaining procedures */
  52. DBG_PROCS = (1 << 3),
  53. /* buffer alloc info (pc_stack & rq_stack) */
  54. DBG_PCRQ_STACK = (1 << 4),
  55. };
  56. /* define to see debug info */
  57. #define IDETAPE_DEBUG_LOG 0
  58. #if IDETAPE_DEBUG_LOG
  59. #define debug_log(lvl, fmt, args...) \
  60. { \
  61. if (tape->debug_mask & lvl) \
  62. printk(KERN_INFO "ide-tape: " fmt, ## args); \
  63. }
  64. #else
  65. #define debug_log(lvl, fmt, args...) do {} while (0)
  66. #endif
  67. /**************************** Tunable parameters *****************************/
  68. /*
  69. * Pipelined mode parameters.
  70. *
  71. * We try to use the minimum number of stages which is enough to keep the tape
  72. * constantly streaming. To accomplish that, we implement a feedback loop around
  73. * the maximum number of stages:
  74. *
  75. * We start from MIN maximum stages (we will not even use MIN stages if we don't
  76. * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
  77. * pipeline is empty, until we reach the optimum value or until we reach MAX.
  78. *
  79. * Setting the following parameter to 0 is illegal: the pipelined mode cannot be
  80. * disabled (idetape_calculate_speeds() divides by tape->max_stages.)
  81. */
  82. #define IDETAPE_MIN_PIPELINE_STAGES 1
  83. #define IDETAPE_MAX_PIPELINE_STAGES 400
  84. #define IDETAPE_INCREASE_STAGES_RATE 20
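/*
 * Illustration (assuming min_pipeline/max_pipeline are initialized from the
 * MIN/MAX defines above; the initialization code is elsewhere): each time
 * idetape_end_request() below finds the pipeline empty with no error, it
 * raises tape->max_stages by (max_pipeline - min_pipeline) / 10, i.e. roughly
 * (400 - 1) / 10 = 39 stages per event, clamped to [min_pipeline, max_pipeline].
 */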
  85. /*
  86. * After each failed packet command we issue a request sense command and retry
  87. * the packet command IDETAPE_MAX_PC_RETRIES times.
  88. *
  89. * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
  90. */
  91. #define IDETAPE_MAX_PC_RETRIES 3
  92. /*
  93. * With each packet command, we allocate a buffer of IDETAPE_PC_BUFFER_SIZE
  94. * bytes. This is used for several packet commands (not for READ/WRITE commands).
  95. */
  96. #define IDETAPE_PC_BUFFER_SIZE 256
  97. /*
  98. * In various places in the driver, we need to allocate storage
  99. * for packet commands and requests, which will remain valid while
  100. * we leave the driver to wait for an interrupt or a timeout event.
  101. */
  102. #define IDETAPE_PC_STACK (10 + IDETAPE_MAX_PC_RETRIES)
  103. /*
  104. * Some drives (for example, Seagate STT3401A Travan) require a very long
  105. * timeout, because they don't return an interrupt or clear their busy bit
  106. * until after the command completes (even retension commands).
  107. */
  108. #define IDETAPE_WAIT_CMD (900*HZ)
  109. /*
  110. * The following parameter selects the point in the internal tape fifo
  111. * at which we start to refill the buffer. Decreasing this value improves
  112. * the system's latency and interactive response, while a higher value
  113. * might improve overall system throughput.
  114. */
  115. #define IDETAPE_FIFO_THRESHOLD 2
  116. /*
  117. * DSC polling parameters.
  118. *
  119. * Polling for DSC (a single bit in the status register) is a very important
  120. * function in ide-tape. There are two cases in which we poll for DSC:
  121. *
  122. * 1. Before a read/write packet command, to ensure that we can transfer data
  123. * from/to the tape's data buffers, without causing an actual media access.
  124. * In case the tape is not ready yet, we take out our request from the device
  125. * request queue, so that ide.c could service requests from the other device
  126. * on the same interface in the meantime.
  127. *
  128. * 2. After the successful initialization of a "media access packet command",
  129. * which is a command that can take a long time to complete (the interval can
  130. * range from several seconds to even an hour). Again, we postpone our request
  131. * in the middle to free the bus for the other device. The polling frequency
  132. * here should be lower than the read/write frequency since those media access
  133. * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
  134. * (2 seconds), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
  135. * (5 minutes), we switch to a lower frequency - IDETAPE_DSC_MA_SLOW (30 seconds).
  136. *
  137. * We also set a timeout for the timer, in case something goes wrong. The
  138. * timeout should be longer than the maximum execution time of a tape operation.
  139. */
  140. /* DSC timings. */
  141. #define IDETAPE_DSC_RW_MIN 5*HZ/100 /* 50 msec */
  142. #define IDETAPE_DSC_RW_MAX 40*HZ/100 /* 400 msec */
  143. #define IDETAPE_DSC_RW_TIMEOUT 2*60*HZ /* 2 minutes */
  144. #define IDETAPE_DSC_MA_FAST 2*HZ /* 2 seconds */
  145. #define IDETAPE_DSC_MA_THRESHOLD 5*60*HZ /* 5 minutes */
  146. #define IDETAPE_DSC_MA_SLOW 30*HZ /* 30 seconds */
  147. #define IDETAPE_DSC_MA_TIMEOUT 2*60*60*HZ /* 2 hours */
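/*
 * Worked example, assuming HZ=100 (the exact jiffy counts depend on the
 * kernel's HZ configuration): IDETAPE_DSC_RW_MIN = 5*100/100 = 5 jiffies
 * (50 msec), IDETAPE_DSC_RW_MAX = 40 jiffies (400 msec),
 * IDETAPE_DSC_MA_FAST = 200 jiffies (2 seconds) and
 * IDETAPE_DSC_MA_SLOW = 3000 jiffies (30 seconds).
 */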
  148. /*************************** End of tunable parameters ***********************/
  149. /* Read/Write error simulation */
  150. #define SIMULATE_ERRORS 0
  151. /* tape directions */
  152. enum {
  153. IDETAPE_DIR_NONE = (1 << 0),
  154. IDETAPE_DIR_READ = (1 << 1),
  155. IDETAPE_DIR_WRITE = (1 << 2),
  156. };
  157. struct idetape_bh {
  158. u32 b_size;
  159. atomic_t b_count;
  160. struct idetape_bh *b_reqnext;
  161. char *b_data;
  162. };
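/*
 * Rough sketch of how the buffer chain is used (see idetape_input_buffers()
 * and idetape_output_buffers() below): b_size is the capacity of one chunk,
 * b_count tracks how many bytes of it are currently filled, and b_reqnext
 * links to the next chunk. The transfer helpers walk the chain, filling or
 * draining one bh at a time and advancing pc->bh when a chunk is exhausted.
 */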
  163. /* Tape door status */
  164. #define DOOR_UNLOCKED 0
  165. #define DOOR_LOCKED 1
  166. #define DOOR_EXPLICITLY_LOCKED 2
  167. /* Some defines for the SPACE command */
  168. #define IDETAPE_SPACE_OVER_FILEMARK 1
  169. #define IDETAPE_SPACE_TO_EOD 3
  170. /* Some defines for the LOAD UNLOAD command */
  171. #define IDETAPE_LU_LOAD_MASK 1
  172. #define IDETAPE_LU_RETENSION_MASK 2
  173. #define IDETAPE_LU_EOT_MASK 4
  174. /*
  175. * Special requests for our block device strategy routine.
  176. *
  177. * In order to service a character device command, we add special requests to
  178. * the tail of our block device request queue and wait for their completion.
  179. */
  180. enum {
  181. REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
  182. REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
  183. REQ_IDETAPE_READ = (1 << 2),
  184. REQ_IDETAPE_WRITE = (1 << 3),
  185. };
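/*
 * Sketch of the flow, as described in the comments further down: the chrdev
 * layer builds a special request with one of the REQ_IDETAPE_* bits in
 * rq->cmd[0] and queues it at the tail of the request queue (via
 * idetape_queue_pc_tail()/idetape_queue_rw_tail()), while error recovery
 * (idetape_retry_pc()) queues a REQ_IDETAPE_PC1 request sense at the head
 * through idetape_queue_pc_head() so it runs before anything else.
 */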
  186. /* Error codes returned in rq->errors to the higher part of the driver. */
  187. #define IDETAPE_ERROR_GENERAL 101
  188. #define IDETAPE_ERROR_FILEMARK 102
  189. #define IDETAPE_ERROR_EOD 103
  190. /* Structures related to the MODE SELECT / MODE SENSE packet commands. */
  191. #define IDETAPE_BLOCK_DESCRIPTOR 0
  192. #define IDETAPE_CAPABILITIES_PAGE 0x2a
  193. /* Tape flag bits values. */
  194. enum {
  195. IDETAPE_FLAG_IGNORE_DSC = (1 << 0),
  196. /* 0 When the tape position is unknown */
  197. IDETAPE_FLAG_ADDRESS_VALID = (1 << 1),
  198. /* Device already opened */
  199. IDETAPE_FLAG_BUSY = (1 << 2),
  200. /* Error detected in a pipeline stage */
  201. IDETAPE_FLAG_PIPELINE_ERR = (1 << 3),
  202. /* Attempt to auto-detect the current user block size */
  203. IDETAPE_FLAG_DETECT_BS = (1 << 4),
  204. /* Currently on a filemark */
  205. IDETAPE_FLAG_FILEMARK = (1 << 5),
  206. /* DRQ interrupt device */
  207. IDETAPE_FLAG_DRQ_INTERRUPT = (1 << 6),
  208. /* pipeline active */
  209. IDETAPE_FLAG_PIPELINE_ACTIVE = (1 << 7),
  210. /* 0 = no tape is loaded, so we don't rewind after ejecting */
  211. IDETAPE_FLAG_MEDIUM_PRESENT = (1 << 8),
  212. };
  213. /* A pipeline stage. */
  214. typedef struct idetape_stage_s {
  215. struct request rq; /* The corresponding request */
  216. struct idetape_bh *bh; /* The data buffers */
  217. struct idetape_stage_s *next; /* Pointer to the next stage */
  218. } idetape_stage_t;
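/*
 * Pipeline layout sketch (see the idetape_tape_t fields below): stages form a
 * singly linked list from first_stage to last_stage via ->next. active_stage
 * is the stage whose request currently sits in the device request queue,
 * next_stage is the one that will be activated after it, and completed stages
 * are freed from the head by idetape_remove_stage_head().
 */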
  219. /*
  220. * Most of our global data which we need to save even as we leave the driver due
  221. * to an interrupt or a timer event is stored in the struct defined below.
  222. */
  223. typedef struct ide_tape_obj {
  224. ide_drive_t *drive;
  225. ide_driver_t *driver;
  226. struct gendisk *disk;
  227. struct kref kref;
  228. /*
  229. * Since a typical character device operation requires more
  230. * than one packet command, we provide here enough memory
  231. * for the maximum number of interconnected packet commands.
  232. * The packet commands are stored in the circular array pc_stack.
  233. * pc_stack_index points to the next free entry, and wraps around
  234. * to the start when we reach the end of the array.
  235. *
  236. * pc points to the current processed packet command.
  237. *
  238. * failed_pc points to the last failed packet command, or contains
  239. * NULL if we do not need to retry any packet command. This is
  240. * required since an additional packet command is needed before the
  241. * retry, to get detailed information on what went wrong.
  242. */
  243. /* Current packet command */
  244. struct ide_atapi_pc *pc;
  245. /* Last failed packet command */
  246. struct ide_atapi_pc *failed_pc;
  247. /* Packet command stack */
  248. struct ide_atapi_pc pc_stack[IDETAPE_PC_STACK];
  249. /* Next free packet command storage space */
  250. int pc_stack_index;
  251. struct request rq_stack[IDETAPE_PC_STACK];
  252. /* We implement a circular array */
  253. int rq_stack_index;
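/*
 * Both stacks are used as circular arrays of IDETAPE_PC_STACK entries: the
 * index is reset to 0 when it reaches IDETAPE_PC_STACK and then
 * post-incremented, so allocations simply reuse the oldest slot (see
 * idetape_next_pc_storage() and idetape_next_rq_storage() below).
 */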
  254. /*
  255. * DSC polling variables.
  256. *
  257. * While polling for DSC we use postponed_rq to postpone the current
  258. * request so that ide.c will be able to service pending requests on the
  259. * other device. Note that at most we will have only one DSC (usually
  260. * data transfer) request in the device request queue. Additional
  261. * requests can be queued in our internal pipeline, but they will be
  262. * visible to ide.c only one at a time.
  263. */
  264. struct request *postponed_rq;
  265. /* The time in which we started polling for DSC */
  266. unsigned long dsc_polling_start;
  267. /* Timer used to poll for dsc */
  268. struct timer_list dsc_timer;
  269. /* Read/Write dsc polling frequency */
  270. unsigned long best_dsc_rw_freq;
  271. unsigned long dsc_poll_freq;
  272. unsigned long dsc_timeout;
  273. /* Read position information */
  274. u8 partition;
  275. /* Current block */
  276. unsigned int first_frame;
  277. /* Last error information */
  278. u8 sense_key, asc, ascq;
  279. /* Character device operation */
  280. unsigned int minor;
  281. /* device name */
  282. char name[4];
  283. /* Current character device data transfer direction */
  284. u8 chrdev_dir;
  285. /* tape block size, usually 512 or 1024 bytes */
  286. unsigned short blk_size;
  287. int user_bs_factor;
  288. /* Copy of the tape's Capabilities and Mechanical Page */
  289. u8 caps[20];
  290. /*
  291. * Active data transfer request parameters.
  292. *
  293. * At most, there is only one ide-tape originated data transfer request
  294. * in the device request queue. This allows ide.c to easily service
  295. * requests from the other device when we postpone our active request.
  296. * In the pipelined operation mode, we use our internal pipeline
  297. * structure to hold more data requests. The data buffer size is chosen
  298. * based on the tape's recommendation.
  299. */
  300. /* ptr to the request which is waiting in the device request queue */
  301. struct request *active_data_rq;
  302. /* Data buffer size chosen based on the tape's recommendation */
  303. int stage_size;
  304. idetape_stage_t *merge_stage;
  305. int merge_stage_size;
  306. struct idetape_bh *bh;
  307. char *b_data;
  308. int b_count;
  309. /*
  310. * Pipeline parameters.
  311. *
  312. * To accomplish non-pipelined mode, we simply set the following
  313. * variables to zero (or NULL, where appropriate).
  314. */
  315. /* Number of currently used stages */
  316. int nr_stages;
  317. /* Number of pending stages */
  318. int nr_pending_stages;
  319. /* We will not allocate more than this number of stages */
  320. int max_stages, min_pipeline, max_pipeline;
  321. /* The first stage which will be removed from the pipeline */
  322. idetape_stage_t *first_stage;
  323. /* The currently active stage */
  324. idetape_stage_t *active_stage;
  325. /* Will be serviced after the currently active request */
  326. idetape_stage_t *next_stage;
  327. /* New requests will be added to the pipeline here */
  328. idetape_stage_t *last_stage;
  329. /* Optional free stage which we can use */
  330. idetape_stage_t *cache_stage;
  331. int pages_per_stage;
  332. /* Wasted space in each stage */
  333. int excess_bh_size;
  334. /* Status/Action flags: long for set_bit */
  335. unsigned long flags;
  336. /* protects the ide-tape queue */
  337. spinlock_t lock;
  338. /* Measures average tape speed */
  339. unsigned long avg_time;
  340. int avg_size;
  341. int avg_speed;
  342. /* the door is currently locked */
  343. int door_locked;
  344. /* the tape hardware is write protected */
  345. char drv_write_prot;
  346. /* the tape is write protected (hardware or opened as read-only) */
  347. char write_prot;
  348. /*
  349. * Limit the number of times a request can be postponed, to avoid an
  350. * infinite postpone deadlock.
  351. */
  352. int postpone_cnt;
  353. /*
  354. * Measures number of frames:
  355. *
  356. * 1. written/read to/from the driver pipeline (pipeline_head).
  357. * 2. written/read to/from the tape buffers (idetape_bh).
  358. * 3. written/read by the tape to/from the media (tape_head).
  359. */
  360. int pipeline_head;
  361. int buffer_head;
  362. int tape_head;
  363. int last_tape_head;
  364. /* Speed control at the tape buffers input/output */
  365. unsigned long insert_time;
  366. int insert_size;
  367. int insert_speed;
  368. int max_insert_speed;
  369. int measure_insert_time;
  370. /* Speed regulation negative feedback loop */
  371. int speed_control;
  372. int pipeline_head_speed;
  373. int controlled_pipeline_head_speed;
  374. int uncontrolled_pipeline_head_speed;
  375. int controlled_last_pipeline_head;
  376. unsigned long uncontrolled_pipeline_head_time;
  377. unsigned long controlled_pipeline_head_time;
  378. int controlled_previous_pipeline_head;
  379. int uncontrolled_previous_pipeline_head;
  380. unsigned long controlled_previous_head_time;
  381. unsigned long uncontrolled_previous_head_time;
  382. int restart_speed_control_req;
  383. u32 debug_mask;
  384. } idetape_tape_t;
  385. static DEFINE_MUTEX(idetape_ref_mutex);
  386. static struct class *idetape_sysfs_class;
  387. #define to_ide_tape(obj) container_of(obj, struct ide_tape_obj, kref)
  388. #define ide_tape_g(disk) \
  389. container_of((disk)->private_data, struct ide_tape_obj, driver)
  390. static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
  391. {
  392. struct ide_tape_obj *tape = NULL;
  393. mutex_lock(&idetape_ref_mutex);
  394. tape = ide_tape_g(disk);
  395. if (tape)
  396. kref_get(&tape->kref);
  397. mutex_unlock(&idetape_ref_mutex);
  398. return tape;
  399. }
  400. static void ide_tape_release(struct kref *);
  401. static void ide_tape_put(struct ide_tape_obj *tape)
  402. {
  403. mutex_lock(&idetape_ref_mutex);
  404. kref_put(&tape->kref, ide_tape_release);
  405. mutex_unlock(&idetape_ref_mutex);
  406. }
  407. /*
  408. * The variables below are used for the character device interface. Additional
  409. * state variables are defined in our ide_drive_t structure.
  410. */
  411. static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];
  412. #define ide_tape_f(file) ((file)->private_data)
  413. static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
  414. {
  415. struct ide_tape_obj *tape = NULL;
  416. mutex_lock(&idetape_ref_mutex);
  417. tape = idetape_devs[i];
  418. if (tape)
  419. kref_get(&tape->kref);
  420. mutex_unlock(&idetape_ref_mutex);
  421. return tape;
  422. }
  423. static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
  424. unsigned int bcount)
  425. {
  426. struct idetape_bh *bh = pc->bh;
  427. int count;
  428. while (bcount) {
  429. if (bh == NULL) {
  430. printk(KERN_ERR "ide-tape: bh == NULL in "
  431. "idetape_input_buffers\n");
  432. ide_atapi_discard_data(drive, bcount);
  433. return;
  434. }
  435. count = min(
  436. (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
  437. bcount);
  438. HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
  439. atomic_read(&bh->b_count), count);
  440. bcount -= count;
  441. atomic_add(count, &bh->b_count);
  442. if (atomic_read(&bh->b_count) == bh->b_size) {
  443. bh = bh->b_reqnext;
  444. if (bh)
  445. atomic_set(&bh->b_count, 0);
  446. }
  447. }
  448. pc->bh = bh;
  449. }
  450. static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
  451. unsigned int bcount)
  452. {
  453. struct idetape_bh *bh = pc->bh;
  454. int count;
  455. while (bcount) {
  456. if (bh == NULL) {
  457. printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
  458. __func__);
  459. return;
  460. }
  461. count = min((unsigned int)pc->b_count, (unsigned int)bcount);
  462. HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count);
  463. bcount -= count;
  464. pc->b_data += count;
  465. pc->b_count -= count;
  466. if (!pc->b_count) {
  467. bh = bh->b_reqnext;
  468. pc->bh = bh;
  469. if (bh) {
  470. pc->b_data = bh->b_data;
  471. pc->b_count = atomic_read(&bh->b_count);
  472. }
  473. }
  474. }
  475. }
  476. static void idetape_update_buffers(struct ide_atapi_pc *pc)
  477. {
  478. struct idetape_bh *bh = pc->bh;
  479. int count;
  480. unsigned int bcount = pc->xferred;
  481. if (pc->flags & PC_FLAG_WRITING)
  482. return;
  483. while (bcount) {
  484. if (bh == NULL) {
  485. printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
  486. __func__);
  487. return;
  488. }
  489. count = min((unsigned int)bh->b_size, (unsigned int)bcount);
  490. atomic_set(&bh->b_count, count);
  491. if (atomic_read(&bh->b_count) == bh->b_size)
  492. bh = bh->b_reqnext;
  493. bcount -= count;
  494. }
  495. pc->bh = bh;
  496. }
  497. /*
  498. * idetape_next_pc_storage returns a pointer to a place in which we can
  499. * safely store a packet command, even though we intend to leave the
  500. * driver. A storage space for a maximum of IDETAPE_PC_STACK packet
  501. * commands is allocated at initialization time.
  502. */
  503. static struct ide_atapi_pc *idetape_next_pc_storage(ide_drive_t *drive)
  504. {
  505. idetape_tape_t *tape = drive->driver_data;
  506. debug_log(DBG_PCRQ_STACK, "pc_stack_index=%d\n", tape->pc_stack_index);
  507. if (tape->pc_stack_index == IDETAPE_PC_STACK)
  508. tape->pc_stack_index = 0;
  509. return (&tape->pc_stack[tape->pc_stack_index++]);
  510. }
  511. /*
  512. * idetape_next_rq_storage is used along with idetape_next_pc_storage.
  513. * Since we queue packet commands in the request queue, we need to
  514. * allocate a request, along with the allocation of a packet command.
  515. */
  516. /**************************************************************
  517. *                                                            *
  518. * This should get fixed to use kmalloc(.., GFP_ATOMIC)      *
  519. * followed later on by kfree(). -ml                         *
  520. *                                                            *
  521. **************************************************************/
  522. static struct request *idetape_next_rq_storage(ide_drive_t *drive)
  523. {
  524. idetape_tape_t *tape = drive->driver_data;
  525. debug_log(DBG_PCRQ_STACK, "rq_stack_index=%d\n", tape->rq_stack_index);
  526. if (tape->rq_stack_index == IDETAPE_PC_STACK)
  527. tape->rq_stack_index = 0;
  528. return (&tape->rq_stack[tape->rq_stack_index++]);
  529. }
  530. static void idetape_init_pc(struct ide_atapi_pc *pc)
  531. {
  532. memset(pc->c, 0, 12);
  533. pc->retries = 0;
  534. pc->flags = 0;
  535. pc->req_xfer = 0;
  536. pc->buf = pc->pc_buf;
  537. pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
  538. pc->bh = NULL;
  539. pc->b_data = NULL;
  540. }
  541. /*
  542. * called on each failed packet command retry to analyze the request sense. We
  543. * currently do not utilize this information.
  544. */
  545. static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
  546. {
  547. idetape_tape_t *tape = drive->driver_data;
  548. struct ide_atapi_pc *pc = tape->failed_pc;
  549. tape->sense_key = sense[2] & 0xF;
  550. tape->asc = sense[12];
  551. tape->ascq = sense[13];
  552. debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
  553. pc->c[0], tape->sense_key, tape->asc, tape->ascq);
  554. /* Correct pc->xferred by asking the tape. */
  555. if (pc->flags & PC_FLAG_DMA_ERROR) {
  556. pc->xferred = pc->req_xfer -
  557. tape->blk_size *
  558. be32_to_cpu(get_unaligned((u32 *)&sense[3]));
  559. idetape_update_buffers(pc);
  560. }
  561. /*
  562. * If error was the result of a zero-length read or write command,
  563. * with sense key=5, asc=0x22, ascq=0, let it slide. Some drives
  564. * (i.e. Seagate STT3401A Travan) don't support 0-length read/writes.
  565. */
  566. if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6)
  567. /* length == 0 */
  568. && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) {
  569. if (tape->sense_key == 5) {
  570. /* don't report an error, everything's ok */
  571. pc->error = 0;
  572. /* don't retry read/write */
  573. pc->flags |= PC_FLAG_ABORT;
  574. }
  575. }
  576. if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
  577. pc->error = IDETAPE_ERROR_FILEMARK;
  578. pc->flags |= PC_FLAG_ABORT;
  579. }
  580. if (pc->c[0] == WRITE_6) {
  581. if ((sense[2] & 0x40) || (tape->sense_key == 0xd
  582. && tape->asc == 0x0 && tape->ascq == 0x2)) {
  583. pc->error = IDETAPE_ERROR_EOD;
  584. pc->flags |= PC_FLAG_ABORT;
  585. }
  586. }
  587. if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
  588. if (tape->sense_key == 8) {
  589. pc->error = IDETAPE_ERROR_EOD;
  590. pc->flags |= PC_FLAG_ABORT;
  591. }
  592. if (!(pc->flags & PC_FLAG_ABORT) &&
  593. pc->xferred)
  594. pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
  595. }
  596. }
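/*
 * Worked example for the residue correction above, with assumed numbers: the
 * sense bytes 3..6 hold the residue in tape blocks, so with blk_size = 512
 * and a reported residue of 4 blocks after a DMA error,
 * pc->xferred = pc->req_xfer - 512 * 4 = pc->req_xfer - 2048.
 */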
  597. static void idetape_activate_next_stage(ide_drive_t *drive)
  598. {
  599. idetape_tape_t *tape = drive->driver_data;
  600. idetape_stage_t *stage = tape->next_stage;
  601. struct request *rq;
  602. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  603. if (stage == NULL) {
  604. printk(KERN_ERR "ide-tape: bug: Trying to activate a nonexistent stage\n");
  605. return;
  606. }
  607. rq = &stage->rq;
  608. rq->rq_disk = tape->disk;
  609. rq->buffer = NULL;
  610. rq->special = (void *)stage->bh;
  611. tape->active_data_rq = rq;
  612. tape->active_stage = stage;
  613. tape->next_stage = stage->next;
  614. }
  615. /* Free a stage along with its related buffers completely. */
  616. static void __idetape_kfree_stage(idetape_stage_t *stage)
  617. {
  618. struct idetape_bh *prev_bh, *bh = stage->bh;
  619. int size;
  620. while (bh != NULL) {
  621. if (bh->b_data != NULL) {
  622. size = (int) bh->b_size;
  623. while (size > 0) {
  624. free_page((unsigned long) bh->b_data);
  625. size -= PAGE_SIZE;
  626. bh->b_data += PAGE_SIZE;
  627. }
  628. }
  629. prev_bh = bh;
  630. bh = bh->b_reqnext;
  631. kfree(prev_bh);
  632. }
  633. kfree(stage);
  634. }
  635. static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
  636. {
  637. __idetape_kfree_stage(stage);
  638. }
  639. /*
  640. * Remove tape->first_stage from the pipeline. The caller should avoid race
  641. * conditions.
  642. */
  643. static void idetape_remove_stage_head(ide_drive_t *drive)
  644. {
  645. idetape_tape_t *tape = drive->driver_data;
  646. idetape_stage_t *stage;
  647. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  648. if (tape->first_stage == NULL) {
  649. printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
  650. return;
  651. }
  652. if (tape->active_stage == tape->first_stage) {
  653. printk(KERN_ERR "ide-tape: bug: Trying to free our active "
  654. "pipeline stage\n");
  655. return;
  656. }
  657. stage = tape->first_stage;
  658. tape->first_stage = stage->next;
  659. idetape_kfree_stage(tape, stage);
  660. tape->nr_stages--;
  661. if (tape->first_stage == NULL) {
  662. tape->last_stage = NULL;
  663. if (tape->next_stage != NULL)
  664. printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
  665. " NULL\n");
  666. if (tape->nr_stages)
  667. printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
  668. "now\n");
  669. }
  670. }
  671. /*
  672. * This will free all the pipeline stages starting from new_last_stage->next
  673. * to the end of the list, and point tape->last_stage to new_last_stage.
  674. */
  675. static void idetape_abort_pipeline(ide_drive_t *drive,
  676. idetape_stage_t *new_last_stage)
  677. {
  678. idetape_tape_t *tape = drive->driver_data;
  679. idetape_stage_t *stage = new_last_stage->next;
  680. idetape_stage_t *nstage;
  681. debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);
  682. while (stage) {
  683. nstage = stage->next;
  684. idetape_kfree_stage(tape, stage);
  685. --tape->nr_stages;
  686. --tape->nr_pending_stages;
  687. stage = nstage;
  688. }
  689. if (new_last_stage)
  690. new_last_stage->next = NULL;
  691. tape->last_stage = new_last_stage;
  692. tape->next_stage = NULL;
  693. }
  694. /*
  695. * Finish servicing a request and insert a pending pipeline request into the
  696. * main device queue.
  697. */
  698. static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
  699. {
  700. struct request *rq = HWGROUP(drive)->rq;
  701. idetape_tape_t *tape = drive->driver_data;
  702. unsigned long flags;
  703. int error;
  704. int remove_stage = 0;
  705. idetape_stage_t *active_stage;
  706. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  707. switch (uptodate) {
  708. case 0: error = IDETAPE_ERROR_GENERAL; break;
  709. case 1: error = 0; break;
  710. default: error = uptodate;
  711. }
  712. rq->errors = error;
  713. if (error)
  714. tape->failed_pc = NULL;
  715. if (!blk_special_request(rq)) {
  716. ide_end_request(drive, uptodate, nr_sects);
  717. return 0;
  718. }
  719. spin_lock_irqsave(&tape->lock, flags);
  720. /* The request was a pipelined data transfer request */
  721. if (tape->active_data_rq == rq) {
  722. active_stage = tape->active_stage;
  723. tape->active_stage = NULL;
  724. tape->active_data_rq = NULL;
  725. tape->nr_pending_stages--;
  726. if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
  727. remove_stage = 1;
  728. if (error) {
  729. set_bit(IDETAPE_FLAG_PIPELINE_ERR,
  730. &tape->flags);
  731. if (error == IDETAPE_ERROR_EOD)
  732. idetape_abort_pipeline(drive,
  733. active_stage);
  734. }
  735. } else if (rq->cmd[0] & REQ_IDETAPE_READ) {
  736. if (error == IDETAPE_ERROR_EOD) {
  737. set_bit(IDETAPE_FLAG_PIPELINE_ERR,
  738. &tape->flags);
  739. idetape_abort_pipeline(drive, active_stage);
  740. }
  741. }
  742. if (tape->next_stage != NULL) {
  743. idetape_activate_next_stage(drive);
  744. /* Insert the next request into the request queue. */
  745. (void)ide_do_drive_cmd(drive, tape->active_data_rq,
  746. ide_end);
  747. } else if (!error) {
  748. /*
  749. * This is a part of the feedback loop which tries to
  750. * find the optimum number of stages. We are starting
  751. * from a minimum maximum number of stages, and if we
  752. * sense that the pipeline is empty, we try to increase
  753. * it, until we reach the user compile time memory
  754. * limit.
  755. */
  756. int i = (tape->max_pipeline - tape->min_pipeline) / 10;
  757. tape->max_stages += max(i, 1);
  758. tape->max_stages = max(tape->max_stages,
  759. tape->min_pipeline);
  760. tape->max_stages = min(tape->max_stages,
  761. tape->max_pipeline);
  762. }
  763. }
  764. ide_end_drive_cmd(drive, 0, 0);
  765. if (remove_stage)
  766. idetape_remove_stage_head(drive);
  767. if (tape->active_data_rq == NULL)
  768. clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
  769. spin_unlock_irqrestore(&tape->lock, flags);
  770. return 0;
  771. }
  772. static ide_startstop_t idetape_request_sense_callback(ide_drive_t *drive)
  773. {
  774. idetape_tape_t *tape = drive->driver_data;
  775. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  776. if (!tape->pc->error) {
  777. idetape_analyze_error(drive, tape->pc->buf);
  778. idetape_end_request(drive, 1, 0);
  779. } else {
  780. printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - "
  781. "Aborting request!\n");
  782. idetape_end_request(drive, 0, 0);
  783. }
  784. return ide_stopped;
  785. }
  786. static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
  787. {
  788. idetape_init_pc(pc);
  789. pc->c[0] = REQUEST_SENSE;
  790. pc->c[4] = 20;
  791. pc->req_xfer = 20;
  792. pc->idetape_callback = &idetape_request_sense_callback;
  793. }
  794. static void idetape_init_rq(struct request *rq, u8 cmd)
  795. {
  796. memset(rq, 0, sizeof(*rq));
  797. rq->cmd_type = REQ_TYPE_SPECIAL;
  798. rq->cmd[0] = cmd;
  799. }
  800. /*
  801. * Generate a new packet command request in front of the request queue, before
  802. * the current request, so that it will be processed immediately, on the next
  803. * pass through the driver. The function below is called from the request
  804. * handling part of the driver (the "bottom" part). Safe storage for the request
  805. * should be allocated with idetape_next_{pc,rq}_storage() prior to that.
  806. *
  807. * Memory for those requests is pre-allocated at initialization time, and is
  808. * limited to IDETAPE_PC_STACK requests. We assume that we have enough space for
  809. * the maximum possible number of inter-dependent packet commands.
  810. *
  811. * The higher level of the driver - the ioctl handler and the character device
  812. * handling functions - should queue requests to the lower level part and wait for
  813. * their completion using idetape_queue_pc_tail or idetape_queue_rw_tail.
  814. */
  815. static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
  816. struct request *rq)
  817. {
  818. struct ide_tape_obj *tape = drive->driver_data;
  819. idetape_init_rq(rq, REQ_IDETAPE_PC1);
  820. rq->buffer = (char *) pc;
  821. rq->rq_disk = tape->disk;
  822. (void) ide_do_drive_cmd(drive, rq, ide_preempt);
  823. }
  824. /*
  825. * idetape_retry_pc is called when an error was detected during the
  826. * last packet command. We queue a request sense packet command in
  827. * the head of the request list.
  828. */
  829. static ide_startstop_t idetape_retry_pc (ide_drive_t *drive)
  830. {
  831. idetape_tape_t *tape = drive->driver_data;
  832. struct ide_atapi_pc *pc;
  833. struct request *rq;
  834. (void)ide_read_error(drive);
  835. pc = idetape_next_pc_storage(drive);
  836. rq = idetape_next_rq_storage(drive);
  837. idetape_create_request_sense_cmd(pc);
  838. set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
  839. idetape_queue_pc_head(drive, pc, rq);
  840. return ide_stopped;
  841. }
  842. /*
  843. * Postpone the current request so that ide.c will be able to service requests
  844. * from another device on the same hwgroup while we are polling for DSC.
  845. */
  846. static void idetape_postpone_request(ide_drive_t *drive)
  847. {
  848. idetape_tape_t *tape = drive->driver_data;
  849. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  850. tape->postponed_rq = HWGROUP(drive)->rq;
  851. ide_stall_queue(drive, tape->dsc_poll_freq);
  852. }
  853. typedef void idetape_io_buf(ide_drive_t *, struct ide_atapi_pc *, unsigned int);
  854. /*
  855. * This is the usual interrupt handler which will be called during a packet
  856. * command. We will transfer some of the data (as requested by the drive) and
  857. * will re-point the interrupt handler to us. When the data transfer is finished, we
  858. * will act according to the algorithm described before
  859. * idetape_issue_pc.
  860. */
  861. static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
  862. {
  863. ide_hwif_t *hwif = drive->hwif;
  864. idetape_tape_t *tape = drive->driver_data;
  865. struct ide_atapi_pc *pc = tape->pc;
  866. xfer_func_t *xferfunc;
  867. idetape_io_buf *iobuf;
  868. unsigned int temp;
  869. #if SIMULATE_ERRORS
  870. static int error_sim_count;
  871. #endif
  872. u16 bcount;
  873. u8 stat, ireason;
  874. debug_log(DBG_PROCS, "Enter %s - interrupt handler\n", __func__);
  875. /* Clear the interrupt */
  876. stat = ide_read_status(drive);
  877. if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
  878. if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) {
  879. /*
  880. * A DMA error is sometimes expected. For example,
  881. * if the tape is crossing a filemark during a
  882. * READ command, it will issue an irq and position
  883. * itself before the filemark, so that only a partial
  884. * data transfer will occur (which causes the DMA
  885. * error). In that case, we will later ask the tape
  886. * how many bytes of the original request were
  887. * actually transferred (we can't receive that
  888. * information from the DMA engine on most chipsets).
  889. */
  890. /*
  891. * On the contrary, a DMA error is never expected;
  892. * it usually indicates a hardware error or abort.
  893. * If the tape crosses a filemark during a READ
  894. * command, it will issue an irq and position itself
  895. * after the filemark (not before). Only a partial
  896. * data transfer will occur, but no DMA error.
  897. * (AS, 19 Apr 2001)
  898. */
  899. pc->flags |= PC_FLAG_DMA_ERROR;
  900. } else {
  901. pc->xferred = pc->req_xfer;
  902. idetape_update_buffers(pc);
  903. }
  904. debug_log(DBG_PROCS, "DMA finished\n");
  905. }
  906. /* No more interrupts */
  907. if ((stat & DRQ_STAT) == 0) {
  908. debug_log(DBG_SENSE, "Packet command completed, %d bytes"
  909. " transferred\n", pc->xferred);
  910. pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
  911. local_irq_enable();
  912. #if SIMULATE_ERRORS
  913. if ((pc->c[0] == WRITE_6 || pc->c[0] == READ_6) &&
  914. (++error_sim_count % 100) == 0) {
  915. printk(KERN_INFO "ide-tape: %s: simulating error\n",
  916. tape->name);
  917. stat |= ERR_STAT;
  918. }
  919. #endif
  920. if ((stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
  921. stat &= ~ERR_STAT;
  922. if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
  923. /* Error detected */
  924. debug_log(DBG_ERR, "%s: I/O error\n", tape->name);
  925. if (pc->c[0] == REQUEST_SENSE) {
  926. printk(KERN_ERR "ide-tape: I/O error in request"
  927. " sense command\n");
  928. return ide_do_reset(drive);
  929. }
  930. debug_log(DBG_ERR, "[cmd %x]: check condition\n",
  931. pc->c[0]);
  932. /* Retry operation */
  933. return idetape_retry_pc(drive);
  934. }
  935. pc->error = 0;
  936. if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) &&
  937. (stat & SEEK_STAT) == 0) {
  938. /* Media access command */
  939. tape->dsc_polling_start = jiffies;
  940. tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
  941. tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
  942. /* Allow ide.c to handle other requests */
  943. idetape_postpone_request(drive);
  944. return ide_stopped;
  945. }
  946. if (tape->failed_pc == pc)
  947. tape->failed_pc = NULL;
  948. /* Command finished - Call the callback function */
  949. return pc->idetape_callback(drive);
  950. }
  951. if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
  952. pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
  953. printk(KERN_ERR "ide-tape: The tape wants to issue more "
  954. "interrupts in DMA mode\n");
  955. printk(KERN_ERR "ide-tape: DMA disabled, reverting to PIO\n");
  956. ide_dma_off(drive);
  957. return ide_do_reset(drive);
  958. }
  959. /* Get the number of bytes to transfer on this interrupt. */
  960. bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
  961. hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
  962. ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
  963. if (ireason & CD) {
  964. printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
  965. return ide_do_reset(drive);
  966. }
  967. if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
  968. /* Hopefully, we will never get here */
  969. printk(KERN_ERR "ide-tape: We wanted to %s, ",
  970. (ireason & IO) ? "Write" : "Read");
  971. printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n",
  972. (ireason & IO) ? "Read" : "Write");
  973. return ide_do_reset(drive);
  974. }
  975. if (!(pc->flags & PC_FLAG_WRITING)) {
  976. /* Reading - Check that we have enough space */
  977. temp = pc->xferred + bcount;
  978. if (temp > pc->req_xfer) {
  979. if (temp > pc->buf_size) {
  980. printk(KERN_ERR "ide-tape: The tape wants to "
  981. "send us more data than expected "
  982. "- discarding data\n");
  983. ide_atapi_discard_data(drive, bcount);
  984. ide_set_handler(drive, &idetape_pc_intr,
  985. IDETAPE_WAIT_CMD, NULL);
  986. return ide_started;
  987. }
  988. debug_log(DBG_SENSE, "The tape wants to send us more "
  989. "data than expected - allowing transfer\n");
  990. }
  991. iobuf = &idetape_input_buffers;
  992. xferfunc = hwif->atapi_input_bytes;
  993. } else {
  994. iobuf = &idetape_output_buffers;
  995. xferfunc = hwif->atapi_output_bytes;
  996. }
  997. if (pc->bh)
  998. iobuf(drive, pc, bcount);
  999. else
  1000. xferfunc(drive, pc->cur_pos, bcount);
  1001. /* Update the current position */
  1002. pc->xferred += bcount;
  1003. pc->cur_pos += bcount;
  1004. debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
  1005. pc->c[0], bcount);
  1006. /* And set the interrupt handler again */
  1007. ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
  1008. return ide_started;
  1009. }
  1010. /*
  1011. * Packet Command Interface
  1012. *
  1013. * The current Packet Command is available in tape->pc, and will not change
  1014. * until we finish handling it. Each packet command is associated with a
  1015. * callback function that will be called when the command is finished.
  1016. *
  1017. * The handling will be done in three stages:
  1018. *
  1019. * 1. idetape_issue_pc will send the packet command to the drive, and will set
  1020. * the interrupt handler to idetape_pc_intr.
  1021. *
  1022. * 2. On each interrupt, idetape_pc_intr will be called. This step will be
  1023. * repeated until the device signals us that no more interrupts will be issued.
  1024. *
  1025. * 3. ATAPI Tape media access commands have immediate status with a delayed
  1026. * process. In case of a successful initiation of a media access packet command,
  1027. * the DSC bit will be set when the actual execution of the command is finished.
  1028. * Since the tape drive will not issue an interrupt, we have to poll for this
  1029. * event. In this case, we define the request as "low priority request" by
  1030. * setting rq_status to IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and
  1031. * exit the driver.
  1032. *
  1033. * ide.c will then give higher priority to requests which originate from the
  1034. * other device, until we change rq_status back to RQ_ACTIVE.
  1035. *
  1036. * 4. When the packet command is finished, it will be checked for errors.
  1037. *
  1038. * 5. In case an error was found, we queue a request sense packet command in
  1039. * front of the request queue and retry the operation up to
  1040. * IDETAPE_MAX_PC_RETRIES times.
  1041. *
  1042. * 6. In case no error was found, or we decided to give up and not to retry
  1043. * again, the callback function will be called and then we will handle the next
  1044. * request.
  1045. */
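/*
 * Condensed map of the stages above: idetape_issue_pc() loads the taskfile
 * and sends WIN_PACKETCMD, idetape_transfer_pc() writes the 12-byte packet,
 * idetape_pc_intr() runs on every interrupt until DRQ clears and then calls
 * pc->idetape_callback(). On an error, idetape_retry_pc() queues a REQUEST
 * SENSE at the head of the queue and the command is reissued until
 * pc->retries exceeds IDETAPE_MAX_PC_RETRIES.
 */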
  1046. static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
  1047. {
  1048. ide_hwif_t *hwif = drive->hwif;
  1049. idetape_tape_t *tape = drive->driver_data;
  1050. struct ide_atapi_pc *pc = tape->pc;
  1051. int retries = 100;
  1052. ide_startstop_t startstop;
  1053. u8 ireason;
  1054. if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
  1055. printk(KERN_ERR "ide-tape: Strange, packet command initiated "
  1056. "yet DRQ isn't asserted\n");
  1057. return startstop;
  1058. }
  1059. ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
  1060. while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
  1061. printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
  1062. "a packet command, retrying\n");
  1063. udelay(100);
  1064. ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
  1065. if (retries == 0) {
  1066. printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while "
  1067. "issuing a packet command, ignoring\n");
  1068. ireason |= CD;
  1069. ireason &= ~IO;
  1070. }
  1071. }
  1072. if ((ireason & CD) == 0 || (ireason & IO)) {
  1073. printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
  1074. "a packet command\n");
  1075. return ide_do_reset(drive);
  1076. }
  1077. /* Set the interrupt routine */
  1078. ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
  1079. #ifdef CONFIG_BLK_DEV_IDEDMA
  1080. /* Begin DMA, if necessary */
  1081. if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
  1082. hwif->dma_start(drive);
  1083. #endif
  1084. /* Send the actual packet */
  1085. HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
  1086. return ide_started;
  1087. }
  1088. static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
  1089. struct ide_atapi_pc *pc)
  1090. {
  1091. ide_hwif_t *hwif = drive->hwif;
  1092. idetape_tape_t *tape = drive->driver_data;
  1093. int dma_ok = 0;
  1094. u16 bcount;
  1095. if (tape->pc->c[0] == REQUEST_SENSE &&
  1096. pc->c[0] == REQUEST_SENSE) {
  1097. printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
1098. "Two request sense commands were issued in a row\n");
  1099. }
  1100. if (tape->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
  1101. tape->failed_pc = pc;
  1102. /* Set the current packet command */
  1103. tape->pc = pc;
  1104. if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
  1105. (pc->flags & PC_FLAG_ABORT)) {
  1106. /*
1107. * We will "abort" retrying a packet command in case a legitimate
1108. * error code was received (crossing a filemark, or reaching the end
1109. * of the media, for example).
  1110. */
  1111. if (!(pc->flags & PC_FLAG_ABORT)) {
  1112. if (!(pc->c[0] == TEST_UNIT_READY &&
  1113. tape->sense_key == 2 && tape->asc == 4 &&
  1114. (tape->ascq == 1 || tape->ascq == 8))) {
  1115. printk(KERN_ERR "ide-tape: %s: I/O error, "
  1116. "pc = %2x, key = %2x, "
  1117. "asc = %2x, ascq = %2x\n",
  1118. tape->name, pc->c[0],
  1119. tape->sense_key, tape->asc,
  1120. tape->ascq);
  1121. }
  1122. /* Giving up */
  1123. pc->error = IDETAPE_ERROR_GENERAL;
  1124. }
  1125. tape->failed_pc = NULL;
  1126. return pc->idetape_callback(drive);
  1127. }
  1128. debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);
  1129. pc->retries++;
  1130. /* We haven't transferred any data yet */
  1131. pc->xferred = 0;
  1132. pc->cur_pos = pc->buf;
  1133. /* Request to transfer the entire buffer at once */
  1134. bcount = pc->req_xfer;
  1135. if (pc->flags & PC_FLAG_DMA_ERROR) {
  1136. pc->flags &= ~PC_FLAG_DMA_ERROR;
  1137. printk(KERN_WARNING "ide-tape: DMA disabled, "
  1138. "reverting to PIO\n");
  1139. ide_dma_off(drive);
  1140. }
  1141. if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
  1142. dma_ok = !hwif->dma_setup(drive);
  1143. ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
  1144. IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
  1145. if (dma_ok)
  1146. /* Will begin DMA later */
  1147. pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
  1148. if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags)) {
  1149. ide_execute_command(drive, WIN_PACKETCMD, &idetape_transfer_pc,
  1150. IDETAPE_WAIT_CMD, NULL);
  1151. return ide_started;
  1152. } else {
  1153. hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
  1154. return idetape_transfer_pc(drive);
  1155. }
  1156. }
  1157. static ide_startstop_t idetape_pc_callback(ide_drive_t *drive)
  1158. {
  1159. idetape_tape_t *tape = drive->driver_data;
  1160. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1161. idetape_end_request(drive, tape->pc->error ? 0 : 1, 0);
  1162. return ide_stopped;
  1163. }
  1164. /* A mode sense command is used to "sense" tape parameters. */
  1165. static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
  1166. {
  1167. idetape_init_pc(pc);
  1168. pc->c[0] = MODE_SENSE;
  1169. if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
  1170. /* DBD = 1 - Don't return block descriptors */
  1171. pc->c[1] = 8;
  1172. pc->c[2] = page_code;
  1173. /*
  1174. * Changed pc->c[3] to 0 (255 will at best return unused info).
  1175. *
  1176. * For SCSI this byte is defined as subpage instead of high byte
  1177. * of length and some IDE drives seem to interpret it this way
  1178. * and return an error when 255 is used.
  1179. */
  1180. pc->c[3] = 0;
  1181. /* We will just discard data in that case */
  1182. pc->c[4] = 255;
  1183. if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
  1184. pc->req_xfer = 12;
  1185. else if (page_code == IDETAPE_CAPABILITIES_PAGE)
  1186. pc->req_xfer = 24;
  1187. else
  1188. pc->req_xfer = 50;
  1189. pc->idetape_callback = &idetape_pc_callback;
  1190. }
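/*
 * Sketch of how the helper above could be used to read the capabilities page
 * (illustrative only, not part of the driver; assumes process context and
 * uses idetape_queue_pc_tail(), which is defined further down in this file).
 */
#if 0
static int idetape_example_query_caps(ide_drive_t *drive)
{
	struct ide_atapi_pc pc;

	idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
	if (idetape_queue_pc_tail(drive, &pc))
		return -EIO;
	/* the returned mode data (header + caps page) should now be in pc.buf */
	return 0;
}
#endif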
  1191. static void idetape_calculate_speeds(ide_drive_t *drive)
  1192. {
  1193. idetape_tape_t *tape = drive->driver_data;
  1194. if (time_after(jiffies,
  1195. tape->controlled_pipeline_head_time + 120 * HZ)) {
  1196. tape->controlled_previous_pipeline_head =
  1197. tape->controlled_last_pipeline_head;
  1198. tape->controlled_previous_head_time =
  1199. tape->controlled_pipeline_head_time;
  1200. tape->controlled_last_pipeline_head = tape->pipeline_head;
  1201. tape->controlled_pipeline_head_time = jiffies;
  1202. }
  1203. if (time_after(jiffies, tape->controlled_pipeline_head_time + 60 * HZ))
  1204. tape->controlled_pipeline_head_speed = (tape->pipeline_head -
  1205. tape->controlled_last_pipeline_head) * 32 * HZ /
  1206. (jiffies - tape->controlled_pipeline_head_time);
  1207. else if (time_after(jiffies, tape->controlled_previous_head_time))
  1208. tape->controlled_pipeline_head_speed = (tape->pipeline_head -
  1209. tape->controlled_previous_pipeline_head) * 32 *
  1210. HZ / (jiffies - tape->controlled_previous_head_time);
  1211. if (tape->nr_pending_stages < tape->max_stages/*- 1 */) {
  1212. /* -1 for read mode error recovery */
  1213. if (time_after(jiffies, tape->uncontrolled_previous_head_time +
  1214. 10 * HZ)) {
  1215. tape->uncontrolled_pipeline_head_time = jiffies;
  1216. tape->uncontrolled_pipeline_head_speed =
  1217. (tape->pipeline_head -
  1218. tape->uncontrolled_previous_pipeline_head) *
  1219. 32 * HZ / (jiffies -
  1220. tape->uncontrolled_previous_head_time);
  1221. }
  1222. } else {
  1223. tape->uncontrolled_previous_head_time = jiffies;
  1224. tape->uncontrolled_previous_pipeline_head = tape->pipeline_head;
  1225. if (time_after(jiffies, tape->uncontrolled_pipeline_head_time +
  1226. 30 * HZ))
  1227. tape->uncontrolled_pipeline_head_time = jiffies;
  1228. }
  1229. tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed,
  1230. tape->controlled_pipeline_head_speed);
  1231. if (tape->speed_control == 1) {
  1232. if (tape->nr_pending_stages >= tape->max_stages / 2)
  1233. tape->max_insert_speed = tape->pipeline_head_speed +
  1234. (1100 - tape->pipeline_head_speed) * 2 *
  1235. (tape->nr_pending_stages - tape->max_stages / 2)
  1236. / tape->max_stages;
  1237. else
  1238. tape->max_insert_speed = 500 +
  1239. (tape->pipeline_head_speed - 500) * 2 *
  1240. tape->nr_pending_stages / tape->max_stages;
  1241. if (tape->nr_pending_stages >= tape->max_stages * 99 / 100)
  1242. tape->max_insert_speed = 5000;
  1243. } else
  1244. tape->max_insert_speed = tape->speed_control;
  1245. tape->max_insert_speed = max(tape->max_insert_speed, 500);
  1246. }
  1247. static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
  1248. {
  1249. idetape_tape_t *tape = drive->driver_data;
  1250. struct ide_atapi_pc *pc = tape->pc;
  1251. u8 stat;
  1252. stat = ide_read_status(drive);
  1253. if (stat & SEEK_STAT) {
  1254. if (stat & ERR_STAT) {
  1255. /* Error detected */
  1256. if (pc->c[0] != TEST_UNIT_READY)
1257. printk(KERN_ERR "ide-tape: %s: I/O error\n",
1258. tape->name);
  1259. /* Retry operation */
  1260. return idetape_retry_pc(drive);
  1261. }
  1262. pc->error = 0;
  1263. if (tape->failed_pc == pc)
  1264. tape->failed_pc = NULL;
  1265. } else {
  1266. pc->error = IDETAPE_ERROR_GENERAL;
  1267. tape->failed_pc = NULL;
  1268. }
  1269. return pc->idetape_callback(drive);
  1270. }
  1271. static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
  1272. {
  1273. idetape_tape_t *tape = drive->driver_data;
  1274. struct request *rq = HWGROUP(drive)->rq;
  1275. int blocks = tape->pc->xferred / tape->blk_size;
  1276. tape->avg_size += blocks * tape->blk_size;
  1277. tape->insert_size += blocks * tape->blk_size;
  1278. if (tape->insert_size > 1024 * 1024)
  1279. tape->measure_insert_time = 1;
  1280. if (tape->measure_insert_time) {
  1281. tape->measure_insert_time = 0;
  1282. tape->insert_time = jiffies;
  1283. tape->insert_size = 0;
  1284. }
  1285. if (time_after(jiffies, tape->insert_time))
  1286. tape->insert_speed = tape->insert_size / 1024 * HZ /
  1287. (jiffies - tape->insert_time);
  1288. if (time_after_eq(jiffies, tape->avg_time + HZ)) {
  1289. tape->avg_speed = tape->avg_size * HZ /
  1290. (jiffies - tape->avg_time) / 1024;
  1291. tape->avg_size = 0;
  1292. tape->avg_time = jiffies;
  1293. }
  1294. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1295. tape->first_frame += blocks;
  1296. rq->current_nr_sectors -= blocks;
  1297. if (!tape->pc->error)
  1298. idetape_end_request(drive, 1, 0);
  1299. else
  1300. idetape_end_request(drive, tape->pc->error, 0);
  1301. return ide_stopped;
  1302. }
  1303. static void idetape_create_read_cmd(idetape_tape_t *tape,
  1304. struct ide_atapi_pc *pc,
  1305. unsigned int length, struct idetape_bh *bh)
  1306. {
  1307. idetape_init_pc(pc);
  1308. pc->c[0] = READ_6;
  1309. put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
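	/*
	 * The MSB just written to c[1] is zero for any realistic length;
	 * c[1] is reused below for the fixed-block bit, so the 24-bit block
	 * count effectively lives in c[2..4].
	 */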
  1310. pc->c[1] = 1;
  1311. pc->idetape_callback = &idetape_rw_callback;
  1312. pc->bh = bh;
  1313. atomic_set(&bh->b_count, 0);
  1314. pc->buf = NULL;
  1315. pc->buf_size = length * tape->blk_size;
  1316. pc->req_xfer = pc->buf_size;
  1317. if (pc->req_xfer == tape->stage_size)
  1318. pc->flags |= PC_FLAG_DMA_RECOMMENDED;
  1319. }
  1320. static void idetape_create_write_cmd(idetape_tape_t *tape,
  1321. struct ide_atapi_pc *pc,
  1322. unsigned int length, struct idetape_bh *bh)
  1323. {
  1324. idetape_init_pc(pc);
  1325. pc->c[0] = WRITE_6;
  1326. put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
  1327. pc->c[1] = 1;
  1328. pc->idetape_callback = &idetape_rw_callback;
  1329. pc->flags |= PC_FLAG_WRITING;
  1330. pc->bh = bh;
  1331. pc->b_data = bh->b_data;
  1332. pc->b_count = atomic_read(&bh->b_count);
  1333. pc->buf = NULL;
  1334. pc->buf_size = length * tape->blk_size;
  1335. pc->req_xfer = pc->buf_size;
  1336. if (pc->req_xfer == tape->stage_size)
  1337. pc->flags |= PC_FLAG_DMA_RECOMMENDED;
  1338. }
  1339. static ide_startstop_t idetape_do_request(ide_drive_t *drive,
  1340. struct request *rq, sector_t block)
  1341. {
  1342. idetape_tape_t *tape = drive->driver_data;
  1343. struct ide_atapi_pc *pc = NULL;
  1344. struct request *postponed_rq = tape->postponed_rq;
  1345. u8 stat;
  1346. debug_log(DBG_SENSE, "sector: %ld, nr_sectors: %ld,"
  1347. " current_nr_sectors: %d\n",
  1348. rq->sector, rq->nr_sectors, rq->current_nr_sectors);
  1349. if (!blk_special_request(rq)) {
  1350. /* We do not support buffer cache originated requests. */
  1351. printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
  1352. "request queue (%d)\n", drive->name, rq->cmd_type);
  1353. ide_end_request(drive, 0, 0);
  1354. return ide_stopped;
  1355. }
  1356. /* Retry a failed packet command */
  1357. if (tape->failed_pc && tape->pc->c[0] == REQUEST_SENSE)
  1358. return idetape_issue_pc(drive, tape->failed_pc);
  1359. if (postponed_rq != NULL)
  1360. if (rq != postponed_rq) {
  1361. printk(KERN_ERR "ide-tape: ide-tape.c bug - "
  1362. "Two DSC requests were queued\n");
  1363. idetape_end_request(drive, 0, 0);
  1364. return ide_stopped;
  1365. }
  1366. tape->postponed_rq = NULL;
  1367. /*
  1368. * If the tape is still busy, postpone our request and service
  1369. * the other device meanwhile.
  1370. */
  1371. stat = ide_read_status(drive);
  1372. if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
  1373. set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
  1374. if (drive->post_reset == 1) {
  1375. set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
  1376. drive->post_reset = 0;
  1377. }
  1378. if (time_after(jiffies, tape->insert_time))
  1379. tape->insert_speed = tape->insert_size / 1024 * HZ /
  1380. (jiffies - tape->insert_time);
  1381. idetape_calculate_speeds(drive);
  1382. if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
  1383. (stat & SEEK_STAT) == 0) {
  1384. if (postponed_rq == NULL) {
  1385. tape->dsc_polling_start = jiffies;
  1386. tape->dsc_poll_freq = tape->best_dsc_rw_freq;
  1387. tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
  1388. } else if (time_after(jiffies, tape->dsc_timeout)) {
  1389. printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
  1390. tape->name);
  1391. if (rq->cmd[0] & REQ_IDETAPE_PC2) {
  1392. idetape_media_access_finished(drive);
  1393. return ide_stopped;
  1394. } else {
  1395. return ide_do_reset(drive);
  1396. }
  1397. } else if (time_after(jiffies,
  1398. tape->dsc_polling_start +
  1399. IDETAPE_DSC_MA_THRESHOLD))
  1400. tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
  1401. idetape_postpone_request(drive);
  1402. return ide_stopped;
  1403. }
  1404. if (rq->cmd[0] & REQ_IDETAPE_READ) {
  1405. tape->buffer_head++;
  1406. tape->postpone_cnt = 0;
  1407. pc = idetape_next_pc_storage(drive);
  1408. idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
  1409. (struct idetape_bh *)rq->special);
  1410. goto out;
  1411. }
  1412. if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
  1413. tape->buffer_head++;
  1414. tape->postpone_cnt = 0;
  1415. pc = idetape_next_pc_storage(drive);
  1416. idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
  1417. (struct idetape_bh *)rq->special);
  1418. goto out;
  1419. }
  1420. if (rq->cmd[0] & REQ_IDETAPE_PC1) {
  1421. pc = (struct ide_atapi_pc *) rq->buffer;
  1422. rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
  1423. rq->cmd[0] |= REQ_IDETAPE_PC2;
  1424. goto out;
  1425. }
  1426. if (rq->cmd[0] & REQ_IDETAPE_PC2) {
  1427. idetape_media_access_finished(drive);
  1428. return ide_stopped;
  1429. }
  1430. BUG();
  1431. out:
  1432. return idetape_issue_pc(drive, pc);
  1433. }
  1434. /* Pipeline related functions */
  1435. static inline int idetape_pipeline_active(idetape_tape_t *tape)
  1436. {
  1437. int rc1, rc2;
  1438. rc1 = test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
  1439. rc2 = (tape->active_data_rq != NULL);
  1440. return rc1;
  1441. }
  1442. /*
  1443. * The function below uses __get_free_page to allocate a pipeline stage, along
  1444. * with all the necessary small buffers which together make a buffer of size
  1445. * tape->stage_size (or a bit more). We attempt to combine sequential pages as
  1446. * much as possible.
  1447. *
1448. * It returns a pointer to the newly allocated stage, or NULL if we can't (or
  1449. * don't want to) allocate a stage.
  1450. *
  1451. * Pipeline stages are optional and are used to increase performance. If we
  1452. * can't allocate them, we'll manage without them.
  1453. */
  1454. static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
  1455. int clear)
  1456. {
  1457. idetape_stage_t *stage;
  1458. struct idetape_bh *prev_bh, *bh;
  1459. int pages = tape->pages_per_stage;
  1460. char *b_data = NULL;
  1461. stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
  1462. if (!stage)
  1463. return NULL;
  1464. stage->next = NULL;
  1465. stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
  1466. bh = stage->bh;
  1467. if (bh == NULL)
  1468. goto abort;
  1469. bh->b_reqnext = NULL;
  1470. bh->b_data = (char *) __get_free_page(GFP_KERNEL);
  1471. if (!bh->b_data)
  1472. goto abort;
  1473. if (clear)
  1474. memset(bh->b_data, 0, PAGE_SIZE);
  1475. bh->b_size = PAGE_SIZE;
  1476. atomic_set(&bh->b_count, full ? bh->b_size : 0);
  1477. while (--pages) {
  1478. b_data = (char *) __get_free_page(GFP_KERNEL);
  1479. if (!b_data)
  1480. goto abort;
  1481. if (clear)
  1482. memset(b_data, 0, PAGE_SIZE);
  1483. if (bh->b_data == b_data + PAGE_SIZE) {
  1484. bh->b_size += PAGE_SIZE;
  1485. bh->b_data -= PAGE_SIZE;
  1486. if (full)
  1487. atomic_add(PAGE_SIZE, &bh->b_count);
  1488. continue;
  1489. }
  1490. if (b_data == bh->b_data + bh->b_size) {
  1491. bh->b_size += PAGE_SIZE;
  1492. if (full)
  1493. atomic_add(PAGE_SIZE, &bh->b_count);
  1494. continue;
  1495. }
  1496. prev_bh = bh;
  1497. bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
  1498. if (!bh) {
  1499. free_page((unsigned long) b_data);
  1500. goto abort;
  1501. }
  1502. bh->b_reqnext = NULL;
  1503. bh->b_data = b_data;
  1504. bh->b_size = PAGE_SIZE;
  1505. atomic_set(&bh->b_count, full ? bh->b_size : 0);
  1506. prev_bh->b_reqnext = bh;
  1507. }
  1508. bh->b_size -= tape->excess_bh_size;
  1509. if (full)
  1510. atomic_sub(tape->excess_bh_size, &bh->b_count);
  1511. return stage;
  1512. abort:
  1513. __idetape_kfree_stage(stage);
  1514. return NULL;
  1515. }
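/*
 * Minimal sketch of the allocator above in use (illustrative, not built):
 * grab an empty, non-cleared stage (full = 0, clear = 0) for use as a merge
 * buffer and release it again with __idetape_kfree_stage(). The error
 * handling here is an assumption; idetape_init_read() further down shows the
 * real usage.
 */
#if 0
static int idetape_example_get_merge_stage(idetape_tape_t *tape)
{
	tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
	if (tape->merge_stage == NULL)
		return -ENOMEM;	/* stages are optional, caller may fall back */
	/* ... fill or drain tape->merge_stage->bh ... */
	__idetape_kfree_stage(tape->merge_stage);
	tape->merge_stage = NULL;
	return 0;
}
#endif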
  1516. static idetape_stage_t *idetape_kmalloc_stage(idetape_tape_t *tape)
  1517. {
  1518. idetape_stage_t *cache_stage = tape->cache_stage;
  1519. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1520. if (tape->nr_stages >= tape->max_stages)
  1521. return NULL;
  1522. if (cache_stage != NULL) {
  1523. tape->cache_stage = NULL;
  1524. return cache_stage;
  1525. }
  1526. return __idetape_kmalloc_stage(tape, 0, 0);
  1527. }
  1528. static int idetape_copy_stage_from_user(idetape_tape_t *tape,
  1529. idetape_stage_t *stage, const char __user *buf, int n)
  1530. {
  1531. struct idetape_bh *bh = tape->bh;
  1532. int count;
  1533. int ret = 0;
  1534. while (n) {
  1535. if (bh == NULL) {
  1536. printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
  1537. __func__);
  1538. return 1;
  1539. }
  1540. count = min((unsigned int)
  1541. (bh->b_size - atomic_read(&bh->b_count)),
  1542. (unsigned int)n);
  1543. if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
  1544. count))
  1545. ret = 1;
  1546. n -= count;
  1547. atomic_add(count, &bh->b_count);
  1548. buf += count;
  1549. if (atomic_read(&bh->b_count) == bh->b_size) {
  1550. bh = bh->b_reqnext;
  1551. if (bh)
  1552. atomic_set(&bh->b_count, 0);
  1553. }
  1554. }
  1555. tape->bh = bh;
  1556. return ret;
  1557. }
  1558. static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
  1559. idetape_stage_t *stage, int n)
  1560. {
  1561. struct idetape_bh *bh = tape->bh;
  1562. int count;
  1563. int ret = 0;
  1564. while (n) {
  1565. if (bh == NULL) {
  1566. printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
  1567. __func__);
  1568. return 1;
  1569. }
  1570. count = min(tape->b_count, n);
  1571. if (copy_to_user(buf, tape->b_data, count))
  1572. ret = 1;
  1573. n -= count;
  1574. tape->b_data += count;
  1575. tape->b_count -= count;
  1576. buf += count;
  1577. if (!tape->b_count) {
  1578. bh = bh->b_reqnext;
  1579. tape->bh = bh;
  1580. if (bh) {
  1581. tape->b_data = bh->b_data;
  1582. tape->b_count = atomic_read(&bh->b_count);
  1583. }
  1584. }
  1585. }
  1586. return ret;
  1587. }
  1588. static void idetape_init_merge_stage(idetape_tape_t *tape)
  1589. {
  1590. struct idetape_bh *bh = tape->merge_stage->bh;
  1591. tape->bh = bh;
  1592. if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
  1593. atomic_set(&bh->b_count, 0);
  1594. else {
  1595. tape->b_data = bh->b_data;
  1596. tape->b_count = atomic_read(&bh->b_count);
  1597. }
  1598. }
  1599. static void idetape_switch_buffers(idetape_tape_t *tape, idetape_stage_t *stage)
  1600. {
  1601. struct idetape_bh *tmp;
  1602. tmp = stage->bh;
  1603. stage->bh = tape->merge_stage->bh;
  1604. tape->merge_stage->bh = tmp;
  1605. idetape_init_merge_stage(tape);
  1606. }
  1607. /* Add a new stage at the end of the pipeline. */
  1608. static void idetape_add_stage_tail(ide_drive_t *drive, idetape_stage_t *stage)
  1609. {
  1610. idetape_tape_t *tape = drive->driver_data;
  1611. unsigned long flags;
  1612. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1613. spin_lock_irqsave(&tape->lock, flags);
  1614. stage->next = NULL;
  1615. if (tape->last_stage != NULL)
  1616. tape->last_stage->next = stage;
  1617. else
  1618. tape->first_stage = stage;
  1619. tape->next_stage = stage;
  1620. tape->last_stage = stage;
  1621. if (tape->next_stage == NULL)
  1622. tape->next_stage = tape->last_stage;
  1623. tape->nr_stages++;
  1624. tape->nr_pending_stages++;
  1625. spin_unlock_irqrestore(&tape->lock, flags);
  1626. }
  1627. /* Install a completion in a pending request and sleep until it is serviced. The
  1628. * caller should ensure that the request will not be serviced before we install
  1629. * the completion (usually by disabling interrupts).
  1630. */
  1631. static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
  1632. {
  1633. DECLARE_COMPLETION_ONSTACK(wait);
  1634. idetape_tape_t *tape = drive->driver_data;
  1635. if (rq == NULL || !blk_special_request(rq)) {
  1636. printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
  1637. " request\n");
  1638. return;
  1639. }
  1640. rq->end_io_data = &wait;
  1641. rq->end_io = blk_end_sync_rq;
  1642. spin_unlock_irq(&tape->lock);
  1643. wait_for_completion(&wait);
  1644. /* The stage and its struct request have been deallocated */
  1645. spin_lock_irq(&tape->lock);
  1646. }
  1647. static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
  1648. {
  1649. idetape_tape_t *tape = drive->driver_data;
  1650. u8 *readpos = tape->pc->buf;
  1651. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1652. if (!tape->pc->error) {
  1653. debug_log(DBG_SENSE, "BOP - %s\n",
  1654. (readpos[0] & 0x80) ? "Yes" : "No");
  1655. debug_log(DBG_SENSE, "EOP - %s\n",
  1656. (readpos[0] & 0x40) ? "Yes" : "No");
  1657. if (readpos[0] & 0x4) {
1658. printk(KERN_INFO "ide-tape: Block location is unknown "
  1659. "to the tape\n");
  1660. clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
  1661. idetape_end_request(drive, 0, 0);
  1662. } else {
  1663. debug_log(DBG_SENSE, "Block Location - %u\n",
  1664. be32_to_cpu(*(u32 *)&readpos[4]));
  1665. tape->partition = readpos[1];
  1666. tape->first_frame =
  1667. be32_to_cpu(*(u32 *)&readpos[4]);
  1668. set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
  1669. idetape_end_request(drive, 1, 0);
  1670. }
  1671. } else {
  1672. idetape_end_request(drive, 0, 0);
  1673. }
  1674. return ide_stopped;
  1675. }
  1676. /*
  1677. * Write a filemark if write_filemark=1. Flush the device buffers without
  1678. * writing a filemark otherwise.
  1679. */
  1680. static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
  1681. struct ide_atapi_pc *pc, int write_filemark)
  1682. {
  1683. idetape_init_pc(pc);
  1684. pc->c[0] = WRITE_FILEMARKS;
  1685. pc->c[4] = write_filemark;
  1686. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1687. pc->idetape_callback = &idetape_pc_callback;
  1688. }
  1689. static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
  1690. {
  1691. idetape_init_pc(pc);
  1692. pc->c[0] = TEST_UNIT_READY;
  1693. pc->idetape_callback = &idetape_pc_callback;
  1694. }
  1695. /*
  1696. * We add a special packet command request to the tail of the request queue, and
  1697. * wait for it to be serviced. This is not to be called from within the request
1698. * handling part of the driver! We allocate the data on the stack here, and it
1699. * remains valid until the request is finished. This is not the case for the
1700. * bottom part of the driver, where we always leave the functions to wait for
1701. * an interrupt or a timer event.
  1702. *
  1703. * From the bottom part of the driver, we should allocate safe memory using
  1704. * idetape_next_pc_storage() and ide_tape_next_rq_storage(), and add the request
  1705. * to the request list without waiting for it to be serviced! In that case, we
  1706. * usually use idetape_queue_pc_head().
  1707. */
  1708. static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
  1709. {
  1710. struct ide_tape_obj *tape = drive->driver_data;
  1711. struct request rq;
  1712. idetape_init_rq(&rq, REQ_IDETAPE_PC1);
  1713. rq.buffer = (char *) pc;
  1714. rq.rq_disk = tape->disk;
  1715. return ide_do_drive_cmd(drive, &rq, ide_wait);
  1716. }
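/*
 * Sketch of the process-context pattern the comment above describes
 * (illustrative, not compiled in): the packet command may live on the stack
 * because __idetape_queue_pc_tail() sleeps until the request has been fully
 * serviced. idetape_wait_ready() below uses the same pattern.
 */
#if 0
static int idetape_example_test_unit_ready(ide_drive_t *drive)
{
	struct ide_atapi_pc pc;	/* safe on the stack: we wait for completion */

	idetape_create_test_unit_ready_cmd(&pc);
	return __idetape_queue_pc_tail(drive, &pc);
}
#endif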
  1717. static void idetape_create_load_unload_cmd(ide_drive_t *drive,
  1718. struct ide_atapi_pc *pc, int cmd)
  1719. {
  1720. idetape_init_pc(pc);
  1721. pc->c[0] = START_STOP;
  1722. pc->c[4] = cmd;
  1723. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1724. pc->idetape_callback = &idetape_pc_callback;
  1725. }
  1726. static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
  1727. {
  1728. idetape_tape_t *tape = drive->driver_data;
  1729. struct ide_atapi_pc pc;
  1730. int load_attempted = 0;
  1731. /* Wait for the tape to become ready */
  1732. set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
  1733. timeout += jiffies;
  1734. while (time_before(jiffies, timeout)) {
  1735. idetape_create_test_unit_ready_cmd(&pc);
  1736. if (!__idetape_queue_pc_tail(drive, &pc))
  1737. return 0;
  1738. if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
  1739. || (tape->asc == 0x3A)) {
  1740. /* no media */
  1741. if (load_attempted)
  1742. return -ENOMEDIUM;
  1743. idetape_create_load_unload_cmd(drive, &pc,
  1744. IDETAPE_LU_LOAD_MASK);
  1745. __idetape_queue_pc_tail(drive, &pc);
  1746. load_attempted = 1;
  1747. /* not about to be ready */
  1748. } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
  1749. (tape->ascq == 1 || tape->ascq == 8)))
  1750. return -EIO;
  1751. msleep(100);
  1752. }
  1753. return -EIO;
  1754. }
  1755. static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
  1756. {
  1757. return __idetape_queue_pc_tail(drive, pc);
  1758. }
  1759. static int idetape_flush_tape_buffers(ide_drive_t *drive)
  1760. {
  1761. struct ide_atapi_pc pc;
  1762. int rc;
  1763. idetape_create_write_filemark_cmd(drive, &pc, 0);
  1764. rc = idetape_queue_pc_tail(drive, &pc);
  1765. if (rc)
  1766. return rc;
  1767. idetape_wait_ready(drive, 60 * 5 * HZ);
  1768. return 0;
  1769. }
  1770. static void idetape_create_read_position_cmd(struct ide_atapi_pc *pc)
  1771. {
  1772. idetape_init_pc(pc);
  1773. pc->c[0] = READ_POSITION;
  1774. pc->req_xfer = 20;
  1775. pc->idetape_callback = &idetape_read_position_callback;
  1776. }
  1777. static int idetape_read_position(ide_drive_t *drive)
  1778. {
  1779. idetape_tape_t *tape = drive->driver_data;
  1780. struct ide_atapi_pc pc;
  1781. int position;
  1782. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1783. idetape_create_read_position_cmd(&pc);
  1784. if (idetape_queue_pc_tail(drive, &pc))
  1785. return -1;
  1786. position = tape->first_frame;
  1787. return position;
  1788. }
  1789. static void idetape_create_locate_cmd(ide_drive_t *drive,
  1790. struct ide_atapi_pc *pc,
  1791. unsigned int block, u8 partition, int skip)
  1792. {
  1793. idetape_init_pc(pc);
  1794. pc->c[0] = POSITION_TO_ELEMENT;
  1795. pc->c[1] = 2;
  1796. put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
  1797. pc->c[8] = partition;
  1798. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1799. pc->idetape_callback = &idetape_pc_callback;
  1800. }
  1801. static int idetape_create_prevent_cmd(ide_drive_t *drive,
  1802. struct ide_atapi_pc *pc, int prevent)
  1803. {
  1804. idetape_tape_t *tape = drive->driver_data;
  1805. /* device supports locking according to capabilities page */
  1806. if (!(tape->caps[6] & 0x01))
  1807. return 0;
  1808. idetape_init_pc(pc);
  1809. pc->c[0] = ALLOW_MEDIUM_REMOVAL;
  1810. pc->c[4] = prevent;
  1811. pc->idetape_callback = &idetape_pc_callback;
  1812. return 1;
  1813. }
  1814. static int __idetape_discard_read_pipeline(ide_drive_t *drive)
  1815. {
  1816. idetape_tape_t *tape = drive->driver_data;
  1817. unsigned long flags;
  1818. int cnt;
  1819. if (tape->chrdev_dir != IDETAPE_DIR_READ)
  1820. return 0;
  1821. /* Remove merge stage. */
  1822. cnt = tape->merge_stage_size / tape->blk_size;
  1823. if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
  1824. ++cnt; /* Filemarks count as 1 sector */
  1825. tape->merge_stage_size = 0;
  1826. if (tape->merge_stage != NULL) {
  1827. __idetape_kfree_stage(tape->merge_stage);
  1828. tape->merge_stage = NULL;
  1829. }
  1830. /* Clear pipeline flags. */
  1831. clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
  1832. tape->chrdev_dir = IDETAPE_DIR_NONE;
  1833. /* Remove pipeline stages. */
  1834. if (tape->first_stage == NULL)
  1835. return 0;
  1836. spin_lock_irqsave(&tape->lock, flags);
  1837. tape->next_stage = NULL;
  1838. if (idetape_pipeline_active(tape))
  1839. idetape_wait_for_request(drive, tape->active_data_rq);
  1840. spin_unlock_irqrestore(&tape->lock, flags);
  1841. while (tape->first_stage != NULL) {
  1842. struct request *rq_ptr = &tape->first_stage->rq;
  1843. cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
  1844. if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
  1845. ++cnt;
  1846. idetape_remove_stage_head(drive);
  1847. }
  1848. tape->nr_pending_stages = 0;
  1849. tape->max_stages = tape->min_pipeline;
  1850. return cnt;
  1851. }
  1852. /*
  1853. * Position the tape to the requested block using the LOCATE packet command.
  1854. * A READ POSITION command is then issued to check where we are positioned. Like
  1855. * all higher level operations, we queue the commands at the tail of the request
  1856. * queue and wait for their completion.
  1857. */
  1858. static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
  1859. u8 partition, int skip)
  1860. {
  1861. idetape_tape_t *tape = drive->driver_data;
  1862. int retval;
  1863. struct ide_atapi_pc pc;
  1864. if (tape->chrdev_dir == IDETAPE_DIR_READ)
  1865. __idetape_discard_read_pipeline(drive);
  1866. idetape_wait_ready(drive, 60 * 5 * HZ);
  1867. idetape_create_locate_cmd(drive, &pc, block, partition, skip);
  1868. retval = idetape_queue_pc_tail(drive, &pc);
  1869. if (retval)
  1870. return (retval);
  1871. idetape_create_read_position_cmd(&pc);
  1872. return (idetape_queue_pc_tail(drive, &pc));
  1873. }
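/*
 * Sketch of how the LOCATE/READ POSITION pair above can be used to save and
 * later restore an absolute position (illustrative only; the error handling
 * is an assumption, not driver code).
 */
#if 0
static int idetape_example_save_restore_position(ide_drive_t *drive)
{
	int block = idetape_read_position(drive);

	if (block < 0)
		return -EIO;
	/* ... move elsewhere on the tape, e.g. to append data ... */
	return idetape_position_tape(drive, block, 0, 0);
}
#endif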
  1874. static void idetape_discard_read_pipeline(ide_drive_t *drive,
  1875. int restore_position)
  1876. {
  1877. idetape_tape_t *tape = drive->driver_data;
  1878. int cnt;
  1879. int seek, position;
  1880. cnt = __idetape_discard_read_pipeline(drive);
  1881. if (restore_position) {
  1882. position = idetape_read_position(drive);
  1883. seek = position > cnt ? position - cnt : 0;
  1884. if (idetape_position_tape(drive, seek, 0, 0)) {
  1885. printk(KERN_INFO "ide-tape: %s: position_tape failed in"
  1886. " discard_pipeline()\n", tape->name);
  1887. return;
  1888. }
  1889. }
  1890. }
  1891. /*
  1892. * Generate a read/write request for the block device interface and wait for it
  1893. * to be serviced.
  1894. */
  1895. static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
  1896. struct idetape_bh *bh)
  1897. {
  1898. idetape_tape_t *tape = drive->driver_data;
  1899. struct request rq;
  1900. debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
  1901. if (idetape_pipeline_active(tape)) {
  1902. printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
  1903. __func__);
  1904. return (0);
  1905. }
  1906. idetape_init_rq(&rq, cmd);
  1907. rq.rq_disk = tape->disk;
  1908. rq.special = (void *)bh;
  1909. rq.sector = tape->first_frame;
  1910. rq.nr_sectors = blocks;
  1911. rq.current_nr_sectors = blocks;
  1912. (void) ide_do_drive_cmd(drive, &rq, ide_wait);
  1913. if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
  1914. return 0;
  1915. if (tape->merge_stage)
  1916. idetape_init_merge_stage(tape);
  1917. if (rq.errors == IDETAPE_ERROR_GENERAL)
  1918. return -EIO;
  1919. return (tape->blk_size * (blocks-rq.current_nr_sectors));
  1920. }
  1921. /* start servicing the pipeline stages, starting from tape->next_stage. */
  1922. static void idetape_plug_pipeline(ide_drive_t *drive)
  1923. {
  1924. idetape_tape_t *tape = drive->driver_data;
  1925. if (tape->next_stage == NULL)
  1926. return;
  1927. if (!idetape_pipeline_active(tape)) {
  1928. set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
  1929. idetape_activate_next_stage(drive);
  1930. (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
  1931. }
  1932. }
  1933. static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
  1934. {
  1935. idetape_init_pc(pc);
  1936. pc->c[0] = INQUIRY;
  1937. pc->c[4] = 254;
  1938. pc->req_xfer = 254;
  1939. pc->idetape_callback = &idetape_pc_callback;
  1940. }
  1941. static void idetape_create_rewind_cmd(ide_drive_t *drive,
  1942. struct ide_atapi_pc *pc)
  1943. {
  1944. idetape_init_pc(pc);
  1945. pc->c[0] = REZERO_UNIT;
  1946. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1947. pc->idetape_callback = &idetape_pc_callback;
  1948. }
  1949. static void idetape_create_erase_cmd(struct ide_atapi_pc *pc)
  1950. {
  1951. idetape_init_pc(pc);
  1952. pc->c[0] = ERASE;
  1953. pc->c[1] = 1;
  1954. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1955. pc->idetape_callback = &idetape_pc_callback;
  1956. }
  1957. static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
  1958. {
  1959. idetape_init_pc(pc);
  1960. pc->c[0] = SPACE;
  1961. put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
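	/*
	 * c[1] is immediately reused for the space code below, so the 24-bit
	 * count effectively lives in c[2..4].
	 */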
  1962. pc->c[1] = cmd;
  1963. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1964. pc->idetape_callback = &idetape_pc_callback;
  1965. }
  1966. static void idetape_wait_first_stage(ide_drive_t *drive)
  1967. {
  1968. idetape_tape_t *tape = drive->driver_data;
  1969. unsigned long flags;
  1970. if (tape->first_stage == NULL)
  1971. return;
  1972. spin_lock_irqsave(&tape->lock, flags);
  1973. if (tape->active_stage == tape->first_stage)
  1974. idetape_wait_for_request(drive, tape->active_data_rq);
  1975. spin_unlock_irqrestore(&tape->lock, flags);
  1976. }
  1977. /*
  1978. * Try to add a character device originated write request to our pipeline. In
  1979. * case we don't succeed, we revert to non-pipelined operation mode for this
  1980. * request. In order to accomplish that, we
  1981. *
  1982. * 1. Try to allocate a new pipeline stage.
  1983. * 2. If we can't, wait for more and more requests to be serviced and try again
  1984. * each time.
1985. * 3. If we still can't allocate a stage, fall back to non-pipelined operation
  1986. * mode for this request.
  1987. */
  1988. static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
  1989. {
  1990. idetape_tape_t *tape = drive->driver_data;
  1991. idetape_stage_t *new_stage;
  1992. unsigned long flags;
  1993. struct request *rq;
  1994. debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
  1995. /* Attempt to allocate a new stage. Beware possible race conditions. */
  1996. while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) {
  1997. spin_lock_irqsave(&tape->lock, flags);
  1998. if (idetape_pipeline_active(tape)) {
  1999. idetape_wait_for_request(drive, tape->active_data_rq);
  2000. spin_unlock_irqrestore(&tape->lock, flags);
  2001. } else {
  2002. spin_unlock_irqrestore(&tape->lock, flags);
  2003. idetape_plug_pipeline(drive);
  2004. if (idetape_pipeline_active(tape))
  2005. continue;
  2006. /*
2007. * The machine is short on memory. Fall back to non-
2008. * pipelined operation mode for this request.
  2009. */
  2010. return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
  2011. blocks, tape->merge_stage->bh);
  2012. }
  2013. }
  2014. rq = &new_stage->rq;
  2015. idetape_init_rq(rq, REQ_IDETAPE_WRITE);
  2016. /* Doesn't actually matter - We always assume sequential access */
  2017. rq->sector = tape->first_frame;
  2018. rq->current_nr_sectors = blocks;
  2019. rq->nr_sectors = blocks;
  2020. idetape_switch_buffers(tape, new_stage);
  2021. idetape_add_stage_tail(drive, new_stage);
  2022. tape->pipeline_head++;
  2023. idetape_calculate_speeds(drive);
  2024. /*
  2025. * Estimate whether the tape has stopped writing by checking if our
  2026. * write pipeline is currently empty. If we are not writing anymore,
  2027. * wait for the pipeline to be almost completely full (90%) before
  2028. * starting to service requests, so that we will be able to keep up with
  2029. * the higher speeds of the tape.
  2030. */
  2031. if (!idetape_pipeline_active(tape)) {
  2032. if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
  2033. tape->nr_stages >= tape->max_stages -
  2034. tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
  2035. tape->blk_size) {
  2036. tape->measure_insert_time = 1;
  2037. tape->insert_time = jiffies;
  2038. tape->insert_size = 0;
  2039. tape->insert_speed = 0;
  2040. idetape_plug_pipeline(drive);
  2041. }
  2042. }
  2043. if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
  2044. /* Return a deferred error */
  2045. return -EIO;
  2046. return blocks;
  2047. }
  2048. /*
  2049. * Wait until all pending pipeline requests are serviced. Typically called on
  2050. * device close.
  2051. */
  2052. static void idetape_wait_for_pipeline(ide_drive_t *drive)
  2053. {
  2054. idetape_tape_t *tape = drive->driver_data;
  2055. unsigned long flags;
  2056. while (tape->next_stage || idetape_pipeline_active(tape)) {
  2057. idetape_plug_pipeline(drive);
  2058. spin_lock_irqsave(&tape->lock, flags);
  2059. if (idetape_pipeline_active(tape))
  2060. idetape_wait_for_request(drive, tape->active_data_rq);
  2061. spin_unlock_irqrestore(&tape->lock, flags);
  2062. }
  2063. }
  2064. static void idetape_empty_write_pipeline(ide_drive_t *drive)
  2065. {
  2066. idetape_tape_t *tape = drive->driver_data;
  2067. int blocks, min;
  2068. struct idetape_bh *bh;
  2069. if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
  2070. printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
  2071. " but we are not writing.\n");
  2072. return;
  2073. }
  2074. if (tape->merge_stage_size > tape->stage_size) {
  2075. printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
  2076. tape->merge_stage_size = tape->stage_size;
  2077. }
  2078. if (tape->merge_stage_size) {
  2079. blocks = tape->merge_stage_size / tape->blk_size;
  2080. if (tape->merge_stage_size % tape->blk_size) {
  2081. unsigned int i;
  2082. blocks++;
  2083. i = tape->blk_size - tape->merge_stage_size %
  2084. tape->blk_size;
  2085. bh = tape->bh->b_reqnext;
  2086. while (bh) {
  2087. atomic_set(&bh->b_count, 0);
  2088. bh = bh->b_reqnext;
  2089. }
  2090. bh = tape->bh;
  2091. while (i) {
  2092. if (bh == NULL) {
  2093. printk(KERN_INFO "ide-tape: bug,"
  2094. " bh NULL\n");
  2095. break;
  2096. }
  2097. min = min(i, (unsigned int)(bh->b_size -
  2098. atomic_read(&bh->b_count)));
  2099. memset(bh->b_data + atomic_read(&bh->b_count),
  2100. 0, min);
  2101. atomic_add(min, &bh->b_count);
  2102. i -= min;
  2103. bh = bh->b_reqnext;
  2104. }
  2105. }
  2106. (void) idetape_add_chrdev_write_request(drive, blocks);
  2107. tape->merge_stage_size = 0;
  2108. }
  2109. idetape_wait_for_pipeline(drive);
  2110. if (tape->merge_stage != NULL) {
  2111. __idetape_kfree_stage(tape->merge_stage);
  2112. tape->merge_stage = NULL;
  2113. }
  2114. clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
  2115. tape->chrdev_dir = IDETAPE_DIR_NONE;
  2116. /*
  2117. * On the next backup, perform the feedback loop again. (I don't want to
  2118. * keep sense information between backups, as some systems are
  2119. * constantly on, and the system load can be totally different on the
  2120. * next backup).
  2121. */
  2122. tape->max_stages = tape->min_pipeline;
  2123. if (tape->first_stage != NULL ||
  2124. tape->next_stage != NULL ||
  2125. tape->last_stage != NULL ||
  2126. tape->nr_stages != 0) {
  2127. printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
  2128. "first_stage %p, next_stage %p, "
  2129. "last_stage %p, nr_stages %d\n",
  2130. tape->first_stage, tape->next_stage,
  2131. tape->last_stage, tape->nr_stages);
  2132. }
  2133. }
  2134. static void idetape_restart_speed_control(ide_drive_t *drive)
  2135. {
  2136. idetape_tape_t *tape = drive->driver_data;
  2137. tape->restart_speed_control_req = 0;
  2138. tape->pipeline_head = 0;
  2139. tape->controlled_last_pipeline_head = 0;
  2140. tape->controlled_previous_pipeline_head = 0;
  2141. tape->uncontrolled_previous_pipeline_head = 0;
  2142. tape->controlled_pipeline_head_speed = 5000;
  2143. tape->pipeline_head_speed = 5000;
  2144. tape->uncontrolled_pipeline_head_speed = 0;
  2145. tape->controlled_pipeline_head_time =
  2146. tape->uncontrolled_pipeline_head_time = jiffies;
  2147. tape->controlled_previous_head_time =
  2148. tape->uncontrolled_previous_head_time = jiffies;
  2149. }
  2150. static int idetape_init_read(ide_drive_t *drive, int max_stages)
  2151. {
  2152. idetape_tape_t *tape = drive->driver_data;
  2153. idetape_stage_t *new_stage;
  2154. struct request rq;
  2155. int bytes_read;
  2156. u16 blocks = *(u16 *)&tape->caps[12];
  2157. /* Initialize read operation */
  2158. if (tape->chrdev_dir != IDETAPE_DIR_READ) {
  2159. if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
  2160. idetape_empty_write_pipeline(drive);
  2161. idetape_flush_tape_buffers(drive);
  2162. }
  2163. if (tape->merge_stage || tape->merge_stage_size) {
  2164. printk(KERN_ERR "ide-tape: merge_stage_size should be"
  2165. " 0 now\n");
  2166. tape->merge_stage_size = 0;
  2167. }
  2168. tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
  2169. if (!tape->merge_stage)
  2170. return -ENOMEM;
  2171. tape->chrdev_dir = IDETAPE_DIR_READ;
  2172. /*
  2173. * Issue a read 0 command to ensure that DSC handshake is
  2174. * switched from completion mode to buffer available mode.
  2175. * No point in issuing this if DSC overlap isn't supported, some
  2176. * drives (Seagate STT3401A) will return an error.
  2177. */
  2178. if (drive->dsc_overlap) {
  2179. bytes_read = idetape_queue_rw_tail(drive,
  2180. REQ_IDETAPE_READ, 0,
  2181. tape->merge_stage->bh);
  2182. if (bytes_read < 0) {
  2183. __idetape_kfree_stage(tape->merge_stage);
  2184. tape->merge_stage = NULL;
  2185. tape->chrdev_dir = IDETAPE_DIR_NONE;
  2186. return bytes_read;
  2187. }
  2188. }
  2189. }
  2190. if (tape->restart_speed_control_req)
  2191. idetape_restart_speed_control(drive);
  2192. idetape_init_rq(&rq, REQ_IDETAPE_READ);
  2193. rq.sector = tape->first_frame;
  2194. rq.nr_sectors = blocks;
  2195. rq.current_nr_sectors = blocks;
  2196. if (!test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags) &&
  2197. tape->nr_stages < max_stages) {
  2198. new_stage = idetape_kmalloc_stage(tape);
  2199. while (new_stage != NULL) {
  2200. new_stage->rq = rq;
  2201. idetape_add_stage_tail(drive, new_stage);
  2202. if (tape->nr_stages >= max_stages)
  2203. break;
  2204. new_stage = idetape_kmalloc_stage(tape);
  2205. }
  2206. }
  2207. if (!idetape_pipeline_active(tape)) {
  2208. if (tape->nr_pending_stages >= 3 * max_stages / 4) {
  2209. tape->measure_insert_time = 1;
  2210. tape->insert_time = jiffies;
  2211. tape->insert_size = 0;
  2212. tape->insert_speed = 0;
  2213. idetape_plug_pipeline(drive);
  2214. }
  2215. }
  2216. return 0;
  2217. }
  2218. /*
  2219. * Called from idetape_chrdev_read() to service a character device read request
  2220. * and add read-ahead requests to our pipeline.
  2221. */
  2222. static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
  2223. {
  2224. idetape_tape_t *tape = drive->driver_data;
  2225. unsigned long flags;
  2226. struct request *rq_ptr;
  2227. int bytes_read;
  2228. debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
  2229. /* If we are at a filemark, return a read length of 0 */
  2230. if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
  2231. return 0;
  2232. /* Wait for the next block to reach the head of the pipeline. */
  2233. idetape_init_read(drive, tape->max_stages);
  2234. if (tape->first_stage == NULL) {
  2235. if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
  2236. return 0;
  2237. return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
  2238. tape->merge_stage->bh);
  2239. }
  2240. idetape_wait_first_stage(drive);
  2241. rq_ptr = &tape->first_stage->rq;
  2242. bytes_read = tape->blk_size * (rq_ptr->nr_sectors -
  2243. rq_ptr->current_nr_sectors);
  2244. rq_ptr->nr_sectors = 0;
  2245. rq_ptr->current_nr_sectors = 0;
  2246. if (rq_ptr->errors == IDETAPE_ERROR_EOD)
  2247. return 0;
  2248. else {
  2249. idetape_switch_buffers(tape, tape->first_stage);
  2250. if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
  2251. set_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
  2252. spin_lock_irqsave(&tape->lock, flags);
  2253. idetape_remove_stage_head(drive);
  2254. spin_unlock_irqrestore(&tape->lock, flags);
  2255. tape->pipeline_head++;
  2256. idetape_calculate_speeds(drive);
  2257. }
  2258. if (bytes_read > blocks * tape->blk_size) {
  2259. printk(KERN_ERR "ide-tape: bug: trying to return more bytes"
  2260. " than requested\n");
  2261. bytes_read = blocks * tape->blk_size;
  2262. }
  2263. return (bytes_read);
  2264. }
  2265. static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
  2266. {
  2267. idetape_tape_t *tape = drive->driver_data;
  2268. struct idetape_bh *bh;
  2269. int blocks;
  2270. while (bcount) {
  2271. unsigned int count;
  2272. bh = tape->merge_stage->bh;
  2273. count = min(tape->stage_size, bcount);
  2274. bcount -= count;
  2275. blocks = count / tape->blk_size;
  2276. while (count) {
  2277. atomic_set(&bh->b_count,
  2278. min(count, (unsigned int)bh->b_size));
  2279. memset(bh->b_data, 0, atomic_read(&bh->b_count));
  2280. count -= atomic_read(&bh->b_count);
  2281. bh = bh->b_reqnext;
  2282. }
  2283. idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
  2284. tape->merge_stage->bh);
  2285. }
  2286. }
  2287. static int idetape_pipeline_size(ide_drive_t *drive)
  2288. {
  2289. idetape_tape_t *tape = drive->driver_data;
  2290. idetape_stage_t *stage;
  2291. struct request *rq;
  2292. int size = 0;
  2293. idetape_wait_for_pipeline(drive);
  2294. stage = tape->first_stage;
  2295. while (stage != NULL) {
  2296. rq = &stage->rq;
  2297. size += tape->blk_size * (rq->nr_sectors -
  2298. rq->current_nr_sectors);
  2299. if (rq->errors == IDETAPE_ERROR_FILEMARK)
  2300. size += tape->blk_size;
  2301. stage = stage->next;
  2302. }
  2303. size += tape->merge_stage_size;
  2304. return size;
  2305. }
  2306. /*
  2307. * Rewinds the tape to the Beginning Of the current Partition (BOP). We
  2308. * currently support only one partition.
  2309. */
  2310. static int idetape_rewind_tape(ide_drive_t *drive)
  2311. {
  2312. int retval;
  2313. struct ide_atapi_pc pc;
  2314. idetape_tape_t *tape;
  2315. tape = drive->driver_data;
  2316. debug_log(DBG_SENSE, "Enter %s\n", __func__);
  2317. idetape_create_rewind_cmd(drive, &pc);
  2318. retval = idetape_queue_pc_tail(drive, &pc);
  2319. if (retval)
  2320. return retval;
  2321. idetape_create_read_position_cmd(&pc);
  2322. retval = idetape_queue_pc_tail(drive, &pc);
  2323. if (retval)
  2324. return retval;
  2325. return 0;
  2326. }
  2327. /* mtio.h compatible commands should be issued to the chrdev interface. */
  2328. static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
  2329. unsigned long arg)
  2330. {
  2331. idetape_tape_t *tape = drive->driver_data;
  2332. void __user *argp = (void __user *)arg;
  2333. struct idetape_config {
  2334. int dsc_rw_frequency;
  2335. int dsc_media_access_frequency;
  2336. int nr_stages;
  2337. } config;
  2338. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  2339. switch (cmd) {
  2340. case 0x0340:
  2341. if (copy_from_user(&config, argp, sizeof(config)))
  2342. return -EFAULT;
  2343. tape->best_dsc_rw_freq = config.dsc_rw_frequency;
  2344. tape->max_stages = config.nr_stages;
  2345. break;
  2346. case 0x0350:
  2347. config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
  2348. config.nr_stages = tape->max_stages;
  2349. if (copy_to_user(argp, &config, sizeof(config)))
  2350. return -EFAULT;
  2351. break;
  2352. default:
  2353. return -EIO;
  2354. }
  2355. return 0;
  2356. }
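/*
 * Userspace sketch of the mtio.h route referred to above (illustrative; the
 * non-rewinding device node name is an assumption - ide-tape conventionally
 * registers /dev/htN and /dev/nhtN):
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mtio.h>
#include <unistd.h>

int example_write_two_filemarks(void)
{
	struct mtop op = { .mt_op = MTWEOF, .mt_count = 2 };
	int fd = open("/dev/nht0", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, MTIOCTOP, &op) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif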
  2357. /*
  2358. * The function below is now a bit more complicated than just passing the
  2359. * command to the tape since we may have crossed some filemarks during our
  2360. * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to
  2361. * support MTFSFM when the filemark is in our internal pipeline even if the tape
  2362. * doesn't support spacing over filemarks in the reverse direction.
  2363. */
  2364. static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
  2365. int mt_count)
  2366. {
  2367. idetape_tape_t *tape = drive->driver_data;
  2368. struct ide_atapi_pc pc;
  2369. unsigned long flags;
  2370. int retval, count = 0;
  2371. int sprev = !!(tape->caps[4] & 0x20);
  2372. if (mt_count == 0)
  2373. return 0;
  2374. if (MTBSF == mt_op || MTBSFM == mt_op) {
  2375. if (!sprev)
  2376. return -EIO;
  2377. mt_count = -mt_count;
  2378. }
  2379. if (tape->chrdev_dir == IDETAPE_DIR_READ) {
2380. /* it's a read-ahead buffer, scan it for crossed filemarks. */
  2381. tape->merge_stage_size = 0;
  2382. if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
  2383. ++count;
  2384. while (tape->first_stage != NULL) {
  2385. if (count == mt_count) {
  2386. if (mt_op == MTFSFM)
  2387. set_bit(IDETAPE_FLAG_FILEMARK,
  2388. &tape->flags);
  2389. return 0;
  2390. }
  2391. spin_lock_irqsave(&tape->lock, flags);
  2392. if (tape->first_stage == tape->active_stage) {
  2393. /*
  2394. * We have reached the active stage in the read
  2395. * pipeline. There is no point in allowing the
  2396. * drive to continue reading any farther, so we
  2397. * stop the pipeline.
  2398. *
  2399. * This section should be moved to a separate
  2400. * subroutine because similar operations are
  2401. * done in __idetape_discard_read_pipeline(),
  2402. * for example.
  2403. */
  2404. tape->next_stage = NULL;
  2405. spin_unlock_irqrestore(&tape->lock, flags);
  2406. idetape_wait_first_stage(drive);
  2407. tape->next_stage = tape->first_stage->next;
  2408. } else
  2409. spin_unlock_irqrestore(&tape->lock, flags);
  2410. if (tape->first_stage->rq.errors ==
  2411. IDETAPE_ERROR_FILEMARK)
  2412. ++count;
  2413. idetape_remove_stage_head(drive);
  2414. }
  2415. idetape_discard_read_pipeline(drive, 0);
  2416. }
  2417. /*
  2418. * The filemark was not found in our internal pipeline; now we can issue
  2419. * the space command.
  2420. */
  2421. switch (mt_op) {
  2422. case MTFSF:
  2423. case MTBSF:
  2424. idetape_create_space_cmd(&pc, mt_count - count,
  2425. IDETAPE_SPACE_OVER_FILEMARK);
  2426. return idetape_queue_pc_tail(drive, &pc);
  2427. case MTFSFM:
  2428. case MTBSFM:
  2429. if (!sprev)
  2430. return -EIO;
  2431. retval = idetape_space_over_filemarks(drive, MTFSF,
  2432. mt_count - count);
  2433. if (retval)
  2434. return retval;
  2435. count = (MTBSFM == mt_op ? 1 : -1);
  2436. return idetape_space_over_filemarks(drive, MTFSF, count);
  2437. default:
  2438. printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
  2439. mt_op);
  2440. return -EIO;
  2441. }
  2442. }
  2443. /*
  2444. * Our character device read / write functions.
  2445. *
  2446. * The tape is optimized to maximize throughput when it is transferring an
  2447. * integral number of the "continuous transfer limit", which is a parameter of
  2448. * the specific tape (26kB on my particular tape, 32kB for Onstream).
  2449. *
  2450. * As of version 1.3 of the driver, the character device provides an abstract
  2451. * continuous view of the media - any mix of block sizes (even 1 byte) on the
  2452. * same backup/restore procedure is supported. The driver will internally
2453. * convert the requests to the recommended transfer unit, so that a mismatch
2454. * between the user's block size and the recommended size will only result in
2455. * (slightly) increased driver overhead, but will no longer hurt performance.
  2456. * This is not applicable to Onstream.
  2457. */
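/*
 * Userspace sketch of the "any block size" behaviour described above
 * (illustrative; device node and buffer size are assumptions). A request that
 * is not a multiple of the tape's block size only costs some extra copying in
 * the driver's merge buffer.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

ssize_t example_odd_sized_read(void)
{
	char buf[10000];	/* deliberately not a multiple of the block size */
	int fd = open("/dev/ht0", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = read(fd, buf, sizeof(buf));
	close(fd);
	return n;
}
#endif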
  2458. static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
  2459. size_t count, loff_t *ppos)
  2460. {
  2461. struct ide_tape_obj *tape = ide_tape_f(file);
  2462. ide_drive_t *drive = tape->drive;
  2463. ssize_t bytes_read, temp, actually_read = 0, rc;
  2464. ssize_t ret = 0;
  2465. u16 ctl = *(u16 *)&tape->caps[12];
  2466. debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
  2467. if (tape->chrdev_dir != IDETAPE_DIR_READ) {
  2468. if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags))
  2469. if (count > tape->blk_size &&
  2470. (count % tape->blk_size) == 0)
  2471. tape->user_bs_factor = count / tape->blk_size;
  2472. }
  2473. rc = idetape_init_read(drive, tape->max_stages);
  2474. if (rc < 0)
  2475. return rc;
  2476. if (count == 0)
  2477. return (0);
  2478. if (tape->merge_stage_size) {
  2479. actually_read = min((unsigned int)(tape->merge_stage_size),
  2480. (unsigned int)count);
  2481. if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
  2482. actually_read))
  2483. ret = -EFAULT;
  2484. buf += actually_read;
  2485. tape->merge_stage_size -= actually_read;
  2486. count -= actually_read;
  2487. }
  2488. while (count >= tape->stage_size) {
  2489. bytes_read = idetape_add_chrdev_read_request(drive, ctl);
  2490. if (bytes_read <= 0)
  2491. goto finish;
  2492. if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
  2493. bytes_read))
  2494. ret = -EFAULT;
  2495. buf += bytes_read;
  2496. count -= bytes_read;
  2497. actually_read += bytes_read;
  2498. }
  2499. if (count) {
  2500. bytes_read = idetape_add_chrdev_read_request(drive, ctl);
  2501. if (bytes_read <= 0)
  2502. goto finish;
  2503. temp = min((unsigned long)count, (unsigned long)bytes_read);
  2504. if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
  2505. temp))
  2506. ret = -EFAULT;
  2507. actually_read += temp;
  2508. tape->merge_stage_size = bytes_read-temp;
  2509. }
  2510. finish:
  2511. if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
  2512. debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
  2513. idetape_space_over_filemarks(drive, MTFSF, 1);
  2514. return 0;
  2515. }
  2516. return ret ? ret : actually_read;
  2517. }
static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct ide_tape_obj *tape = ide_tape_f(file);
	ide_drive_t *drive = tape->drive;
	ssize_t actually_written = 0;
	ssize_t ret = 0;
	u16 ctl = *(u16 *)&tape->caps[12];

	/* The drive is write protected. */
	if (tape->write_prot)
		return -EACCES;

	debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);

	/* Initialize write operation */
	if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
		if (tape->chrdev_dir == IDETAPE_DIR_READ)
			idetape_discard_read_pipeline(drive, 1);
		if (tape->merge_stage || tape->merge_stage_size) {
			printk(KERN_ERR "ide-tape: merge_stage_size "
				"should be 0 now\n");
			tape->merge_stage_size = 0;
		}
		tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
		if (!tape->merge_stage)
			return -ENOMEM;
		tape->chrdev_dir = IDETAPE_DIR_WRITE;
		idetape_init_merge_stage(tape);

		/*
		 * Issue a write 0 command to ensure that DSC handshake is
		 * switched from completion mode to buffer available mode. No
		 * point in issuing this if DSC overlap isn't supported, some
		 * drives (Seagate STT3401A) will return an error.
		 */
		if (drive->dsc_overlap) {
			ssize_t retval = idetape_queue_rw_tail(drive,
							REQ_IDETAPE_WRITE, 0,
							tape->merge_stage->bh);
			if (retval < 0) {
				__idetape_kfree_stage(tape->merge_stage);
				tape->merge_stage = NULL;
				tape->chrdev_dir = IDETAPE_DIR_NONE;
				return retval;
			}
		}
	}
	if (count == 0)
		return (0);
	if (tape->restart_speed_control_req)
		idetape_restart_speed_control(drive);
	if (tape->merge_stage_size) {
		if (tape->merge_stage_size >= tape->stage_size) {
			printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
			tape->merge_stage_size = 0;
		}
		actually_written = min((unsigned int)
				(tape->stage_size - tape->merge_stage_size),
				(unsigned int)count);
		if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
						 actually_written))
			ret = -EFAULT;
		buf += actually_written;
		tape->merge_stage_size += actually_written;
		count -= actually_written;

		if (tape->merge_stage_size == tape->stage_size) {
			ssize_t retval;
			tape->merge_stage_size = 0;
			retval = idetape_add_chrdev_write_request(drive, ctl);
			if (retval <= 0)
				return (retval);
		}
	}
	while (count >= tape->stage_size) {
		ssize_t retval;
		if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
						 tape->stage_size))
			ret = -EFAULT;
		buf += tape->stage_size;
		count -= tape->stage_size;
		retval = idetape_add_chrdev_write_request(drive, ctl);
		actually_written += tape->stage_size;
		if (retval <= 0)
			return (retval);
	}
	if (count) {
		actually_written += count;
		if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
						 count))
			ret = -EFAULT;
		tape->merge_stage_size += count;
	}
	return ret ? ret : actually_written;
}
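
/* Queue a single WRITE FILEMARK packet command and wait for it to finish. */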
static int idetape_write_filemark(ide_drive_t *drive)
{
	struct ide_atapi_pc pc;

	/* Write a filemark */
	idetape_create_write_filemark_cmd(drive, &pc, 1);
	if (idetape_queue_pc_tail(drive, &pc)) {
		printk(KERN_ERR "ide-tape: Couldn't write a filemark\n");
		return -EIO;
	}
	return 0;
}

/*
 * Called from idetape_chrdev_ioctl when the general mtio MTIOCTOP ioctl is
 * requested.
 *
 * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
 * spacing over filemarks in the reverse direction. In this case, MTFSFM is
 * also usually not supported (it is supported in the rare case in which we
 * crossed the filemark during our read-ahead pipelined operation mode).
 *
 * The following commands are currently not supported:
 *
 * MTFSS, MTBSS, MTWSM, MTSETDENSITY, MTSETDRVBUFFER, MT_ST_BOOLEANS,
 * MT_ST_WRITE_THRESHOLD.
 */
static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc pc;
	int i, retval;

	debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
			mt_op, mt_count);

	/* Commands which need our pipelined read-ahead stages. */
	switch (mt_op) {
	case MTFSF:
	case MTFSFM:
	case MTBSF:
	case MTBSFM:
		if (!mt_count)
			return 0;
		return idetape_space_over_filemarks(drive, mt_op, mt_count);
	default:
		break;
	}

	switch (mt_op) {
	case MTWEOF:
		if (tape->write_prot)
			return -EACCES;
		idetape_discard_read_pipeline(drive, 1);
		for (i = 0; i < mt_count; i++) {
			retval = idetape_write_filemark(drive);
			if (retval)
				return retval;
		}
		return 0;
	case MTREW:
		idetape_discard_read_pipeline(drive, 0);
		if (idetape_rewind_tape(drive))
			return -EIO;
		return 0;
	case MTLOAD:
		idetape_discard_read_pipeline(drive, 0);
		idetape_create_load_unload_cmd(drive, &pc,
					       IDETAPE_LU_LOAD_MASK);
		return idetape_queue_pc_tail(drive, &pc);
	case MTUNLOAD:
	case MTOFFL:
		/*
		 * If door is locked, attempt to unlock before
		 * attempting to eject.
		 */
		if (tape->door_locked) {
			if (idetape_create_prevent_cmd(drive, &pc, 0))
				if (!idetape_queue_pc_tail(drive, &pc))
					tape->door_locked = DOOR_UNLOCKED;
		}
		idetape_discard_read_pipeline(drive, 0);
		idetape_create_load_unload_cmd(drive, &pc,
					       !IDETAPE_LU_LOAD_MASK);
		retval = idetape_queue_pc_tail(drive, &pc);
		if (!retval)
			clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
		return retval;
	case MTNOP:
		idetape_discard_read_pipeline(drive, 0);
		return idetape_flush_tape_buffers(drive);
	case MTRETEN:
		idetape_discard_read_pipeline(drive, 0);
		idetape_create_load_unload_cmd(drive, &pc,
			IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
		return idetape_queue_pc_tail(drive, &pc);
	case MTEOM:
		idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
		return idetape_queue_pc_tail(drive, &pc);
	case MTERASE:
		(void)idetape_rewind_tape(drive);
		idetape_create_erase_cmd(&pc);
		return idetape_queue_pc_tail(drive, &pc);
	case MTSETBLK:
		if (mt_count) {
			if (mt_count < tape->blk_size ||
			    mt_count % tape->blk_size)
				return -EIO;
			tape->user_bs_factor = mt_count / tape->blk_size;
			clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
		} else
			set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
		return 0;
	case MTSEEK:
		idetape_discard_read_pipeline(drive, 0);
		return idetape_position_tape(drive,
			mt_count * tape->user_bs_factor, tape->partition, 0);
	case MTSETPART:
		idetape_discard_read_pipeline(drive, 0);
		return idetape_position_tape(drive, 0, mt_count, 0);
	case MTFSR:
	case MTBSR:
	case MTLOCK:
		if (!idetape_create_prevent_cmd(drive, &pc, 1))
			return 0;
		retval = idetape_queue_pc_tail(drive, &pc);
		if (retval)
			return retval;
		tape->door_locked = DOOR_EXPLICITLY_LOCKED;
		return 0;
	case MTUNLOCK:
		if (!idetape_create_prevent_cmd(drive, &pc, 0))
			return 0;
		retval = idetape_queue_pc_tail(drive, &pc);
		if (retval)
			return retval;
		tape->door_locked = DOOR_UNLOCKED;
		return 0;
	default:
		printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
				mt_op);
		return -EIO;
	}
}

/*
 * Our character device ioctls. General mtio.h magnetic io commands are
 * supported here, and not in the corresponding block interface. Our own
 * ide-tape ioctls are supported on both interfaces.
 */
static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct ide_tape_obj *tape = ide_tape_f(file);
	ide_drive_t *drive = tape->drive;
	struct mtop mtop;
	struct mtget mtget;
	struct mtpos mtpos;
	int block_offset = 0, position = tape->first_frame;
	void __user *argp = (void __user *)arg;

	debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);

	tape->restart_speed_control_req = 1;
	if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
		idetape_empty_write_pipeline(drive);
		idetape_flush_tape_buffers(drive);
	}
	if (cmd == MTIOCGET || cmd == MTIOCPOS) {
		block_offset = idetape_pipeline_size(drive) /
			(tape->blk_size * tape->user_bs_factor);
		position = idetape_read_position(drive);
		if (position < 0)
			return -EIO;
	}
	switch (cmd) {
	case MTIOCTOP:
		if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
			return -EFAULT;
		return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
	case MTIOCGET:
		memset(&mtget, 0, sizeof(struct mtget));
		mtget.mt_type = MT_ISSCSI2;
		mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
		mtget.mt_dsreg =
			((tape->blk_size * tape->user_bs_factor)
			 << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;

		if (tape->drv_write_prot)
			mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);

		if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
			return -EFAULT;
		return 0;
	case MTIOCPOS:
		mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
		if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
			return -EFAULT;
		return 0;
	default:
		if (tape->chrdev_dir == IDETAPE_DIR_READ)
			idetape_discard_read_pipeline(drive, 1);
		return idetape_blkdev_ioctl(drive, cmd, arg);
	}
}

/*
 * Do a mode sense page 0 with block descriptor and if it succeeds set the tape
 * block size with the reported value.
 */
static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc pc;

	idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
	if (idetape_queue_pc_tail(drive, &pc)) {
		printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
		if (tape->blk_size == 0) {
			printk(KERN_WARNING "ide-tape: Cannot deal with zero "
					    "block size, assuming 32k\n");
			tape->blk_size = 32768;
		}
		return;
	}
	tape->blk_size = (pc.buf[4 + 5] << 16) +
			 (pc.buf[4 + 6] << 8) +
			  pc.buf[4 + 7];
	tape->drv_write_prot = (pc.buf[2] & 0x80) >> 7;
}
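
/*
 * Character device open. Takes a reference on the tape, marks it busy, waits
 * for the drive to become ready, reads the current position and block size,
 * honours write protection and locks the door for the duration of the access.
 */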
static int idetape_chrdev_open(struct inode *inode, struct file *filp)
{
	unsigned int minor = iminor(inode), i = minor & ~0xc0;
	ide_drive_t *drive;
	idetape_tape_t *tape;
	struct ide_atapi_pc pc;
	int retval;

	if (i >= MAX_HWIFS * MAX_DRIVES)
		return -ENXIO;

	tape = ide_tape_chrdev_get(i);
	if (!tape)
		return -ENXIO;

	debug_log(DBG_CHRDEV, "Enter %s\n", __func__);

	/*
	 * We really want to do nonseekable_open(inode, filp); here, but some
	 * versions of tar incorrectly call lseek on tapes and bail out if that
	 * fails. So we disallow pread() and pwrite(), but permit lseeks.
	 */
	filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);

	drive = tape->drive;

	filp->private_data = tape;

	if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) {
		retval = -EBUSY;
		goto out_put_tape;
	}

	retval = idetape_wait_ready(drive, 60 * HZ);
	if (retval) {
		clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
		printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
		goto out_put_tape;
	}

	idetape_read_position(drive);
	if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
		(void)idetape_rewind_tape(drive);

	if (tape->chrdev_dir != IDETAPE_DIR_READ)
		clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);

	/* Read block size and write protect status from drive. */
	ide_tape_get_bsize_from_bdesc(drive);

	/* Set write protect flag if device is opened as read-only. */
	if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
		tape->write_prot = 1;
	else
		tape->write_prot = tape->drv_write_prot;

	/* Make sure drive isn't write protected if user wants to write. */
	if (tape->write_prot) {
		if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
		    (filp->f_flags & O_ACCMODE) == O_RDWR) {
			clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
			retval = -EROFS;
			goto out_put_tape;
		}
	}

	/* Lock the tape drive door so user can't eject. */
	if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
		if (idetape_create_prevent_cmd(drive, &pc, 1)) {
			if (!idetape_queue_pc_tail(drive, &pc)) {
				if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
					tape->door_locked = DOOR_LOCKED;
			}
		}
	}
	idetape_restart_speed_control(drive);
	tape->restart_speed_control_req = 0;
	return 0;

out_put_tape:
	ide_tape_put(tape);
	return retval;
}
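
/*
 * Finish a write session: empty the write pipeline, pad the final block with
 * zeros, write a filemark and flush the drive's internal buffers.
 */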
static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
{
	idetape_tape_t *tape = drive->driver_data;

	idetape_empty_write_pipeline(drive);
	tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
	if (tape->merge_stage != NULL) {
		idetape_pad_zeros(drive, tape->blk_size *
				(tape->user_bs_factor - 1));
		__idetape_kfree_stage(tape->merge_stage);
		tape->merge_stage = NULL;
	}
	idetape_write_filemark(drive);
	idetape_flush_tape_buffers(drive);
	idetape_flush_tape_buffers(drive);
}
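
/*
 * Character device release. Completes or discards any pending I/O, frees the
 * cache stage, rewinds the tape on the rewinding (minor < 128) nodes, unlocks
 * the door and drops the reference taken in open.
 */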
static int idetape_chrdev_release(struct inode *inode, struct file *filp)
{
	struct ide_tape_obj *tape = ide_tape_f(filp);
	ide_drive_t *drive = tape->drive;
	struct ide_atapi_pc pc;
	unsigned int minor = iminor(inode);

	lock_kernel();
	tape = drive->driver_data;

	debug_log(DBG_CHRDEV, "Enter %s\n", __func__);

	if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
		idetape_write_release(drive, minor);
	if (tape->chrdev_dir == IDETAPE_DIR_READ) {
		if (minor < 128)
			idetape_discard_read_pipeline(drive, 1);
		else
			idetape_wait_for_pipeline(drive);
	}
	if (tape->cache_stage != NULL) {
		__idetape_kfree_stage(tape->cache_stage);
		tape->cache_stage = NULL;
	}
	if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
		(void) idetape_rewind_tape(drive);
	if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
		if (tape->door_locked == DOOR_LOCKED) {
			if (idetape_create_prevent_cmd(drive, &pc, 0)) {
				if (!idetape_queue_pc_tail(drive, &pc))
					tape->door_locked = DOOR_UNLOCKED;
			}
		}
	}
	clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
	ide_tape_put(tape);
	unlock_kernel();
	return 0;
}

/*
 * Check the contents of the ATAPI IDENTIFY command results. We return:
 *
 * 1 - If the tape can be supported by us, based on the information we have so
 *     far.
 *
 * 0 - If this tape drive is not currently supported by us.
 */
static int idetape_identify_device(ide_drive_t *drive)
{
	u8 gcw[2], protocol, device_type, removable, packet_size;

	if (drive->id_read == 0)
		return 1;

	*((unsigned short *) &gcw) = drive->id->config;

	protocol = (gcw[1] & 0xC0) >> 6;
	device_type = gcw[1] & 0x1F;
	removable = !!(gcw[0] & 0x80);
	packet_size = gcw[0] & 0x3;

	/* Check that we can support this device */
	if (protocol != 2)
		printk(KERN_ERR "ide-tape: Protocol (0x%02x) is not ATAPI\n",
				protocol);
	else if (device_type != 1)
		printk(KERN_ERR "ide-tape: Device type (0x%02x) is not set "
				"to tape\n", device_type);
	else if (!removable)
		printk(KERN_ERR "ide-tape: The removable flag is not set\n");
	else if (packet_size != 0) {
		printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12"
				" bytes\n", packet_size);
	} else
		return 1;
	return 0;
}
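
/*
 * Issue an INQUIRY packet command and log the vendor, product and firmware
 * revision strings reported by the drive.
 */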
static void idetape_get_inquiry_results(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc pc;
	char fw_rev[6], vendor_id[10], product_id[18];

	idetape_create_inquiry_cmd(&pc);
	if (idetape_queue_pc_tail(drive, &pc)) {
		printk(KERN_ERR "ide-tape: %s: can't get INQUIRY results\n",
				tape->name);
		return;
	}
	memcpy(vendor_id, &pc.buf[8], 8);
	memcpy(product_id, &pc.buf[16], 16);
	memcpy(fw_rev, &pc.buf[32], 4);

	ide_fixstring(vendor_id, 10, 0);
	ide_fixstring(product_id, 18, 0);
	ide_fixstring(fw_rev, 6, 0);

	printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
			drive->name, tape->name, vendor_id,
			product_id, fw_rev);
}

/*
 * Ask the tape about its various parameters. In particular, we will adjust our
 * data transfer buffer size to the recommended value as returned by the tape.
 */
static void idetape_get_mode_sense_results(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc pc;
	u8 *caps;
	u16 speed, max_speed;	/* 16-bit fields; avoid truncating be16_to_cpu() results */

	idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
	if (idetape_queue_pc_tail(drive, &pc)) {
		printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
				" some default values\n");
		tape->blk_size = 512;
		put_unaligned(52, (u16 *)&tape->caps[12]);
		put_unaligned(540, (u16 *)&tape->caps[14]);
		put_unaligned(6*52, (u16 *)&tape->caps[16]);
		return;
	}
	caps = pc.buf + 4 + pc.buf[3];

	/* convert to host order and save for later use */
	speed = be16_to_cpu(*(u16 *)&caps[14]);
	max_speed = be16_to_cpu(*(u16 *)&caps[8]);

	put_unaligned(max_speed, (u16 *)&caps[8]);
	put_unaligned(be16_to_cpu(*(u16 *)&caps[12]), (u16 *)&caps[12]);
	put_unaligned(speed, (u16 *)&caps[14]);
	put_unaligned(be16_to_cpu(*(u16 *)&caps[16]), (u16 *)&caps[16]);

	if (!speed) {
		printk(KERN_INFO "ide-tape: %s: invalid tape speed "
				"(assuming 650KB/sec)\n", drive->name);
		put_unaligned(650, (u16 *)&caps[14]);
	}
	if (!max_speed) {
		printk(KERN_INFO "ide-tape: %s: invalid max_speed "
				"(assuming 650KB/sec)\n", drive->name);
		put_unaligned(650, (u16 *)&caps[8]);
	}

	memcpy(&tape->caps, caps, 20);
	if (caps[7] & 0x02)
		tape->blk_size = 512;
	else if (caps[7] & 0x04)
		tape->blk_size = 1024;
}
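
/*
 * Export the driver tunables (pipeline sizes, DSC polling frequency, transfer
 * speeds, debug mask) through the IDE settings interface when
 * CONFIG_IDE_PROC_FS is enabled.
 */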
#ifdef CONFIG_IDE_PROC_FS
static void idetape_add_settings(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
			1, 2, (u16 *)&tape->caps[16], NULL);
	ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
			tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
	ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
			tape->stage_size / 1024, 1, &tape->max_stages, NULL);
	ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
			tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
	ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
			0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
			NULL);
	ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
			0xffff, tape->stage_size / 1024, 1,
			&tape->nr_pending_stages, NULL);
	ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
			1, 1, (u16 *)&tape->caps[14], NULL);
	ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
			1024, &tape->stage_size, NULL);
	ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
			IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
			NULL);
	ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
			1, &drive->dsc_overlap, NULL);
	ide_add_setting(drive, "pipeline_head_speed_c", SETTING_READ, TYPE_INT,
			0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed,
			NULL);
	ide_add_setting(drive, "pipeline_head_speed_u", SETTING_READ, TYPE_INT,
			0, 0xffff, 1, 1,
			&tape->uncontrolled_pipeline_head_speed, NULL);
	ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
			1, 1, &tape->avg_speed, NULL);
	ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
			1, &tape->debug_mask, NULL);
}
#else
static inline void idetape_add_settings(ide_drive_t *drive) { ; }
#endif

/*
 * The function below is called to:
 *
 * 1. Initialize our various state variables.
 * 2. Ask the tape for its capabilities.
 * 3. Allocate a buffer which will be used for data transfer. The buffer size
 *    is chosen based on the recommendation which we received in step 2.
 *
 * Note that at this point ide.c already assigned us an irq, so that we can
 * queue requests here and wait for their completion.
 */
static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
{
	unsigned long t1, tmid, tn, t;
	int speed;
	int stage_size;
	u8 gcw[2];
	struct sysinfo si;
	u16 *ctl = (u16 *)&tape->caps[12];

	spin_lock_init(&tape->lock);
	drive->dsc_overlap = 1;
	if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
		printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
				 tape->name);
		drive->dsc_overlap = 0;
	}
	/* Seagate Travan drives do not support DSC overlap. */
	if (strstr(drive->id->model, "Seagate STT3401"))
		drive->dsc_overlap = 0;
	tape->minor = minor;
	tape->name[0] = 'h';
	tape->name[1] = 't';
	tape->name[2] = '0' + minor;
	tape->chrdev_dir = IDETAPE_DIR_NONE;
	tape->pc = tape->pc_stack;
	tape->max_insert_speed = 10000;
	tape->speed_control = 1;
	*((unsigned short *) &gcw) = drive->id->config;

	/* Command packet DRQ type */
	if (((gcw[0] & 0x60) >> 5) == 1)
		set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);

	tape->min_pipeline = 10;
	tape->max_pipeline = 10;
	tape->max_stages = 10;

	idetape_get_inquiry_results(drive);
	idetape_get_mode_sense_results(drive);
	ide_tape_get_bsize_from_bdesc(drive);
	tape->user_bs_factor = 1;
	tape->stage_size = *ctl * tape->blk_size;
	while (tape->stage_size > 0xffff) {
		printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
		*ctl /= 2;
		tape->stage_size = *ctl * tape->blk_size;
	}
	stage_size = tape->stage_size;
	tape->pages_per_stage = stage_size / PAGE_SIZE;
	if (stage_size % PAGE_SIZE) {
		tape->pages_per_stage++;
		tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;
	}

	/* Select the "best" DSC read/write polling freq and pipeline size. */
	speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);

	tape->max_stages = speed * 1000 * 10 / tape->stage_size;

	/* Limit memory use for pipeline to 10% of physical memory */
	si_meminfo(&si);
	if (tape->max_stages * tape->stage_size >
			si.totalram * si.mem_unit / 10)
		tape->max_stages =
			si.totalram * si.mem_unit / (10 * tape->stage_size);

	tape->max_stages = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
	tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
	tape->max_pipeline =
		min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
	if (tape->max_stages == 0) {
		tape->max_stages = 1;
		tape->min_pipeline = 1;
		tape->max_pipeline = 1;
	}

	t1 = (tape->stage_size * HZ) / (speed * 1000);
	tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
	tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);

	if (tape->max_stages)
		t = tn;
	else
		t = t1;

	/*
	 * Ensure that the number we got makes sense; limit it within
	 * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
	 */
	tape->best_dsc_rw_freq = max_t(unsigned long,
				min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
				IDETAPE_DSC_RW_MIN);
	printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
		"%dkB pipeline, %lums tDSC%s\n",
		drive->name, tape->name, *(u16 *)&tape->caps[14],
		(*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
		tape->stage_size / 1024,
		tape->max_stages * tape->stage_size / 1024,
		tape->best_dsc_rw_freq * 1000 / HZ,
		drive->using_dma ? ", DMA":"");

	idetape_add_settings(drive);
}
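
/*
 * Device model glue: ide_tape_remove() detaches the driver from the drive,
 * and ide_tape_release() frees the tape object once the last reference to it
 * has been dropped.
 */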
static void ide_tape_remove(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	ide_proc_unregister_driver(drive, tape->driver);

	ide_unregister_region(tape->disk);

	ide_tape_put(tape);
}

static void ide_tape_release(struct kref *kref)
{
	struct ide_tape_obj *tape = to_ide_tape(kref);
	ide_drive_t *drive = tape->drive;
	struct gendisk *g = tape->disk;

	BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);

	drive->dsc_overlap = 0;
	drive->driver_data = NULL;
	device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
	device_destroy(idetape_sysfs_class,
			MKDEV(IDETAPE_MAJOR, tape->minor + 128));
	idetape_devs[tape->minor] = NULL;
	g->private_data = NULL;
	put_disk(g);
	kfree(tape);
}

#ifdef CONFIG_IDE_PROC_FS
static int proc_idetape_read_name
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	ide_drive_t *drive = (ide_drive_t *) data;
	idetape_tape_t *tape = drive->driver_data;
	char *out = page;
	int len;

	len = sprintf(out, "%s\n", tape->name);
	PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}

static ide_proc_entry_t idetape_proc[] = {
	{ "capacity",	S_IFREG|S_IRUGO,	proc_ide_read_capacity,	NULL },
	{ "name",	S_IFREG|S_IRUGO,	proc_idetape_read_name,	NULL },
	{ NULL, 0, NULL, NULL }
};
#endif

static int ide_tape_probe(ide_drive_t *);

static ide_driver_t idetape_driver = {
	.gen_driver = {
		.owner		= THIS_MODULE,
		.name		= "ide-tape",
		.bus		= &ide_bus_type,
	},
	.probe			= ide_tape_probe,
	.remove			= ide_tape_remove,
	.version		= IDETAPE_VERSION,
	.media			= ide_tape,
	.supports_dsc_overlap	= 1,
	.do_request		= idetape_do_request,
	.end_request		= idetape_end_request,
	.error			= __ide_error,
	.abort			= __ide_abort,
#ifdef CONFIG_IDE_PROC_FS
	.proc			= idetape_proc,
#endif
};

/* Our character device supporting functions, passed to register_chrdev. */
static const struct file_operations idetape_fops = {
	.owner		= THIS_MODULE,
	.read		= idetape_chrdev_read,
	.write		= idetape_chrdev_write,
	.ioctl		= idetape_chrdev_ioctl,
	.open		= idetape_chrdev_open,
	.release	= idetape_chrdev_release,
};
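
/*
 * Minimal block device interface: open/release only manage the tape's
 * reference count, and ioctls fall back to the generic IDE handler and then
 * to the ide-tape block device ioctls.
 */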
static int idetape_open(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ide_tape_obj *tape;

	tape = ide_tape_get(disk);
	if (!tape)
		return -ENXIO;

	return 0;
}

static int idetape_release(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ide_tape_obj *tape = ide_tape_g(disk);

	ide_tape_put(tape);

	return 0;
}

static int idetape_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct block_device *bdev = inode->i_bdev;
	struct ide_tape_obj *tape = ide_tape_g(bdev->bd_disk);
	ide_drive_t *drive = tape->drive;
	int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);

	if (err == -EINVAL)
		err = idetape_blkdev_ioctl(drive, cmd, arg);
	return err;
}

static struct block_device_operations idetape_block_ops = {
	.owner		= THIS_MODULE,
	.open		= idetape_open,
	.release	= idetape_release,
	.ioctl		= idetape_ioctl,
};
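
/*
 * Probe an IDE drive: verify that it is an ATAPI tape we can support,
 * allocate the tape object and gendisk, run idetape_setup() and register the
 * character and block device nodes.
 */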
static int ide_tape_probe(ide_drive_t *drive)
{
	idetape_tape_t *tape;
	struct gendisk *g;
	int minor;

	if (!strstr("ide-tape", drive->driver_req))
		goto failed;
	if (!drive->present)
		goto failed;
	if (drive->media != ide_tape)
		goto failed;
	if (!idetape_identify_device(drive)) {
		printk(KERN_ERR "ide-tape: %s: not supported by this version of"
				" the driver\n", drive->name);
		goto failed;
	}
	if (drive->scsi) {
		printk(KERN_INFO "ide-tape: passing drive %s to ide-scsi"
				 " emulation.\n", drive->name);
		goto failed;
	}
	tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
	if (tape == NULL) {
		printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
				drive->name);
		goto failed;
	}

	g = alloc_disk(1 << PARTN_BITS);
	if (!g)
		goto out_free_tape;

	ide_init_disk(g, drive);

	ide_proc_register_driver(drive, &idetape_driver);

	kref_init(&tape->kref);

	tape->drive = drive;
	tape->driver = &idetape_driver;
	tape->disk = g;

	g->private_data = &tape->driver;

	drive->driver_data = tape;

	mutex_lock(&idetape_ref_mutex);
	for (minor = 0; idetape_devs[minor]; minor++)
		;
	idetape_devs[minor] = tape;
	mutex_unlock(&idetape_ref_mutex);

	idetape_setup(drive, tape, minor);

	device_create(idetape_sysfs_class, &drive->gendev,
		      MKDEV(IDETAPE_MAJOR, minor), "%s", tape->name);
	device_create(idetape_sysfs_class, &drive->gendev,
		      MKDEV(IDETAPE_MAJOR, minor + 128), "n%s", tape->name);

	g->fops = &idetape_block_ops;
	ide_register_region(g);

	return 0;

out_free_tape:
	kfree(tape);
failed:
	return -ENODEV;
}
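
/*
 * Module init/exit: register (and unregister) the "ht" character device
 * major, the sysfs class and the IDE driver.
 */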
static void __exit idetape_exit(void)
{
	driver_unregister(&idetape_driver.gen_driver);
	class_destroy(idetape_sysfs_class);
	unregister_chrdev(IDETAPE_MAJOR, "ht");
}

static int __init idetape_init(void)
{
	int error = 1;
	idetape_sysfs_class = class_create(THIS_MODULE, "ide_tape");

	if (IS_ERR(idetape_sysfs_class)) {
		idetape_sysfs_class = NULL;
		printk(KERN_ERR "Unable to create sysfs class for ide tapes\n");
		error = -EBUSY;
		goto out;
	}

	if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
		printk(KERN_ERR "ide-tape: Failed to register chrdev"
				" interface\n");
		error = -EBUSY;
		goto out_free_class;
	}

	error = driver_register(&idetape_driver.gen_driver);
	if (error)
		goto out_free_chrdev;

	return 0;

	/* unwind in reverse order of registration */
out_free_chrdev:
	unregister_chrdev(IDETAPE_MAJOR, "ht");
out_free_class:
	class_destroy(idetape_sysfs_class);
out:
	return error;
}

MODULE_ALIAS("ide:*m-tape*");
module_init(idetape_init);
module_exit(idetape_exit);
MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);
MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
MODULE_LICENSE("GPL");