ide-tape.c

/*
 * IDE ATAPI streaming tape driver.
 *
 * Copyright (C) 1995-1999 Gadi Oxman <gadio@netvision.net.il>
 * Copyright (C) 2003-2005 Bartlomiej Zolnierkiewicz
 *
 * This driver was constructed as a student project in the software laboratory
 * of the faculty of electrical engineering in the Technion - Israel Institute
 * of Technology, under the guidance of Avner Lottem and Dr. Ilana David.
 *
 * It is hereby placed under the terms of the GNU general public license.
 * (See linux/COPYING).
 *
 * For a historical changelog see
 * Documentation/ide/ChangeLog.ide-tape.1995-2002
 */

#define IDETAPE_VERSION "1.20"

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/ide.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/bitops.h>
#include <linux/mutex.h>

#include <scsi/scsi.h>

#include <asm/byteorder.h>
#include <linux/irq.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/unaligned.h>
#include <linux/mtio.h>

enum {
        /* output errors only */
        DBG_ERR        = (1 << 0),
        /* output all sense key/asc */
        DBG_SENSE      = (1 << 1),
        /* info regarding all chrdev-related procedures */
        DBG_CHRDEV     = (1 << 2),
        /* all remaining procedures */
        DBG_PROCS      = (1 << 3),
        /* buffer alloc info (pc_stack & rq_stack) */
        DBG_PCRQ_STACK = (1 << 4),
};

/* define to see debug info */
#define IDETAPE_DEBUG_LOG 0

#if IDETAPE_DEBUG_LOG
#define debug_log(lvl, fmt, args...)                            \
{                                                               \
        if (tape->debug_mask & lvl)                             \
                printk(KERN_INFO "ide-tape: " fmt, ## args);    \
}
#else
#define debug_log(lvl, fmt, args...) do {} while (0)
#endif
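
/*
 * Illustration (reviewer addition, not original driver text): with
 * IDETAPE_DEBUG_LOG set to 1, a call such as
 *
 *      debug_log(DBG_CHRDEV, "Reached %s\n", __func__);
 *
 * expands to a printk() that fires only when the DBG_CHRDEV bit is set in
 * tape->debug_mask; with IDETAPE_DEBUG_LOG left at 0 the call compiles away
 * to a no-op.
 */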
/**************************** Tunable parameters *****************************/

/*
 * Pipelined mode parameters.
 *
 * We try to use the minimum number of stages which is enough to keep the tape
 * constantly streaming. To accomplish that, we implement a feedback loop around
 * the maximum number of stages:
 *
 * We start from MIN maximum stages (we will not even use MIN stages if we don't
 * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
 * pipeline is empty, until we reach the optimum value or until we reach MAX.
 */
#define IDETAPE_MIN_PIPELINE_STAGES       1
#define IDETAPE_MAX_PIPELINE_STAGES     400
#define IDETAPE_INCREASE_STAGES_RATE     20
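
/*
 * Note (reviewer addition, not original driver text): in this version the
 * feedback step actually applied in idetape_end_request() below is
 * (max_pipeline - min_pipeline) / 10 stages (at least one), clamped between
 * min_pipeline and max_pipeline; the MIN/MAX/RATE constants above are
 * presumably used when those per-tape limits are set up elsewhere in the
 * file (not shown in this excerpt).
 */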
/*
 * After each failed packet command we issue a request sense command and retry
 * the packet command IDETAPE_MAX_PC_RETRIES times.
 *
 * Setting IDETAPE_MAX_PC_RETRIES to 0 will disable retries.
 */
#define IDETAPE_MAX_PC_RETRIES          3

/*
 * With each packet command, we allocate a buffer of IDETAPE_PC_BUFFER_SIZE
 * bytes. This is used for several packet commands (not for READ/WRITE
 * commands).
 */
#define IDETAPE_PC_BUFFER_SIZE          256

/*
 * In various places in the driver, we need to allocate storage
 * for packet commands and requests, which will remain valid while
 * we leave the driver to wait for an interrupt or a timeout event.
 */
#define IDETAPE_PC_STACK                (10 + IDETAPE_MAX_PC_RETRIES)

/*
 * Some drives (for example, Seagate STT3401A Travan) require a very long
 * timeout, because they don't return an interrupt or clear their busy bit
 * until after the command completes (even retension commands).
 */
#define IDETAPE_WAIT_CMD                (900 * HZ)

/*
 * The following parameter is used to select the point in the internal tape fifo
 * in which we will start to refill the buffer. Decreasing the following
 * parameter will improve the system's latency and interactive response, while
 * using a high value might improve system throughput.
 */
#define IDETAPE_FIFO_THRESHOLD          2

/*
 * DSC polling parameters.
 *
 * Polling for DSC (a single bit in the status register) is a very important
 * function in ide-tape. There are two cases in which we poll for DSC:
 *
 * 1. Before a read/write packet command, to ensure that we can transfer data
 * from/to the tape's data buffers, without causing an actual media access.
 * In case the tape is not ready yet, we take out our request from the device
 * request queue, so that ide.c can service requests from the other device
 * on the same interface in the meantime.
 *
 * 2. After the successful initialization of a "media access packet command",
 * which is a command that can take a long time to complete (the interval can
 * range from several seconds to even an hour). Again, we postpone our request
 * in the middle to free the bus for the other device. The polling frequency
 * here should be lower than the read/write frequency since those media access
 * commands are slow. We start from a "fast" frequency - IDETAPE_DSC_MA_FAST
 * (2 seconds), and if we don't receive DSC after IDETAPE_DSC_MA_THRESHOLD
 * (5 min), we switch to a lower frequency - IDETAPE_DSC_MA_SLOW (30 seconds).
 *
 * We also set a timeout for the timer, in case something goes wrong. The
 * timeout should be longer than the maximum execution time of a tape operation.
 */

/* DSC timings. */
#define IDETAPE_DSC_RW_MIN              (5 * HZ / 100)          /* 50 msec */
#define IDETAPE_DSC_RW_MAX              (40 * HZ / 100)         /* 400 msec */
#define IDETAPE_DSC_RW_TIMEOUT          (2 * 60 * HZ)           /* 2 minutes */
#define IDETAPE_DSC_MA_FAST             (2 * HZ)                /* 2 seconds */
#define IDETAPE_DSC_MA_THRESHOLD        (5 * 60 * HZ)           /* 5 minutes */
#define IDETAPE_DSC_MA_SLOW             (30 * HZ)               /* 30 seconds */
#define IDETAPE_DSC_MA_TIMEOUT          (2 * 60 * 60 * HZ)      /* 2 hours */
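
/*
 * Illustration (reviewer addition, not original driver text): these values are
 * plain jiffies intervals, so e.g. 5 * HZ / 100 is 50 ms regardless of the
 * kernel's HZ setting. Further down they are consumed as
 *
 *      tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
 *      tape->dsc_timeout   = jiffies + IDETAPE_DSC_MA_TIMEOUT;
 *
 * i.e. a polling period handed to ide_stall_queue() and an absolute deadline
 * expressed in jiffies.
 */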
/*************************** End of tunable parameters ***********************/

/* Read/Write error simulation */
#define SIMULATE_ERRORS                 0

/* tape directions */
enum {
        IDETAPE_DIR_NONE  = (1 << 0),
        IDETAPE_DIR_READ  = (1 << 1),
        IDETAPE_DIR_WRITE = (1 << 2),
};
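
/*
 * Note (reviewer addition, not original driver text): idetape_bh below is the
 * driver's private take on the old buffer_head idea - a singly linked chain of
 * page-backed buffers. As used in this file, b_size is the buffer's capacity
 * in bytes, b_data points at the data, b_reqnext links to the next buffer, and
 * the atomic b_count tracks how many bytes of the buffer are currently filled
 * or consumed.
 */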
struct idetape_bh {
        u32 b_size;
        atomic_t b_count;
        struct idetape_bh *b_reqnext;
        char *b_data;
};

/* Tape door status */
#define DOOR_UNLOCKED                   0
#define DOOR_LOCKED                     1
#define DOOR_EXPLICITLY_LOCKED          2

/* Some defines for the SPACE command */
#define IDETAPE_SPACE_OVER_FILEMARK     1
#define IDETAPE_SPACE_TO_EOD            3

/* Some defines for the LOAD UNLOAD command */
#define IDETAPE_LU_LOAD_MASK            1
#define IDETAPE_LU_RETENSION_MASK       2
#define IDETAPE_LU_EOT_MASK             4

/*
 * Special requests for our block device strategy routine.
 *
 * In order to service a character device command, we add special requests to
 * the tail of our block device request queue and wait for their completion.
 */
enum {
        REQ_IDETAPE_PC1   = (1 << 0), /* packet command (first stage) */
        REQ_IDETAPE_PC2   = (1 << 1), /* packet command (second stage) */
        REQ_IDETAPE_READ  = (1 << 2),
        REQ_IDETAPE_WRITE = (1 << 3),
};

/* Error codes returned in rq->errors to the higher part of the driver. */
#define IDETAPE_ERROR_GENERAL           101
#define IDETAPE_ERROR_FILEMARK          102
#define IDETAPE_ERROR_EOD               103

/* Structures related to the MODE SELECT / MODE SENSE packet commands. */
#define IDETAPE_BLOCK_DESCRIPTOR        0
#define IDETAPE_CAPABILITIES_PAGE       0x2a

/* Tape flag bits values. */
enum {
        IDETAPE_FLAG_IGNORE_DSC         = (1 << 0),
        /* 0 when the tape position is unknown */
        IDETAPE_FLAG_ADDRESS_VALID      = (1 << 1),
        /* Device already opened */
        IDETAPE_FLAG_BUSY               = (1 << 2),
        /* Error detected in a pipeline stage */
        IDETAPE_FLAG_PIPELINE_ERR       = (1 << 3),
        /* Attempt to auto-detect the current user block size */
        IDETAPE_FLAG_DETECT_BS          = (1 << 4),
        /* Currently on a filemark */
        IDETAPE_FLAG_FILEMARK           = (1 << 5),
        /* DRQ interrupt device */
        IDETAPE_FLAG_DRQ_INTERRUPT      = (1 << 6),
        /* pipeline active */
        IDETAPE_FLAG_PIPELINE_ACTIVE    = (1 << 7),
        /* 0 = no tape is loaded, so we don't rewind after ejecting */
        IDETAPE_FLAG_MEDIUM_PRESENT     = (1 << 8),
};
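
/*
 * Illustration (reviewer addition, not original driver text): tape->flags is a
 * plain "unsigned long" bitmap, so the flags above are manipulated with the
 * usual atomic bit helpers, e.g.
 *
 *      set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
 *      if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags))
 *              ...;
 *
 * as seen in idetape_retry_pc() and idetape_issue_pc() below.
 */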
/* A pipeline stage. */
typedef struct idetape_stage_s {
        struct request rq;              /* The corresponding request */
        struct idetape_bh *bh;          /* The data buffers */
        struct idetape_stage_s *next;   /* Pointer to the next stage */
} idetape_stage_t;

/*
 * Most of our global data which we need to save even as we leave the driver due
 * to an interrupt or a timer event is stored in the struct defined below.
 */
typedef struct ide_tape_obj {
        ide_drive_t *drive;
        ide_driver_t *driver;
        struct gendisk *disk;
        struct kref kref;

        /*
         * Since a typical character device operation requires more
         * than one packet command, we provide here enough memory
         * for the maximum of interconnected packet commands.
         * The packet commands are stored in the circular array pc_stack.
         * pc_stack_index points to the last used entry, and wraps around
         * to the start when we get to the last array entry.
         *
         * pc points to the current processed packet command.
         *
         * failed_pc points to the last failed packet command, or contains
         * NULL if we do not need to retry any packet command. This is
         * required since an additional packet command is needed before the
         * retry, to get detailed information on what went wrong.
         */
        /* Current packet command */
        struct ide_atapi_pc *pc;
        /* Last failed packet command */
        struct ide_atapi_pc *failed_pc;
        /* Packet command stack */
        struct ide_atapi_pc pc_stack[IDETAPE_PC_STACK];
        /* Next free packet command storage space */
        int pc_stack_index;
        struct request rq_stack[IDETAPE_PC_STACK];
        /* We implement a circular array */
        int rq_stack_index;

        /*
         * DSC polling variables.
         *
         * While polling for DSC we use postponed_rq to postpone the current
         * request so that ide.c will be able to service pending requests on the
         * other device. Note that at most we will have only one DSC (usually
         * data transfer) request in the device request queue. Additional
         * requests can be queued in our internal pipeline, but they will be
         * visible to ide.c only one at a time.
         */
        struct request *postponed_rq;
        /* The time in which we started polling for DSC */
        unsigned long dsc_polling_start;
        /* Timer used to poll for dsc */
        struct timer_list dsc_timer;
        /* Read/Write dsc polling frequency */
        unsigned long best_dsc_rw_freq;
        unsigned long dsc_poll_freq;
        unsigned long dsc_timeout;

        /* Read position information */
        u8 partition;
        /* Current block */
        unsigned int first_frame;

        /* Last error information */
        u8 sense_key, asc, ascq;

        /* Character device operation */
        unsigned int minor;
        /* device name */
        char name[4];
        /* Current character device data transfer direction */
        u8 chrdev_dir;

        /* tape block size, usually 512 or 1024 bytes */
        unsigned short blk_size;
        int user_bs_factor;

        /* Copy of the tape's Capabilities and Mechanical Page */
        u8 caps[20];

        /*
         * Active data transfer request parameters.
         *
         * At most, there is only one ide-tape originated data transfer request
         * in the device request queue. This allows ide.c to easily service
         * requests from the other device when we postpone our active request.
         * In the pipelined operation mode, we use our internal pipeline
         * structure to hold more data requests. The data buffer size is chosen
         * based on the tape's recommendation.
         */
        /* ptr to the request which is waiting in the device request queue */
        struct request *active_data_rq;
        /* Data buffer size chosen based on the tape's recommendation */
        int stage_size;
        idetape_stage_t *merge_stage;
        int merge_stage_size;
        struct idetape_bh *bh;
        char *b_data;
        int b_count;

        /*
         * Pipeline parameters.
         *
         * To accomplish non-pipelined mode, we simply set the following
         * variables to zero (or NULL, where appropriate).
         */
        /* Number of currently used stages */
        int nr_stages;
        /* Number of pending stages */
        int nr_pending_stages;
        /* We will not allocate more than this number of stages */
        int max_stages, min_pipeline, max_pipeline;
        /* The first stage which will be removed from the pipeline */
        idetape_stage_t *first_stage;
        /* The currently active stage */
        idetape_stage_t *active_stage;
        /* Will be serviced after the currently active request */
        idetape_stage_t *next_stage;
        /* New requests will be added to the pipeline here */
        idetape_stage_t *last_stage;
        int pages_per_stage;
        /* Wasted space in each stage */
        int excess_bh_size;

        /* Status/Action flags: long for set_bit */
        unsigned long flags;
        /* protects the ide-tape queue */
        spinlock_t lock;

        /* Measures average tape speed */
        unsigned long avg_time;
        int avg_size;
        int avg_speed;

        /* the door is currently locked */
        int door_locked;
        /* the tape hardware is write protected */
        char drv_write_prot;
        /* the tape is write protected (hardware or opened as read-only) */
        char write_prot;

        /*
         * Limit the number of times a request can be postponed, to avoid an
         * infinite postpone deadlock.
         */
        int postpone_cnt;

        /* Speed control at the tape buffers input/output */
        unsigned long insert_time;
        int insert_size;
        int insert_speed;
        int measure_insert_time;

        u32 debug_mask;
} idetape_tape_t;
static DEFINE_MUTEX(idetape_ref_mutex);

static struct class *idetape_sysfs_class;

#define to_ide_tape(obj) container_of(obj, struct ide_tape_obj, kref)

#define ide_tape_g(disk) \
        container_of((disk)->private_data, struct ide_tape_obj, driver)

static struct ide_tape_obj *ide_tape_get(struct gendisk *disk)
{
        struct ide_tape_obj *tape = NULL;

        mutex_lock(&idetape_ref_mutex);
        tape = ide_tape_g(disk);
        if (tape)
                kref_get(&tape->kref);
        mutex_unlock(&idetape_ref_mutex);
        return tape;
}

static void ide_tape_release(struct kref *);

static void ide_tape_put(struct ide_tape_obj *tape)
{
        mutex_lock(&idetape_ref_mutex);
        kref_put(&tape->kref, ide_tape_release);
        mutex_unlock(&idetape_ref_mutex);
}

/*
 * The variables below are used for the character device interface. Additional
 * state variables are defined in our ide_drive_t structure.
 */
static struct ide_tape_obj *idetape_devs[MAX_HWIFS * MAX_DRIVES];

#define ide_tape_f(file) ((file)->private_data)

static struct ide_tape_obj *ide_tape_chrdev_get(unsigned int i)
{
        struct ide_tape_obj *tape = NULL;

        mutex_lock(&idetape_ref_mutex);
        tape = idetape_devs[i];
        if (tape)
                kref_get(&tape->kref);
        mutex_unlock(&idetape_ref_mutex);
        return tape;
}

static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
                                  unsigned int bcount)
{
        struct idetape_bh *bh = pc->bh;
        int count;

        while (bcount) {
                if (bh == NULL) {
                        printk(KERN_ERR "ide-tape: bh == NULL in "
                                "idetape_input_buffers\n");
                        ide_atapi_discard_data(drive, bcount);
                        return;
                }
                count = min(
                        (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
                        bcount);
                HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
                                        atomic_read(&bh->b_count), count);
                bcount -= count;
                atomic_add(count, &bh->b_count);
                if (atomic_read(&bh->b_count) == bh->b_size) {
                        bh = bh->b_reqnext;
                        if (bh)
                                atomic_set(&bh->b_count, 0);
                }
        }
        pc->bh = bh;
}

static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
                                   unsigned int bcount)
{
        struct idetape_bh *bh = pc->bh;
        int count;

        while (bcount) {
                if (bh == NULL) {
                        printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
                                        __func__);
                        return;
                }
                count = min((unsigned int)pc->b_count, (unsigned int)bcount);
                HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count);
                bcount -= count;
                pc->b_data += count;
                pc->b_count -= count;
                if (!pc->b_count) {
                        bh = bh->b_reqnext;
                        pc->bh = bh;
                        if (bh) {
                                pc->b_data = bh->b_data;
                                pc->b_count = atomic_read(&bh->b_count);
                        }
                }
        }
}

static void idetape_update_buffers(struct ide_atapi_pc *pc)
{
        struct idetape_bh *bh = pc->bh;
        int count;
        unsigned int bcount = pc->xferred;

        if (pc->flags & PC_FLAG_WRITING)
                return;
        while (bcount) {
                if (bh == NULL) {
                        printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
                                        __func__);
                        return;
                }
                count = min((unsigned int)bh->b_size, (unsigned int)bcount);
                atomic_set(&bh->b_count, count);
                if (atomic_read(&bh->b_count) == bh->b_size)
                        bh = bh->b_reqnext;
                bcount -= count;
        }
        pc->bh = bh;
}

/*
 * idetape_next_pc_storage returns a pointer to a place in which we can
 * safely store a packet command, even though we intend to leave the
 * driver. A storage space for a maximum of IDETAPE_PC_STACK packet
 * commands is allocated at initialization time.
 */
static struct ide_atapi_pc *idetape_next_pc_storage(ide_drive_t *drive)
{
        idetape_tape_t *tape = drive->driver_data;

        debug_log(DBG_PCRQ_STACK, "pc_stack_index=%d\n", tape->pc_stack_index);

        if (tape->pc_stack_index == IDETAPE_PC_STACK)
                tape->pc_stack_index = 0;
        return (&tape->pc_stack[tape->pc_stack_index++]);
}

/*
 * idetape_next_rq_storage is used along with idetape_next_pc_storage.
 * Since we queue packet commands in the request queue, we need to
 * allocate a request, along with the allocation of a packet command.
 */

/**************************************************************
 *                                                            *
 *  This should get fixed to use kmalloc(.., GFP_ATOMIC)      *
 *  followed later on by kfree(). -ml                         *
 *                                                            *
 **************************************************************/

static struct request *idetape_next_rq_storage(ide_drive_t *drive)
{
        idetape_tape_t *tape = drive->driver_data;

        debug_log(DBG_PCRQ_STACK, "rq_stack_index=%d\n", tape->rq_stack_index);

        if (tape->rq_stack_index == IDETAPE_PC_STACK)
                tape->rq_stack_index = 0;
        return (&tape->rq_stack[tape->rq_stack_index++]);
}
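
/*
 * Note (reviewer addition, not original driver text): both *_next_*_storage()
 * helpers treat their array as a ring. With the defaults above,
 * IDETAPE_PC_STACK is 10 + 3 = 13, so the index runs 0..12 and is reset to 0
 * before it would step past the last slot; old entries are simply overwritten,
 * which is why the stack only needs to be deep enough for the worst-case chain
 * of inter-dependent packet commands.
 */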
static void idetape_init_pc(struct ide_atapi_pc *pc)
{
        memset(pc->c, 0, 12);
        pc->retries = 0;
        pc->flags = 0;
        pc->req_xfer = 0;
        pc->buf = pc->pc_buf;
        pc->buf_size = IDETAPE_PC_BUFFER_SIZE;
        pc->bh = NULL;
        pc->b_data = NULL;
}
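
/*
 * Reference (reviewer addition, not original driver text): the standard SCSI
 * fixed-format sense layout parsed below keeps the sense key in bits 3..0 of
 * byte 2, the information field (used here as the residue, in blocks) in
 * bytes 3-6, the ASC in byte 12 and the ASCQ in byte 13.
 */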
/*
 * called on each failed packet command retry to analyze the request sense. We
 * currently do not utilize this information.
 */
static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
{
        idetape_tape_t *tape = drive->driver_data;
        struct ide_atapi_pc *pc = tape->failed_pc;

        tape->sense_key = sense[2] & 0xF;
        tape->asc = sense[12];
        tape->ascq = sense[13];

        debug_log(DBG_ERR, "pc = %x, sense key = %x, asc = %x, ascq = %x\n",
                 pc->c[0], tape->sense_key, tape->asc, tape->ascq);

        /* Correct pc->xferred by asking the tape. */
        if (pc->flags & PC_FLAG_DMA_ERROR) {
                pc->xferred = pc->req_xfer -
                        tape->blk_size *
                        be32_to_cpu(get_unaligned((u32 *)&sense[3]));
                idetape_update_buffers(pc);
        }

        /*
         * If error was the result of a zero-length read or write command,
         * with sense key=5, asc=0x22, ascq=0, let it slide. Some drives
         * (i.e. Seagate STT3401A Travan) don't support 0-length read/writes.
         */
        if ((pc->c[0] == READ_6 || pc->c[0] == WRITE_6)
            /* length == 0 */
            && pc->c[4] == 0 && pc->c[3] == 0 && pc->c[2] == 0) {
                if (tape->sense_key == 5) {
                        /* don't report an error, everything's ok */
                        pc->error = 0;
                        /* don't retry read/write */
                        pc->flags |= PC_FLAG_ABORT;
                }
        }
        if (pc->c[0] == READ_6 && (sense[2] & 0x80)) {
                pc->error = IDETAPE_ERROR_FILEMARK;
                pc->flags |= PC_FLAG_ABORT;
        }
        if (pc->c[0] == WRITE_6) {
                if ((sense[2] & 0x40) || (tape->sense_key == 0xd
                     && tape->asc == 0x0 && tape->ascq == 0x2)) {
                        pc->error = IDETAPE_ERROR_EOD;
                        pc->flags |= PC_FLAG_ABORT;
                }
        }
        if (pc->c[0] == READ_6 || pc->c[0] == WRITE_6) {
                if (tape->sense_key == 8) {
                        pc->error = IDETAPE_ERROR_EOD;
                        pc->flags |= PC_FLAG_ABORT;
                }
                if (!(pc->flags & PC_FLAG_ABORT) &&
                    pc->xferred)
                        pc->retries = IDETAPE_MAX_PC_RETRIES + 1;
        }
}
static void idetape_activate_next_stage(ide_drive_t *drive)
{
        idetape_tape_t *tape = drive->driver_data;
        idetape_stage_t *stage = tape->next_stage;
        struct request *rq;

        debug_log(DBG_PROCS, "Enter %s\n", __func__);

        if (stage == NULL) {
                printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
                                " existing stage\n");
                return;
        }

        rq = &stage->rq;
        rq->rq_disk = tape->disk;
        rq->buffer = NULL;
        rq->special = (void *)stage->bh;
        tape->active_data_rq = rq;
        tape->active_stage = stage;
        tape->next_stage = stage->next;
}

/* Free a stage along with its related buffers completely. */
static void __idetape_kfree_stage(idetape_stage_t *stage)
{
        struct idetape_bh *prev_bh, *bh = stage->bh;
        int size;

        while (bh != NULL) {
                if (bh->b_data != NULL) {
                        size = (int) bh->b_size;
                        while (size > 0) {
                                free_page((unsigned long) bh->b_data);
                                size -= PAGE_SIZE;
                                bh->b_data += PAGE_SIZE;
                        }
                }
                prev_bh = bh;
                bh = bh->b_reqnext;
                kfree(prev_bh);
        }
        kfree(stage);
}

static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
{
        __idetape_kfree_stage(stage);
}

/*
 * Remove tape->first_stage from the pipeline. The caller should avoid race
 * conditions.
 */
static void idetape_remove_stage_head(ide_drive_t *drive)
{
        idetape_tape_t *tape = drive->driver_data;
        idetape_stage_t *stage;

        debug_log(DBG_PROCS, "Enter %s\n", __func__);

        if (tape->first_stage == NULL) {
                printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
                return;
        }
        if (tape->active_stage == tape->first_stage) {
                printk(KERN_ERR "ide-tape: bug: Trying to free our active "
                                "pipeline stage\n");
                return;
        }
        stage = tape->first_stage;
        tape->first_stage = stage->next;
        idetape_kfree_stage(tape, stage);
        tape->nr_stages--;
        if (tape->first_stage == NULL) {
                tape->last_stage = NULL;
                if (tape->next_stage != NULL)
                        printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
                                        " NULL\n");
                if (tape->nr_stages)
                        printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
                                        "now\n");
        }
}
/*
 * This will free all the pipeline stages starting from new_last_stage->next
 * to the end of the list, and point tape->last_stage to new_last_stage.
 */
static void idetape_abort_pipeline(ide_drive_t *drive,
                                   idetape_stage_t *new_last_stage)
{
        idetape_tape_t *tape = drive->driver_data;
        idetape_stage_t *stage = new_last_stage->next;
        idetape_stage_t *nstage;

        debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);

        while (stage) {
                nstage = stage->next;
                idetape_kfree_stage(tape, stage);
                --tape->nr_stages;
                --tape->nr_pending_stages;
                stage = nstage;
        }
        if (new_last_stage)
                new_last_stage->next = NULL;
        tape->last_stage = new_last_stage;
        tape->next_stage = NULL;
}

/*
 * Finish servicing a request and insert a pending pipeline request into the
 * main device queue.
 */
static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
{
        struct request *rq = HWGROUP(drive)->rq;
        idetape_tape_t *tape = drive->driver_data;
        unsigned long flags;
        int error;
        int remove_stage = 0;
        idetape_stage_t *active_stage;

        debug_log(DBG_PROCS, "Enter %s\n", __func__);

        switch (uptodate) {
        case 0: error = IDETAPE_ERROR_GENERAL; break;
        case 1: error = 0; break;
        default: error = uptodate;
        }
        rq->errors = error;
        if (error)
                tape->failed_pc = NULL;

        if (!blk_special_request(rq)) {
                ide_end_request(drive, uptodate, nr_sects);
                return 0;
        }

        spin_lock_irqsave(&tape->lock, flags);

        /* The request was a pipelined data transfer request */
        if (tape->active_data_rq == rq) {
                active_stage = tape->active_stage;
                tape->active_stage = NULL;
                tape->active_data_rq = NULL;
                tape->nr_pending_stages--;
                if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
                        remove_stage = 1;
                        if (error) {
                                set_bit(IDETAPE_FLAG_PIPELINE_ERR,
                                        &tape->flags);
                                if (error == IDETAPE_ERROR_EOD)
                                        idetape_abort_pipeline(drive,
                                                                active_stage);
                        }
                } else if (rq->cmd[0] & REQ_IDETAPE_READ) {
                        if (error == IDETAPE_ERROR_EOD) {
                                set_bit(IDETAPE_FLAG_PIPELINE_ERR,
                                        &tape->flags);
                                idetape_abort_pipeline(drive, active_stage);
                        }
                }
                if (tape->next_stage != NULL) {
                        idetape_activate_next_stage(drive);

                        /* Insert the next request into the request queue. */
                        (void)ide_do_drive_cmd(drive, tape->active_data_rq,
                                                ide_end);
                } else if (!error) {
                        /*
                         * This is a part of the feedback loop which tries to
                         * find the optimum number of stages. We are starting
                         * from a minimum maximum number of stages, and if we
                         * sense that the pipeline is empty, we try to increase
                         * it, until we reach the user compile time memory
                         * limit.
                         */
                        int i = (tape->max_pipeline - tape->min_pipeline) / 10;

                        tape->max_stages += max(i, 1);
                        tape->max_stages = max(tape->max_stages,
                                                tape->min_pipeline);
                        tape->max_stages = min(tape->max_stages,
                                                tape->max_pipeline);
                }
        }
        ide_end_drive_cmd(drive, 0, 0);

        if (remove_stage)
                idetape_remove_stage_head(drive);
        if (tape->active_data_rq == NULL)
                clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);

        spin_unlock_irqrestore(&tape->lock, flags);
        return 0;
}
static ide_startstop_t idetape_request_sense_callback(ide_drive_t *drive)
{
        idetape_tape_t *tape = drive->driver_data;

        debug_log(DBG_PROCS, "Enter %s\n", __func__);

        if (!tape->pc->error) {
                idetape_analyze_error(drive, tape->pc->buf);
                idetape_end_request(drive, 1, 0);
        } else {
                printk(KERN_ERR "ide-tape: Error in REQUEST SENSE itself - "
                        "Aborting request!\n");
                idetape_end_request(drive, 0, 0);
        }
        return ide_stopped;
}

static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
{
        idetape_init_pc(pc);
        pc->c[0] = REQUEST_SENSE;
        pc->c[4] = 20;
        pc->req_xfer = 20;
        pc->idetape_callback = &idetape_request_sense_callback;
}

static void idetape_init_rq(struct request *rq, u8 cmd)
{
        memset(rq, 0, sizeof(*rq));
        rq->cmd_type = REQ_TYPE_SPECIAL;
        rq->cmd[0] = cmd;
}

/*
 * Generate a new packet command request in front of the request queue, before
 * the current request, so that it will be processed immediately, on the next
 * pass through the driver. The function below is called from the request
 * handling part of the driver (the "bottom" part). Safe storage for the request
 * should be allocated with idetape_next_pc_storage() and
 * idetape_next_rq_storage() prior to that.
 *
 * Memory for those requests is pre-allocated at initialization time, and is
 * limited to IDETAPE_PC_STACK requests. We assume that we have enough space for
 * the maximum possible number of inter-dependent packet commands.
 *
 * The higher level of the driver - the ioctl handler and the character device
 * handling functions - should queue requests to the lower level part and wait
 * for their completion using idetape_queue_pc_tail or idetape_queue_rw_tail.
 */
static void idetape_queue_pc_head(ide_drive_t *drive, struct ide_atapi_pc *pc,
                                  struct request *rq)
{
        struct ide_tape_obj *tape = drive->driver_data;

        idetape_init_rq(rq, REQ_IDETAPE_PC1);
        rq->buffer = (char *) pc;
        rq->rq_disk = tape->disk;
        (void) ide_do_drive_cmd(drive, rq, ide_preempt);
}

/*
 * idetape_retry_pc is called when an error was detected during the
 * last packet command. We queue a request sense packet command in
 * the head of the request list.
 */
static ide_startstop_t idetape_retry_pc(ide_drive_t *drive)
{
        idetape_tape_t *tape = drive->driver_data;
        struct ide_atapi_pc *pc;
        struct request *rq;

        (void)ide_read_error(drive);
        pc = idetape_next_pc_storage(drive);
        rq = idetape_next_rq_storage(drive);
        idetape_create_request_sense_cmd(pc);
        set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
        idetape_queue_pc_head(drive, pc, rq);
        return ide_stopped;
}

/*
 * Postpone the current request so that ide.c will be able to service requests
 * from another device on the same hwgroup while we are polling for DSC.
 */
static void idetape_postpone_request(ide_drive_t *drive)
{
        idetape_tape_t *tape = drive->driver_data;

        debug_log(DBG_PROCS, "Enter %s\n", __func__);

        tape->postponed_rq = HWGROUP(drive)->rq;
        ide_stall_queue(drive, tape->dsc_poll_freq);
}
typedef void idetape_io_buf(ide_drive_t *, struct ide_atapi_pc *, unsigned int);

/*
 * This is the usual interrupt handler which will be called during a packet
 * command. We will transfer some of the data (as requested by the drive) and
 * will re-point interrupt handler to us. When data transfer is finished, we
 * will act according to the algorithm described before idetape_issue_pc.
 */
static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        idetape_tape_t *tape = drive->driver_data;
        struct ide_atapi_pc *pc = tape->pc;
        xfer_func_t *xferfunc;
        idetape_io_buf *iobuf;
        unsigned int temp;
#if SIMULATE_ERRORS
        static int error_sim_count;
#endif
        u16 bcount;
        u8 stat, ireason;

        debug_log(DBG_PROCS, "Enter %s - interrupt handler\n", __func__);

        /* Clear the interrupt */
        stat = ide_read_status(drive);

        if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
                if (hwif->dma_ops->dma_end(drive) || (stat & ERR_STAT)) {
                        /*
                         * A DMA error is sometimes expected. For example,
                         * if the tape is crossing a filemark during a
                         * READ command, it will issue an irq and position
                         * itself before the filemark, so that only a partial
                         * data transfer will occur (which causes the DMA
                         * error). In that case, we will later ask the tape
                         * how many bytes of the original request were
                         * actually transferred (we can't receive that
                         * information from the DMA engine on most chipsets).
                         */

                        /*
                         * On the contrary, a DMA error is never expected;
                         * it usually indicates a hardware error or abort.
                         * If the tape crosses a filemark during a READ
                         * command, it will issue an irq and position itself
                         * after the filemark (not before). Only a partial
                         * data transfer will occur, but no DMA error.
                         * (AS, 19 Apr 2001)
                         */
                        pc->flags |= PC_FLAG_DMA_ERROR;
                } else {
                        pc->xferred = pc->req_xfer;
                        idetape_update_buffers(pc);
                }
                debug_log(DBG_PROCS, "DMA finished\n");
        }

        /* No more interrupts */
        if ((stat & DRQ_STAT) == 0) {
                debug_log(DBG_SENSE, "Packet command completed, %d bytes"
                                " transferred\n", pc->xferred);

                pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
                local_irq_enable();

#if SIMULATE_ERRORS
                if ((pc->c[0] == WRITE_6 || pc->c[0] == READ_6) &&
                    (++error_sim_count % 100) == 0) {
                        printk(KERN_INFO "ide-tape: %s: simulating error\n",
                                tape->name);
                        stat |= ERR_STAT;
                }
#endif
                if ((stat & ERR_STAT) && pc->c[0] == REQUEST_SENSE)
                        stat &= ~ERR_STAT;
                if ((stat & ERR_STAT) || (pc->flags & PC_FLAG_DMA_ERROR)) {
                        /* Error detected */
                        debug_log(DBG_ERR, "%s: I/O error\n", tape->name);

                        if (pc->c[0] == REQUEST_SENSE) {
                                printk(KERN_ERR "ide-tape: I/O error in request"
                                                " sense command\n");
                                return ide_do_reset(drive);
                        }
                        debug_log(DBG_ERR, "[cmd %x]: check condition\n",
                                        pc->c[0]);

                        /* Retry operation */
                        return idetape_retry_pc(drive);
                }
                pc->error = 0;
                if ((pc->flags & PC_FLAG_WAIT_FOR_DSC) &&
                    (stat & SEEK_STAT) == 0) {
                        /* Media access command */
                        tape->dsc_polling_start = jiffies;
                        tape->dsc_poll_freq = IDETAPE_DSC_MA_FAST;
                        tape->dsc_timeout = jiffies + IDETAPE_DSC_MA_TIMEOUT;
                        /* Allow ide.c to handle other requests */
                        idetape_postpone_request(drive);
                        return ide_stopped;
                }
                if (tape->failed_pc == pc)
                        tape->failed_pc = NULL;
                /* Command finished - Call the callback function */
                return pc->idetape_callback(drive);
        }

        if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
                pc->flags &= ~PC_FLAG_DMA_IN_PROGRESS;
                printk(KERN_ERR "ide-tape: The tape wants to issue more "
                                "interrupts in DMA mode\n");
                printk(KERN_ERR "ide-tape: DMA disabled, reverting to PIO\n");
                ide_dma_off(drive);
                return ide_do_reset(drive);
        }
        /* Get the number of bytes to transfer on this interrupt. */
        bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
                  hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);

        ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);

        if (ireason & CD) {
                printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
                return ide_do_reset(drive);
        }
        if (((ireason & IO) == IO) == !!(pc->flags & PC_FLAG_WRITING)) {
                /* Hopefully, we will never get here */
                printk(KERN_ERR "ide-tape: We wanted to %s, ",
                                (ireason & IO) ? "Write" : "Read");
                printk(KERN_ERR "ide-tape: but the tape wants us to %s !\n",
                                (ireason & IO) ? "Read" : "Write");
                return ide_do_reset(drive);
        }
        if (!(pc->flags & PC_FLAG_WRITING)) {
                /* Reading - Check that we have enough space */
                temp = pc->xferred + bcount;
                if (temp > pc->req_xfer) {
                        if (temp > pc->buf_size) {
                                printk(KERN_ERR "ide-tape: The tape wants to "
                                        "send us more data than expected "
                                        "- discarding data\n");
                                ide_atapi_discard_data(drive, bcount);
                                ide_set_handler(drive, &idetape_pc_intr,
                                                IDETAPE_WAIT_CMD, NULL);
                                return ide_started;
                        }
                        debug_log(DBG_SENSE, "The tape wants to send us more "
                                "data than expected - allowing transfer\n");
                }
                iobuf = &idetape_input_buffers;
                xferfunc = hwif->atapi_input_bytes;
        } else {
                iobuf = &idetape_output_buffers;
                xferfunc = hwif->atapi_output_bytes;
        }

        if (pc->bh)
                iobuf(drive, pc, bcount);
        else
                xferfunc(drive, pc->cur_pos, bcount);

        /* Update the current position */
        pc->xferred += bcount;
        pc->cur_pos += bcount;

        debug_log(DBG_SENSE, "[cmd %x] transferred %d bytes on that intr.\n",
                        pc->c[0], bcount);

        /* And set the interrupt handler again */
        ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
        return ide_started;
}
/*
 * Packet Command Interface
 *
 * The current Packet Command is available in tape->pc, and will not change
 * until we finish handling it. Each packet command is associated with a
 * callback function that will be called when the command is finished.
 *
 * The handling will be done in several steps:
 *
 * 1. idetape_issue_pc will send the packet command to the drive, and will set
 * the interrupt handler to idetape_pc_intr.
 *
 * 2. On each interrupt, idetape_pc_intr will be called. This step will be
 * repeated until the device signals us that no more interrupts will be issued.
 *
 * 3. ATAPI Tape media access commands have immediate status with a delayed
 * process. In case of a successful initiation of a media access packet command,
 * the DSC bit will be set when the actual execution of the command is finished.
 * Since the tape drive will not issue an interrupt, we have to poll for this
 * event. In this case, we define the request as "low priority request" by
 * setting rq_status to IDETAPE_RQ_POSTPONED, set a timer to poll for DSC and
 * exit the driver.
 *
 * ide.c will then give higher priority to requests which originate from the
 * other device, until we change rq_status back to RQ_ACTIVE.
 *
 * 4. When the packet command is finished, it will be checked for errors.
 *
 * 5. In case an error was found, we queue a request sense packet command in
 * front of the request queue and retry the operation up to
 * IDETAPE_MAX_PC_RETRIES times.
 *
 * 6. In case no error was found, or we decided to give up and not to retry
 * again, the callback function will be called and then we will handle the next
 * request.
 */
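
/*
 * Sketch of the resulting call chain (reviewer addition derived from the code
 * below, not original driver text):
 *
 *      idetape_issue_pc()           -- loads the taskfile, sends WIN_PACKETCMD
 *        -> idetape_transfer_pc()   -- waits for DRQ, writes the 12-byte CDB
 *          -> idetape_pc_intr()     -- per-interrupt PIO/DMA data transfer
 *            -> pc->idetape_callback()  -- completion, or idetape_retry_pc()
 *                                          on a check condition
 */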
static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        idetape_tape_t *tape = drive->driver_data;
        struct ide_atapi_pc *pc = tape->pc;
        int retries = 100;
        ide_startstop_t startstop;
        u8 ireason;

        if (ide_wait_stat(&startstop, drive, DRQ_STAT, BUSY_STAT, WAIT_READY)) {
                printk(KERN_ERR "ide-tape: Strange, packet command initiated "
                                "yet DRQ isn't asserted\n");
                return startstop;
        }
        ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
        while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
                printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
                                "a packet command, retrying\n");
                udelay(100);
                ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
                if (retries == 0) {
                        printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while "
                                        "issuing a packet command, ignoring\n");
                        ireason |= CD;
                        ireason &= ~IO;
                }
        }
        if ((ireason & CD) == 0 || (ireason & IO)) {
                printk(KERN_ERR "ide-tape: (IO,CoD) != (0,1) while issuing "
                                "a packet command\n");
                return ide_do_reset(drive);
        }
        /* Set the interrupt routine */
        ide_set_handler(drive, &idetape_pc_intr, IDETAPE_WAIT_CMD, NULL);
#ifdef CONFIG_BLK_DEV_IDEDMA
        /* Begin DMA, if necessary */
        if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
                hwif->dma_ops->dma_start(drive);
#endif
        /* Send the actual packet */
        HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
        return ide_started;
}
static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
                struct ide_atapi_pc *pc)
{
        ide_hwif_t *hwif = drive->hwif;
        idetape_tape_t *tape = drive->driver_data;
        int dma_ok = 0;
        u16 bcount;

        if (tape->pc->c[0] == REQUEST_SENSE &&
            pc->c[0] == REQUEST_SENSE) {
                printk(KERN_ERR "ide-tape: possible ide-tape.c bug - "
                        "Two request sense commands were issued in a row\n");
        }

        if (tape->failed_pc == NULL && pc->c[0] != REQUEST_SENSE)
                tape->failed_pc = pc;
        /* Set the current packet command */
        tape->pc = pc;

        if (pc->retries > IDETAPE_MAX_PC_RETRIES ||
            (pc->flags & PC_FLAG_ABORT)) {
                /*
                 * We will "abort" retrying a packet command in case legitimate
                 * error code was received (crossing a filemark, or end of the
                 * media, for example).
                 */
                if (!(pc->flags & PC_FLAG_ABORT)) {
                        if (!(pc->c[0] == TEST_UNIT_READY &&
                              tape->sense_key == 2 && tape->asc == 4 &&
                              (tape->ascq == 1 || tape->ascq == 8))) {
                                printk(KERN_ERR "ide-tape: %s: I/O error, "
                                                "pc = %2x, key = %2x, "
                                                "asc = %2x, ascq = %2x\n",
                                                tape->name, pc->c[0],
                                                tape->sense_key, tape->asc,
                                                tape->ascq);
                        }
                        /* Giving up */
                        pc->error = IDETAPE_ERROR_GENERAL;
                }
                tape->failed_pc = NULL;
                return pc->idetape_callback(drive);
        }
        debug_log(DBG_SENSE, "Retry #%d, cmd = %02X\n", pc->retries, pc->c[0]);

        pc->retries++;
        /* We haven't transferred any data yet */
        pc->xferred = 0;
        pc->cur_pos = pc->buf;
        /* Request to transfer the entire buffer at once */
        bcount = pc->req_xfer;

        if (pc->flags & PC_FLAG_DMA_ERROR) {
                pc->flags &= ~PC_FLAG_DMA_ERROR;
                printk(KERN_WARNING "ide-tape: DMA disabled, "
                                "reverting to PIO\n");
                ide_dma_off(drive);
        }
        if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
                dma_ok = !hwif->dma_ops->dma_setup(drive);

        ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
                           IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);

        if (dma_ok)
                /* Will begin DMA later */
                pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
        if (test_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags)) {
                ide_execute_command(drive, WIN_PACKETCMD, &idetape_transfer_pc,
                                    IDETAPE_WAIT_CMD, NULL);
                return ide_started;
        } else {
                hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
                return idetape_transfer_pc(drive);
        }
}
  1127. static ide_startstop_t idetape_pc_callback(ide_drive_t *drive)
  1128. {
  1129. idetape_tape_t *tape = drive->driver_data;
  1130. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1131. idetape_end_request(drive, tape->pc->error ? 0 : 1, 0);
  1132. return ide_stopped;
  1133. }
  1134. /* A mode sense command is used to "sense" tape parameters. */
  1135. static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
  1136. {
  1137. idetape_init_pc(pc);
  1138. pc->c[0] = MODE_SENSE;
  1139. if (page_code != IDETAPE_BLOCK_DESCRIPTOR)
  1140. /* DBD = 1 - Don't return block descriptors */
  1141. pc->c[1] = 8;
  1142. pc->c[2] = page_code;
  1143. /*
  1144. * Changed pc->c[3] to 0 (255 will at best return unused info).
  1145. *
  1146. * For SCSI this byte is defined as subpage instead of high byte
  1147. * of length and some IDE drives seem to interpret it this way
  1148. * and return an error when 255 is used.
  1149. */
  1150. pc->c[3] = 0;
  1151. /* We will just discard data in that case */
  1152. pc->c[4] = 255;
  1153. if (page_code == IDETAPE_BLOCK_DESCRIPTOR)
  1154. pc->req_xfer = 12;
  1155. else if (page_code == IDETAPE_CAPABILITIES_PAGE)
  1156. pc->req_xfer = 24;
  1157. else
  1158. pc->req_xfer = 50;
  1159. pc->idetape_callback = &idetape_pc_callback;
  1160. }
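/*
 * Illustrative sketch (not part of the driver logic): for the capabilities
 * page, the 12-byte CDB built by idetape_create_mode_sense_cmd() above ends
 * up looking roughly like this, assuming MODE_SENSE is the usual 6-byte
 * opcode 0x1a from <scsi/scsi.h>:
 *
 *	c[0] = 0x1a      MODE SENSE
 *	c[1] = 0x08      DBD=1, no block descriptors
 *	c[2] = page_code (e.g. IDETAPE_CAPABILITIES_PAGE)
 *	c[3] = 0x00      no subpage / high length byte
 *	c[4] = 0xff      allocation length; extra data is simply discarded
 *	c[5..11] = 0
 *
 * req_xfer (12, 24 or 50 bytes) only limits how much of the reply we
 * actually read back.
 */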
  1161. static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
  1162. {
  1163. idetape_tape_t *tape = drive->driver_data;
  1164. struct ide_atapi_pc *pc = tape->pc;
  1165. u8 stat;
  1166. stat = ide_read_status(drive);
  1167. if (stat & SEEK_STAT) {
  1168. if (stat & ERR_STAT) {
  1169. /* Error detected */
  1170. if (pc->c[0] != TEST_UNIT_READY)
  1171. printk(KERN_ERR "ide-tape: %s: I/O error, ",
  1172. tape->name);
  1173. /* Retry operation */
  1174. return idetape_retry_pc(drive);
  1175. }
  1176. pc->error = 0;
  1177. if (tape->failed_pc == pc)
  1178. tape->failed_pc = NULL;
  1179. } else {
  1180. pc->error = IDETAPE_ERROR_GENERAL;
  1181. tape->failed_pc = NULL;
  1182. }
  1183. return pc->idetape_callback(drive);
  1184. }
  1185. static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
  1186. {
  1187. idetape_tape_t *tape = drive->driver_data;
  1188. struct request *rq = HWGROUP(drive)->rq;
  1189. int blocks = tape->pc->xferred / tape->blk_size;
  1190. tape->avg_size += blocks * tape->blk_size;
  1191. tape->insert_size += blocks * tape->blk_size;
  1192. if (tape->insert_size > 1024 * 1024)
  1193. tape->measure_insert_time = 1;
  1194. if (tape->measure_insert_time) {
  1195. tape->measure_insert_time = 0;
  1196. tape->insert_time = jiffies;
  1197. tape->insert_size = 0;
  1198. }
  1199. if (time_after(jiffies, tape->insert_time))
  1200. tape->insert_speed = tape->insert_size / 1024 * HZ /
  1201. (jiffies - tape->insert_time);
  1202. if (time_after_eq(jiffies, tape->avg_time + HZ)) {
  1203. tape->avg_speed = tape->avg_size * HZ /
  1204. (jiffies - tape->avg_time) / 1024;
  1205. tape->avg_size = 0;
  1206. tape->avg_time = jiffies;
  1207. }
  1208. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1209. tape->first_frame += blocks;
  1210. rq->current_nr_sectors -= blocks;
  1211. if (!tape->pc->error)
  1212. idetape_end_request(drive, 1, 0);
  1213. else
  1214. idetape_end_request(drive, tape->pc->error, 0);
  1215. return ide_stopped;
  1216. }
  1217. static void idetape_create_read_cmd(idetape_tape_t *tape,
  1218. struct ide_atapi_pc *pc,
  1219. unsigned int length, struct idetape_bh *bh)
  1220. {
  1221. idetape_init_pc(pc);
  1222. pc->c[0] = READ_6;
  1223. put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
  1224. pc->c[1] = 1;
  1225. pc->idetape_callback = &idetape_rw_callback;
  1226. pc->bh = bh;
  1227. atomic_set(&bh->b_count, 0);
  1228. pc->buf = NULL;
  1229. pc->buf_size = length * tape->blk_size;
  1230. pc->req_xfer = pc->buf_size;
  1231. if (pc->req_xfer == tape->stage_size)
  1232. pc->flags |= PC_FLAG_DMA_RECOMMENDED;
  1233. }
  1234. static void idetape_create_write_cmd(idetape_tape_t *tape,
  1235. struct ide_atapi_pc *pc,
  1236. unsigned int length, struct idetape_bh *bh)
  1237. {
  1238. idetape_init_pc(pc);
  1239. pc->c[0] = WRITE_6;
  1240. put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]);
  1241. pc->c[1] = 1;
  1242. pc->idetape_callback = &idetape_rw_callback;
  1243. pc->flags |= PC_FLAG_WRITING;
  1244. pc->bh = bh;
  1245. pc->b_data = bh->b_data;
  1246. pc->b_count = atomic_read(&bh->b_count);
  1247. pc->buf = NULL;
  1248. pc->buf_size = length * tape->blk_size;
  1249. pc->req_xfer = pc->buf_size;
  1250. if (pc->req_xfer == tape->stage_size)
  1251. pc->flags |= PC_FLAG_DMA_RECOMMENDED;
  1252. }
  1253. static ide_startstop_t idetape_do_request(ide_drive_t *drive,
  1254. struct request *rq, sector_t block)
  1255. {
  1256. idetape_tape_t *tape = drive->driver_data;
  1257. struct ide_atapi_pc *pc = NULL;
  1258. struct request *postponed_rq = tape->postponed_rq;
  1259. u8 stat;
  1260. debug_log(DBG_SENSE, "sector: %ld, nr_sectors: %ld,"
  1261. " current_nr_sectors: %d\n",
  1262. rq->sector, rq->nr_sectors, rq->current_nr_sectors);
  1263. if (!blk_special_request(rq)) {
  1264. /* We do not support buffer cache originated requests. */
  1265. printk(KERN_NOTICE "ide-tape: %s: Unsupported request in "
  1266. "request queue (%d)\n", drive->name, rq->cmd_type);
  1267. ide_end_request(drive, 0, 0);
  1268. return ide_stopped;
  1269. }
  1270. /* Retry a failed packet command */
  1271. if (tape->failed_pc && tape->pc->c[0] == REQUEST_SENSE)
  1272. return idetape_issue_pc(drive, tape->failed_pc);
  1273. if (postponed_rq != NULL)
  1274. if (rq != postponed_rq) {
  1275. printk(KERN_ERR "ide-tape: ide-tape.c bug - "
  1276. "Two DSC requests were queued\n");
  1277. idetape_end_request(drive, 0, 0);
  1278. return ide_stopped;
  1279. }
  1280. tape->postponed_rq = NULL;
  1281. /*
  1282. * If the tape is still busy, postpone our request and service
  1283. * the other device meanwhile.
  1284. */
  1285. stat = ide_read_status(drive);
  1286. if (!drive->dsc_overlap && !(rq->cmd[0] & REQ_IDETAPE_PC2))
  1287. set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
  1288. if (drive->post_reset == 1) {
  1289. set_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags);
  1290. drive->post_reset = 0;
  1291. }
  1292. if (time_after(jiffies, tape->insert_time))
  1293. tape->insert_speed = tape->insert_size / 1024 * HZ /
  1294. (jiffies - tape->insert_time);
  1295. if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
  1296. (stat & SEEK_STAT) == 0) {
  1297. if (postponed_rq == NULL) {
  1298. tape->dsc_polling_start = jiffies;
  1299. tape->dsc_poll_freq = tape->best_dsc_rw_freq;
  1300. tape->dsc_timeout = jiffies + IDETAPE_DSC_RW_TIMEOUT;
  1301. } else if (time_after(jiffies, tape->dsc_timeout)) {
  1302. printk(KERN_ERR "ide-tape: %s: DSC timeout\n",
  1303. tape->name);
  1304. if (rq->cmd[0] & REQ_IDETAPE_PC2) {
  1305. idetape_media_access_finished(drive);
  1306. return ide_stopped;
  1307. } else {
  1308. return ide_do_reset(drive);
  1309. }
  1310. } else if (time_after(jiffies,
  1311. tape->dsc_polling_start +
  1312. IDETAPE_DSC_MA_THRESHOLD))
  1313. tape->dsc_poll_freq = IDETAPE_DSC_MA_SLOW;
  1314. idetape_postpone_request(drive);
  1315. return ide_stopped;
  1316. }
  1317. if (rq->cmd[0] & REQ_IDETAPE_READ) {
  1318. tape->postpone_cnt = 0;
  1319. pc = idetape_next_pc_storage(drive);
  1320. idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
  1321. (struct idetape_bh *)rq->special);
  1322. goto out;
  1323. }
  1324. if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
  1325. tape->postpone_cnt = 0;
  1326. pc = idetape_next_pc_storage(drive);
  1327. idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
  1328. (struct idetape_bh *)rq->special);
  1329. goto out;
  1330. }
  1331. if (rq->cmd[0] & REQ_IDETAPE_PC1) {
  1332. pc = (struct ide_atapi_pc *) rq->buffer;
  1333. rq->cmd[0] &= ~(REQ_IDETAPE_PC1);
  1334. rq->cmd[0] |= REQ_IDETAPE_PC2;
  1335. goto out;
  1336. }
  1337. if (rq->cmd[0] & REQ_IDETAPE_PC2) {
  1338. idetape_media_access_finished(drive);
  1339. return ide_stopped;
  1340. }
  1341. BUG();
  1342. out:
  1343. return idetape_issue_pc(drive, pc);
  1344. }
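/*
 * Summary of the DSC polling flow above (descriptive only): when the drive
 * reports busy (SEEK_STAT clear) the request is postponed instead of being
 * serviced.  On the first postponement we record dsc_polling_start, arm
 * dsc_timeout and poll at best_dsc_rw_freq; once the media-access threshold
 * passes we fall back to the slow poll frequency, and if dsc_timeout expires
 * the request is either completed (for PC2) or the drive is reset.
 */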
  1345. /* Pipeline related functions */
  1346. /*
  1347. * The function below uses __get_free_page to allocate a pipeline stage, along
  1348. * with all the necessary small buffers which together make a buffer of size
  1349. * tape->stage_size (or a bit more). We attempt to combine sequential pages as
  1350. * much as possible.
  1351. *
1352. * It returns a pointer to the newly allocated stage, or NULL if we can't (or
  1353. * don't want to) allocate a stage.
  1354. *
  1355. * Pipeline stages are optional and are used to increase performance. If we
  1356. * can't allocate them, we'll manage without them.
  1357. */
  1358. static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
  1359. int clear)
  1360. {
  1361. idetape_stage_t *stage;
  1362. struct idetape_bh *prev_bh, *bh;
  1363. int pages = tape->pages_per_stage;
  1364. char *b_data = NULL;
  1365. stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
  1366. if (!stage)
  1367. return NULL;
  1368. stage->next = NULL;
  1369. stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
  1370. bh = stage->bh;
  1371. if (bh == NULL)
  1372. goto abort;
  1373. bh->b_reqnext = NULL;
  1374. bh->b_data = (char *) __get_free_page(GFP_KERNEL);
  1375. if (!bh->b_data)
  1376. goto abort;
  1377. if (clear)
  1378. memset(bh->b_data, 0, PAGE_SIZE);
  1379. bh->b_size = PAGE_SIZE;
  1380. atomic_set(&bh->b_count, full ? bh->b_size : 0);
  1381. while (--pages) {
  1382. b_data = (char *) __get_free_page(GFP_KERNEL);
  1383. if (!b_data)
  1384. goto abort;
  1385. if (clear)
  1386. memset(b_data, 0, PAGE_SIZE);
  1387. if (bh->b_data == b_data + PAGE_SIZE) {
  1388. bh->b_size += PAGE_SIZE;
  1389. bh->b_data -= PAGE_SIZE;
  1390. if (full)
  1391. atomic_add(PAGE_SIZE, &bh->b_count);
  1392. continue;
  1393. }
  1394. if (b_data == bh->b_data + bh->b_size) {
  1395. bh->b_size += PAGE_SIZE;
  1396. if (full)
  1397. atomic_add(PAGE_SIZE, &bh->b_count);
  1398. continue;
  1399. }
  1400. prev_bh = bh;
  1401. bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
  1402. if (!bh) {
  1403. free_page((unsigned long) b_data);
  1404. goto abort;
  1405. }
  1406. bh->b_reqnext = NULL;
  1407. bh->b_data = b_data;
  1408. bh->b_size = PAGE_SIZE;
  1409. atomic_set(&bh->b_count, full ? bh->b_size : 0);
  1410. prev_bh->b_reqnext = bh;
  1411. }
  1412. bh->b_size -= tape->excess_bh_size;
  1413. if (full)
  1414. atomic_sub(tape->excess_bh_size, &bh->b_count);
  1415. return stage;
  1416. abort:
  1417. __idetape_kfree_stage(stage);
  1418. return NULL;
  1419. }
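/*
 * Worked example for the coalescing loop above (illustrative, the numbers
 * are hypothetical): with pages_per_stage = 4, if __get_free_page() happens
 * to return four physically adjacent pages, the stage consists of a single
 * idetape_bh with b_size = 4 * PAGE_SIZE; if every page lands somewhere
 * unrelated, we end up with four bh's of PAGE_SIZE each, linked via
 * b_reqnext.  Either way the trailing excess_bh_size bytes are trimmed from
 * the last bh so the usable total matches tape->stage_size.
 */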
  1420. static int idetape_copy_stage_from_user(idetape_tape_t *tape,
  1421. const char __user *buf, int n)
  1422. {
  1423. struct idetape_bh *bh = tape->bh;
  1424. int count;
  1425. int ret = 0;
  1426. while (n) {
  1427. if (bh == NULL) {
  1428. printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
  1429. __func__);
  1430. return 1;
  1431. }
  1432. count = min((unsigned int)
  1433. (bh->b_size - atomic_read(&bh->b_count)),
  1434. (unsigned int)n);
  1435. if (copy_from_user(bh->b_data + atomic_read(&bh->b_count), buf,
  1436. count))
  1437. ret = 1;
  1438. n -= count;
  1439. atomic_add(count, &bh->b_count);
  1440. buf += count;
  1441. if (atomic_read(&bh->b_count) == bh->b_size) {
  1442. bh = bh->b_reqnext;
  1443. if (bh)
  1444. atomic_set(&bh->b_count, 0);
  1445. }
  1446. }
  1447. tape->bh = bh;
  1448. return ret;
  1449. }
  1450. static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
  1451. int n)
  1452. {
  1453. struct idetape_bh *bh = tape->bh;
  1454. int count;
  1455. int ret = 0;
  1456. while (n) {
  1457. if (bh == NULL) {
  1458. printk(KERN_ERR "ide-tape: bh == NULL in %s\n",
  1459. __func__);
  1460. return 1;
  1461. }
  1462. count = min(tape->b_count, n);
  1463. if (copy_to_user(buf, tape->b_data, count))
  1464. ret = 1;
  1465. n -= count;
  1466. tape->b_data += count;
  1467. tape->b_count -= count;
  1468. buf += count;
  1469. if (!tape->b_count) {
  1470. bh = bh->b_reqnext;
  1471. tape->bh = bh;
  1472. if (bh) {
  1473. tape->b_data = bh->b_data;
  1474. tape->b_count = atomic_read(&bh->b_count);
  1475. }
  1476. }
  1477. }
  1478. return ret;
  1479. }
  1480. static void idetape_init_merge_stage(idetape_tape_t *tape)
  1481. {
  1482. struct idetape_bh *bh = tape->merge_stage->bh;
  1483. tape->bh = bh;
  1484. if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
  1485. atomic_set(&bh->b_count, 0);
  1486. else {
  1487. tape->b_data = bh->b_data;
  1488. tape->b_count = atomic_read(&bh->b_count);
  1489. }
  1490. }
  1491. /* Install a completion in a pending request and sleep until it is serviced. The
  1492. * caller should ensure that the request will not be serviced before we install
  1493. * the completion (usually by disabling interrupts).
  1494. */
  1495. static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
  1496. {
  1497. DECLARE_COMPLETION_ONSTACK(wait);
  1498. idetape_tape_t *tape = drive->driver_data;
  1499. if (rq == NULL || !blk_special_request(rq)) {
  1500. printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
  1501. " request\n");
  1502. return;
  1503. }
  1504. rq->end_io_data = &wait;
  1505. rq->end_io = blk_end_sync_rq;
  1506. spin_unlock_irq(&tape->lock);
  1507. wait_for_completion(&wait);
  1508. /* The stage and its struct request have been deallocated */
  1509. spin_lock_irq(&tape->lock);
  1510. }
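/*
 * Note on the locking above: tape->lock is held by the caller, so we drop it
 * around wait_for_completion() to let the request be serviced; the end_io
 * hook is set to blk_end_sync_rq(), which (in this kernel) completes the
 * struct completion stashed in rq->end_io_data.  The lock is re-taken before
 * returning so the caller's view of the pipeline stays consistent.
 */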
  1511. static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
  1512. {
  1513. idetape_tape_t *tape = drive->driver_data;
  1514. u8 *readpos = tape->pc->buf;
  1515. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1516. if (!tape->pc->error) {
  1517. debug_log(DBG_SENSE, "BOP - %s\n",
  1518. (readpos[0] & 0x80) ? "Yes" : "No");
  1519. debug_log(DBG_SENSE, "EOP - %s\n",
  1520. (readpos[0] & 0x40) ? "Yes" : "No");
  1521. if (readpos[0] & 0x4) {
  1522. printk(KERN_INFO "ide-tape: Block location is unknown"
  1523. "to the tape\n");
  1524. clear_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
  1525. idetape_end_request(drive, 0, 0);
  1526. } else {
  1527. debug_log(DBG_SENSE, "Block Location - %u\n",
  1528. be32_to_cpu(*(u32 *)&readpos[4]));
  1529. tape->partition = readpos[1];
  1530. tape->first_frame =
  1531. be32_to_cpu(*(u32 *)&readpos[4]);
  1532. set_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags);
  1533. idetape_end_request(drive, 1, 0);
  1534. }
  1535. } else {
  1536. idetape_end_request(drive, 0, 0);
  1537. }
  1538. return ide_stopped;
  1539. }
  1540. /*
  1541. * Write a filemark if write_filemark=1. Flush the device buffers without
  1542. * writing a filemark otherwise.
  1543. */
  1544. static void idetape_create_write_filemark_cmd(ide_drive_t *drive,
  1545. struct ide_atapi_pc *pc, int write_filemark)
  1546. {
  1547. idetape_init_pc(pc);
  1548. pc->c[0] = WRITE_FILEMARKS;
  1549. pc->c[4] = write_filemark;
  1550. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1551. pc->idetape_callback = &idetape_pc_callback;
  1552. }
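/*
 * Illustrative CDB sketch: WRITE FILEMARKS as built above carries the
 * filemark count in the low-order count byte, c[4], so c[4] = 1 writes a
 * single filemark while c[4] = 0 degenerates into a plain "flush the
 * drive's buffers" command, which is exactly how
 * idetape_flush_tape_buffers() uses it below.
 */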
  1553. static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
  1554. {
  1555. idetape_init_pc(pc);
  1556. pc->c[0] = TEST_UNIT_READY;
  1557. pc->idetape_callback = &idetape_pc_callback;
  1558. }
  1559. /*
1560. * We add a special packet command request to the tail of the request queue, and
1561. * wait for it to be serviced. This is not to be called from within the request
1562. * handling part of the driver! Here we allocate data on the stack; it remains
1563. * valid until the request is finished. This is not the case for the bottom part
1564. * of the driver, where we always leave the functions to wait for an
1565. * interrupt or a timer event.
  1566. *
  1567. * From the bottom part of the driver, we should allocate safe memory using
  1568. * idetape_next_pc_storage() and ide_tape_next_rq_storage(), and add the request
  1569. * to the request list without waiting for it to be serviced! In that case, we
  1570. * usually use idetape_queue_pc_head().
  1571. */
  1572. static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
  1573. {
  1574. struct ide_tape_obj *tape = drive->driver_data;
  1575. struct request rq;
  1576. idetape_init_rq(&rq, REQ_IDETAPE_PC1);
  1577. rq.buffer = (char *) pc;
  1578. rq.rq_disk = tape->disk;
  1579. return ide_do_drive_cmd(drive, &rq, ide_wait);
  1580. }
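/*
 * Typical usage of the helper above (a minimal sketch; error handling
 * omitted): callers build the packet command on their own stack and block
 * until it has been serviced, e.g.
 *
 *	struct ide_atapi_pc pc;
 *
 *	idetape_create_test_unit_ready_cmd(&pc);
 *	if (__idetape_queue_pc_tail(drive, &pc))
 *		return -EIO;
 *
 * which is the pattern idetape_wait_ready() follows further down.
 */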
  1581. static void idetape_create_load_unload_cmd(ide_drive_t *drive,
  1582. struct ide_atapi_pc *pc, int cmd)
  1583. {
  1584. idetape_init_pc(pc);
  1585. pc->c[0] = START_STOP;
  1586. pc->c[4] = cmd;
  1587. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1588. pc->idetape_callback = &idetape_pc_callback;
  1589. }
  1590. static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
  1591. {
  1592. idetape_tape_t *tape = drive->driver_data;
  1593. struct ide_atapi_pc pc;
  1594. int load_attempted = 0;
  1595. /* Wait for the tape to become ready */
  1596. set_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
  1597. timeout += jiffies;
  1598. while (time_before(jiffies, timeout)) {
  1599. idetape_create_test_unit_ready_cmd(&pc);
  1600. if (!__idetape_queue_pc_tail(drive, &pc))
  1601. return 0;
  1602. if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
  1603. || (tape->asc == 0x3A)) {
  1604. /* no media */
  1605. if (load_attempted)
  1606. return -ENOMEDIUM;
  1607. idetape_create_load_unload_cmd(drive, &pc,
  1608. IDETAPE_LU_LOAD_MASK);
  1609. __idetape_queue_pc_tail(drive, &pc);
  1610. load_attempted = 1;
  1611. /* not about to be ready */
  1612. } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
  1613. (tape->ascq == 1 || tape->ascq == 8)))
  1614. return -EIO;
  1615. msleep(100);
  1616. }
  1617. return -EIO;
  1618. }
  1619. static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
  1620. {
  1621. return __idetape_queue_pc_tail(drive, pc);
  1622. }
  1623. static int idetape_flush_tape_buffers(ide_drive_t *drive)
  1624. {
  1625. struct ide_atapi_pc pc;
  1626. int rc;
  1627. idetape_create_write_filemark_cmd(drive, &pc, 0);
  1628. rc = idetape_queue_pc_tail(drive, &pc);
  1629. if (rc)
  1630. return rc;
  1631. idetape_wait_ready(drive, 60 * 5 * HZ);
  1632. return 0;
  1633. }
  1634. static void idetape_create_read_position_cmd(struct ide_atapi_pc *pc)
  1635. {
  1636. idetape_init_pc(pc);
  1637. pc->c[0] = READ_POSITION;
  1638. pc->req_xfer = 20;
  1639. pc->idetape_callback = &idetape_read_position_callback;
  1640. }
  1641. static int idetape_read_position(ide_drive_t *drive)
  1642. {
  1643. idetape_tape_t *tape = drive->driver_data;
  1644. struct ide_atapi_pc pc;
  1645. int position;
  1646. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  1647. idetape_create_read_position_cmd(&pc);
  1648. if (idetape_queue_pc_tail(drive, &pc))
  1649. return -1;
  1650. position = tape->first_frame;
  1651. return position;
  1652. }
  1653. static void idetape_create_locate_cmd(ide_drive_t *drive,
  1654. struct ide_atapi_pc *pc,
  1655. unsigned int block, u8 partition, int skip)
  1656. {
  1657. idetape_init_pc(pc);
  1658. pc->c[0] = POSITION_TO_ELEMENT;
  1659. pc->c[1] = 2;
  1660. put_unaligned(cpu_to_be32(block), (unsigned int *) &pc->c[3]);
  1661. pc->c[8] = partition;
  1662. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1663. pc->idetape_callback = &idetape_pc_callback;
  1664. }
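/*
 * Illustrative layout of the LOCATE CDB built above (POSITION_TO_ELEMENT):
 * bytes 3..6 carry the target block in big-endian order via
 * put_unaligned(cpu_to_be32(block), ...), and c[8] selects the partition.
 * c[1] = 2 presumably sets the change-partition bit (an assumption based on
 * the SSC LOCATE definition, not something this file spells out).  The skip
 * argument is accepted for interface symmetry but is not encoded into the
 * CDB.
 */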
  1665. static int idetape_create_prevent_cmd(ide_drive_t *drive,
  1666. struct ide_atapi_pc *pc, int prevent)
  1667. {
  1668. idetape_tape_t *tape = drive->driver_data;
  1669. /* device supports locking according to capabilities page */
  1670. if (!(tape->caps[6] & 0x01))
  1671. return 0;
  1672. idetape_init_pc(pc);
  1673. pc->c[0] = ALLOW_MEDIUM_REMOVAL;
  1674. pc->c[4] = prevent;
  1675. pc->idetape_callback = &idetape_pc_callback;
  1676. return 1;
  1677. }
  1678. static int __idetape_discard_read_pipeline(ide_drive_t *drive)
  1679. {
  1680. idetape_tape_t *tape = drive->driver_data;
  1681. unsigned long flags;
  1682. int cnt;
  1683. if (tape->chrdev_dir != IDETAPE_DIR_READ)
  1684. return 0;
  1685. /* Remove merge stage. */
  1686. cnt = tape->merge_stage_size / tape->blk_size;
  1687. if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
  1688. ++cnt; /* Filemarks count as 1 sector */
  1689. tape->merge_stage_size = 0;
  1690. if (tape->merge_stage != NULL) {
  1691. __idetape_kfree_stage(tape->merge_stage);
  1692. tape->merge_stage = NULL;
  1693. }
  1694. /* Clear pipeline flags. */
  1695. clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
  1696. tape->chrdev_dir = IDETAPE_DIR_NONE;
  1697. /* Remove pipeline stages. */
  1698. if (tape->first_stage == NULL)
  1699. return 0;
  1700. spin_lock_irqsave(&tape->lock, flags);
  1701. tape->next_stage = NULL;
  1702. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
  1703. idetape_wait_for_request(drive, tape->active_data_rq);
  1704. spin_unlock_irqrestore(&tape->lock, flags);
  1705. while (tape->first_stage != NULL) {
  1706. struct request *rq_ptr = &tape->first_stage->rq;
  1707. cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
  1708. if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
  1709. ++cnt;
  1710. idetape_remove_stage_head(drive);
  1711. }
  1712. tape->nr_pending_stages = 0;
  1713. tape->max_stages = tape->min_pipeline;
  1714. return cnt;
  1715. }
  1716. /*
  1717. * Position the tape to the requested block using the LOCATE packet command.
  1718. * A READ POSITION command is then issued to check where we are positioned. Like
  1719. * all higher level operations, we queue the commands at the tail of the request
  1720. * queue and wait for their completion.
  1721. */
  1722. static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
  1723. u8 partition, int skip)
  1724. {
  1725. idetape_tape_t *tape = drive->driver_data;
  1726. int retval;
  1727. struct ide_atapi_pc pc;
  1728. if (tape->chrdev_dir == IDETAPE_DIR_READ)
  1729. __idetape_discard_read_pipeline(drive);
  1730. idetape_wait_ready(drive, 60 * 5 * HZ);
  1731. idetape_create_locate_cmd(drive, &pc, block, partition, skip);
  1732. retval = idetape_queue_pc_tail(drive, &pc);
  1733. if (retval)
  1734. return (retval);
  1735. idetape_create_read_position_cmd(&pc);
  1736. return (idetape_queue_pc_tail(drive, &pc));
  1737. }
  1738. static void idetape_discard_read_pipeline(ide_drive_t *drive,
  1739. int restore_position)
  1740. {
  1741. idetape_tape_t *tape = drive->driver_data;
  1742. int cnt;
  1743. int seek, position;
  1744. cnt = __idetape_discard_read_pipeline(drive);
  1745. if (restore_position) {
  1746. position = idetape_read_position(drive);
  1747. seek = position > cnt ? position - cnt : 0;
  1748. if (idetape_position_tape(drive, seek, 0, 0)) {
  1749. printk(KERN_INFO "ide-tape: %s: position_tape failed in"
  1750. " discard_pipeline()\n", tape->name);
  1751. return;
  1752. }
  1753. }
  1754. }
  1755. /*
  1756. * Generate a read/write request for the block device interface and wait for it
  1757. * to be serviced.
  1758. */
  1759. static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
  1760. struct idetape_bh *bh)
  1761. {
  1762. idetape_tape_t *tape = drive->driver_data;
  1763. struct request rq;
  1764. debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
  1765. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  1766. printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
  1767. __func__);
  1768. return (0);
  1769. }
  1770. idetape_init_rq(&rq, cmd);
  1771. rq.rq_disk = tape->disk;
  1772. rq.special = (void *)bh;
  1773. rq.sector = tape->first_frame;
  1774. rq.nr_sectors = blocks;
  1775. rq.current_nr_sectors = blocks;
  1776. (void) ide_do_drive_cmd(drive, &rq, ide_wait);
  1777. if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
  1778. return 0;
  1779. if (tape->merge_stage)
  1780. idetape_init_merge_stage(tape);
  1781. if (rq.errors == IDETAPE_ERROR_GENERAL)
  1782. return -EIO;
  1783. return (tape->blk_size * (blocks-rq.current_nr_sectors));
  1784. }
  1785. /* start servicing the pipeline stages, starting from tape->next_stage. */
  1786. static void idetape_plug_pipeline(ide_drive_t *drive)
  1787. {
  1788. idetape_tape_t *tape = drive->driver_data;
  1789. if (tape->next_stage == NULL)
  1790. return;
  1791. if (!test_and_set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  1792. idetape_activate_next_stage(drive);
  1793. (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
  1794. }
  1795. }
  1796. static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
  1797. {
  1798. idetape_init_pc(pc);
  1799. pc->c[0] = INQUIRY;
  1800. pc->c[4] = 254;
  1801. pc->req_xfer = 254;
  1802. pc->idetape_callback = &idetape_pc_callback;
  1803. }
  1804. static void idetape_create_rewind_cmd(ide_drive_t *drive,
  1805. struct ide_atapi_pc *pc)
  1806. {
  1807. idetape_init_pc(pc);
  1808. pc->c[0] = REZERO_UNIT;
  1809. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1810. pc->idetape_callback = &idetape_pc_callback;
  1811. }
  1812. static void idetape_create_erase_cmd(struct ide_atapi_pc *pc)
  1813. {
  1814. idetape_init_pc(pc);
  1815. pc->c[0] = ERASE;
  1816. pc->c[1] = 1;
  1817. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1818. pc->idetape_callback = &idetape_pc_callback;
  1819. }
  1820. static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
  1821. {
  1822. idetape_init_pc(pc);
  1823. pc->c[0] = SPACE;
  1824. put_unaligned(cpu_to_be32(count), (unsigned int *) &pc->c[1]);
  1825. pc->c[1] = cmd;
  1826. pc->flags |= PC_FLAG_WAIT_FOR_DSC;
  1827. pc->idetape_callback = &idetape_pc_callback;
  1828. }
  1829. /* Queue up a character device originated write request. */
  1830. static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
  1831. {
  1832. idetape_tape_t *tape = drive->driver_data;
  1833. unsigned long flags;
  1834. debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
  1835. /* Attempt to allocate a new stage. Beware possible race conditions. */
  1836. while (1) {
  1837. spin_lock_irqsave(&tape->lock, flags);
  1838. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  1839. idetape_wait_for_request(drive, tape->active_data_rq);
  1840. spin_unlock_irqrestore(&tape->lock, flags);
  1841. } else {
  1842. spin_unlock_irqrestore(&tape->lock, flags);
  1843. idetape_plug_pipeline(drive);
  1844. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
  1845. &tape->flags))
  1846. continue;
  1847. return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
  1848. blocks, tape->merge_stage->bh);
  1849. }
  1850. }
  1851. }
  1852. /*
  1853. * Wait until all pending pipeline requests are serviced. Typically called on
  1854. * device close.
  1855. */
  1856. static void idetape_wait_for_pipeline(ide_drive_t *drive)
  1857. {
  1858. idetape_tape_t *tape = drive->driver_data;
  1859. unsigned long flags;
  1860. while (tape->next_stage || test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE,
  1861. &tape->flags)) {
  1862. idetape_plug_pipeline(drive);
  1863. spin_lock_irqsave(&tape->lock, flags);
  1864. if (test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags))
  1865. idetape_wait_for_request(drive, tape->active_data_rq);
  1866. spin_unlock_irqrestore(&tape->lock, flags);
  1867. }
  1868. }
  1869. static void idetape_empty_write_pipeline(ide_drive_t *drive)
  1870. {
  1871. idetape_tape_t *tape = drive->driver_data;
  1872. int blocks, min;
  1873. struct idetape_bh *bh;
  1874. if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
  1875. printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
  1876. " but we are not writing.\n");
  1877. return;
  1878. }
  1879. if (tape->merge_stage_size > tape->stage_size) {
  1880. printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
  1881. tape->merge_stage_size = tape->stage_size;
  1882. }
  1883. if (tape->merge_stage_size) {
  1884. blocks = tape->merge_stage_size / tape->blk_size;
  1885. if (tape->merge_stage_size % tape->blk_size) {
  1886. unsigned int i;
  1887. blocks++;
  1888. i = tape->blk_size - tape->merge_stage_size %
  1889. tape->blk_size;
  1890. bh = tape->bh->b_reqnext;
  1891. while (bh) {
  1892. atomic_set(&bh->b_count, 0);
  1893. bh = bh->b_reqnext;
  1894. }
  1895. bh = tape->bh;
  1896. while (i) {
  1897. if (bh == NULL) {
  1898. printk(KERN_INFO "ide-tape: bug,"
  1899. " bh NULL\n");
  1900. break;
  1901. }
  1902. min = min(i, (unsigned int)(bh->b_size -
  1903. atomic_read(&bh->b_count)));
  1904. memset(bh->b_data + atomic_read(&bh->b_count),
  1905. 0, min);
  1906. atomic_add(min, &bh->b_count);
  1907. i -= min;
  1908. bh = bh->b_reqnext;
  1909. }
  1910. }
  1911. (void) idetape_add_chrdev_write_request(drive, blocks);
  1912. tape->merge_stage_size = 0;
  1913. }
  1914. idetape_wait_for_pipeline(drive);
  1915. if (tape->merge_stage != NULL) {
  1916. __idetape_kfree_stage(tape->merge_stage);
  1917. tape->merge_stage = NULL;
  1918. }
  1919. clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
  1920. tape->chrdev_dir = IDETAPE_DIR_NONE;
  1921. /*
  1922. * On the next backup, perform the feedback loop again. (I don't want to
  1923. * keep sense information between backups, as some systems are
  1924. * constantly on, and the system load can be totally different on the
  1925. * next backup).
  1926. */
  1927. tape->max_stages = tape->min_pipeline;
  1928. if (tape->first_stage != NULL ||
  1929. tape->next_stage != NULL ||
  1930. tape->last_stage != NULL ||
  1931. tape->nr_stages != 0) {
  1932. printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
  1933. "first_stage %p, next_stage %p, "
  1934. "last_stage %p, nr_stages %d\n",
  1935. tape->first_stage, tape->next_stage,
  1936. tape->last_stage, tape->nr_stages);
  1937. }
  1938. }
  1939. static int idetape_init_read(ide_drive_t *drive, int max_stages)
  1940. {
  1941. idetape_tape_t *tape = drive->driver_data;
  1942. int bytes_read;
  1943. /* Initialize read operation */
  1944. if (tape->chrdev_dir != IDETAPE_DIR_READ) {
  1945. if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
  1946. idetape_empty_write_pipeline(drive);
  1947. idetape_flush_tape_buffers(drive);
  1948. }
  1949. if (tape->merge_stage || tape->merge_stage_size) {
  1950. printk(KERN_ERR "ide-tape: merge_stage_size should be"
  1951. " 0 now\n");
  1952. tape->merge_stage_size = 0;
  1953. }
  1954. tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
  1955. if (!tape->merge_stage)
  1956. return -ENOMEM;
  1957. tape->chrdev_dir = IDETAPE_DIR_READ;
  1958. /*
  1959. * Issue a read 0 command to ensure that DSC handshake is
  1960. * switched from completion mode to buffer available mode.
  1961. * No point in issuing this if DSC overlap isn't supported, some
  1962. * drives (Seagate STT3401A) will return an error.
  1963. */
  1964. if (drive->dsc_overlap) {
  1965. bytes_read = idetape_queue_rw_tail(drive,
  1966. REQ_IDETAPE_READ, 0,
  1967. tape->merge_stage->bh);
  1968. if (bytes_read < 0) {
  1969. __idetape_kfree_stage(tape->merge_stage);
  1970. tape->merge_stage = NULL;
  1971. tape->chrdev_dir = IDETAPE_DIR_NONE;
  1972. return bytes_read;
  1973. }
  1974. }
  1975. }
  1976. if (!test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags)) {
  1977. if (tape->nr_pending_stages >= 3 * max_stages / 4) {
  1978. tape->measure_insert_time = 1;
  1979. tape->insert_time = jiffies;
  1980. tape->insert_size = 0;
  1981. tape->insert_speed = 0;
  1982. idetape_plug_pipeline(drive);
  1983. }
  1984. }
  1985. return 0;
  1986. }
  1987. /*
  1988. * Called from idetape_chrdev_read() to service a character device read request
  1989. * and add read-ahead requests to our pipeline.
  1990. */
  1991. static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
  1992. {
  1993. idetape_tape_t *tape = drive->driver_data;
  1994. debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
  1995. /* If we are at a filemark, return a read length of 0 */
  1996. if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
  1997. return 0;
  1998. idetape_init_read(drive, tape->max_stages);
  1999. if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
  2000. return 0;
  2001. return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
  2002. tape->merge_stage->bh);
  2003. }
  2004. static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
  2005. {
  2006. idetape_tape_t *tape = drive->driver_data;
  2007. struct idetape_bh *bh;
  2008. int blocks;
  2009. while (bcount) {
  2010. unsigned int count;
  2011. bh = tape->merge_stage->bh;
  2012. count = min(tape->stage_size, bcount);
  2013. bcount -= count;
  2014. blocks = count / tape->blk_size;
  2015. while (count) {
  2016. atomic_set(&bh->b_count,
  2017. min(count, (unsigned int)bh->b_size));
  2018. memset(bh->b_data, 0, atomic_read(&bh->b_count));
  2019. count -= atomic_read(&bh->b_count);
  2020. bh = bh->b_reqnext;
  2021. }
  2022. idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
  2023. tape->merge_stage->bh);
  2024. }
  2025. }
  2026. static int idetape_pipeline_size(ide_drive_t *drive)
  2027. {
  2028. idetape_tape_t *tape = drive->driver_data;
  2029. idetape_stage_t *stage;
  2030. struct request *rq;
  2031. int size = 0;
  2032. idetape_wait_for_pipeline(drive);
  2033. stage = tape->first_stage;
  2034. while (stage != NULL) {
  2035. rq = &stage->rq;
  2036. size += tape->blk_size * (rq->nr_sectors -
  2037. rq->current_nr_sectors);
  2038. if (rq->errors == IDETAPE_ERROR_FILEMARK)
  2039. size += tape->blk_size;
  2040. stage = stage->next;
  2041. }
  2042. size += tape->merge_stage_size;
  2043. return size;
  2044. }
  2045. /*
  2046. * Rewinds the tape to the Beginning Of the current Partition (BOP). We
  2047. * currently support only one partition.
  2048. */
  2049. static int idetape_rewind_tape(ide_drive_t *drive)
  2050. {
  2051. int retval;
  2052. struct ide_atapi_pc pc;
  2053. idetape_tape_t *tape;
  2054. tape = drive->driver_data;
  2055. debug_log(DBG_SENSE, "Enter %s\n", __func__);
  2056. idetape_create_rewind_cmd(drive, &pc);
  2057. retval = idetape_queue_pc_tail(drive, &pc);
  2058. if (retval)
  2059. return retval;
  2060. idetape_create_read_position_cmd(&pc);
  2061. retval = idetape_queue_pc_tail(drive, &pc);
  2062. if (retval)
  2063. return retval;
  2064. return 0;
  2065. }
  2066. /* mtio.h compatible commands should be issued to the chrdev interface. */
  2067. static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
  2068. unsigned long arg)
  2069. {
  2070. idetape_tape_t *tape = drive->driver_data;
  2071. void __user *argp = (void __user *)arg;
  2072. struct idetape_config {
  2073. int dsc_rw_frequency;
  2074. int dsc_media_access_frequency;
  2075. int nr_stages;
  2076. } config;
  2077. debug_log(DBG_PROCS, "Enter %s\n", __func__);
  2078. switch (cmd) {
  2079. case 0x0340:
  2080. if (copy_from_user(&config, argp, sizeof(config)))
  2081. return -EFAULT;
  2082. tape->best_dsc_rw_freq = config.dsc_rw_frequency;
  2083. tape->max_stages = config.nr_stages;
  2084. break;
  2085. case 0x0350:
  2086. config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
  2087. config.nr_stages = tape->max_stages;
  2088. if (copy_to_user(argp, &config, sizeof(config)))
  2089. return -EFAULT;
  2090. break;
  2091. default:
  2092. return -EIO;
  2093. }
  2094. return 0;
  2095. }
  2096. static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
  2097. int mt_count)
  2098. {
  2099. idetape_tape_t *tape = drive->driver_data;
  2100. struct ide_atapi_pc pc;
  2101. int retval, count = 0;
  2102. int sprev = !!(tape->caps[4] & 0x20);
  2103. if (mt_count == 0)
  2104. return 0;
  2105. if (MTBSF == mt_op || MTBSFM == mt_op) {
  2106. if (!sprev)
  2107. return -EIO;
  2108. mt_count = -mt_count;
  2109. }
  2110. if (tape->chrdev_dir == IDETAPE_DIR_READ) {
  2111. tape->merge_stage_size = 0;
  2112. if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
  2113. ++count;
  2114. idetape_discard_read_pipeline(drive, 0);
  2115. }
  2116. /*
  2117. * The filemark was not found in our internal pipeline; now we can issue
  2118. * the space command.
  2119. */
  2120. switch (mt_op) {
  2121. case MTFSF:
  2122. case MTBSF:
  2123. idetape_create_space_cmd(&pc, mt_count - count,
  2124. IDETAPE_SPACE_OVER_FILEMARK);
  2125. return idetape_queue_pc_tail(drive, &pc);
  2126. case MTFSFM:
  2127. case MTBSFM:
  2128. if (!sprev)
  2129. return -EIO;
  2130. retval = idetape_space_over_filemarks(drive, MTFSF,
  2131. mt_count - count);
  2132. if (retval)
  2133. return retval;
  2134. count = (MTBSFM == mt_op ? 1 : -1);
  2135. return idetape_space_over_filemarks(drive, MTFSF, count);
  2136. default:
  2137. printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
  2138. mt_op);
  2139. return -EIO;
  2140. }
  2141. }
  2142. /*
  2143. * Our character device read / write functions.
  2144. *
  2145. * The tape is optimized to maximize throughput when it is transferring an
  2146. * integral number of the "continuous transfer limit", which is a parameter of
  2147. * the specific tape (26kB on my particular tape, 32kB for Onstream).
  2148. *
  2149. * As of version 1.3 of the driver, the character device provides an abstract
  2150. * continuous view of the media - any mix of block sizes (even 1 byte) on the
  2151. * same backup/restore procedure is supported. The driver will internally
2152. * convert the requests to the recommended transfer unit, so that a mismatch
2153. * between the user's block size and the recommended size will only result in
2154. * (slightly) increased driver overhead, but will no longer hurt performance.
  2155. * This is not applicable to Onstream.
  2156. */
  2157. static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
  2158. size_t count, loff_t *ppos)
  2159. {
  2160. struct ide_tape_obj *tape = ide_tape_f(file);
  2161. ide_drive_t *drive = tape->drive;
  2162. ssize_t bytes_read, temp, actually_read = 0, rc;
  2163. ssize_t ret = 0;
  2164. u16 ctl = *(u16 *)&tape->caps[12];
  2165. debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
  2166. if (tape->chrdev_dir != IDETAPE_DIR_READ) {
  2167. if (test_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags))
  2168. if (count > tape->blk_size &&
  2169. (count % tape->blk_size) == 0)
  2170. tape->user_bs_factor = count / tape->blk_size;
  2171. }
  2172. rc = idetape_init_read(drive, tape->max_stages);
  2173. if (rc < 0)
  2174. return rc;
  2175. if (count == 0)
  2176. return (0);
  2177. if (tape->merge_stage_size) {
  2178. actually_read = min((unsigned int)(tape->merge_stage_size),
  2179. (unsigned int)count);
  2180. if (idetape_copy_stage_to_user(tape, buf, actually_read))
  2181. ret = -EFAULT;
  2182. buf += actually_read;
  2183. tape->merge_stage_size -= actually_read;
  2184. count -= actually_read;
  2185. }
  2186. while (count >= tape->stage_size) {
  2187. bytes_read = idetape_add_chrdev_read_request(drive, ctl);
  2188. if (bytes_read <= 0)
  2189. goto finish;
  2190. if (idetape_copy_stage_to_user(tape, buf, bytes_read))
  2191. ret = -EFAULT;
  2192. buf += bytes_read;
  2193. count -= bytes_read;
  2194. actually_read += bytes_read;
  2195. }
  2196. if (count) {
  2197. bytes_read = idetape_add_chrdev_read_request(drive, ctl);
  2198. if (bytes_read <= 0)
  2199. goto finish;
  2200. temp = min((unsigned long)count, (unsigned long)bytes_read);
  2201. if (idetape_copy_stage_to_user(tape, buf, temp))
  2202. ret = -EFAULT;
  2203. actually_read += temp;
  2204. tape->merge_stage_size = bytes_read-temp;
  2205. }
  2206. finish:
  2207. if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
  2208. debug_log(DBG_SENSE, "%s: spacing over filemark\n", tape->name);
  2209. idetape_space_over_filemarks(drive, MTFSF, 1);
  2210. return 0;
  2211. }
  2212. return ret ? ret : actually_read;
  2213. }
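/*
 * Worked example for the stage arithmetic used above (hypothetical numbers,
 * assuming tape->stage_size is ctl blocks as set up at probe time): with the
 * default blk_size of 512 bytes and ctl = 52 blocks taken from caps[12], one
 * stage is 52 * 512 = 26624 bytes (the 26kB "continuous transfer limit"
 * mentioned earlier).  A 100000-byte read() is then served as three
 * full-stage requests of 26624 bytes plus one final request from which only
 * the remaining 20128 bytes are copied to the user; the rest stays in the
 * merge stage for the next read().
 */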
  2214. static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
  2215. size_t count, loff_t *ppos)
  2216. {
  2217. struct ide_tape_obj *tape = ide_tape_f(file);
  2218. ide_drive_t *drive = tape->drive;
  2219. ssize_t actually_written = 0;
  2220. ssize_t ret = 0;
  2221. u16 ctl = *(u16 *)&tape->caps[12];
  2222. /* The drive is write protected. */
  2223. if (tape->write_prot)
  2224. return -EACCES;
  2225. debug_log(DBG_CHRDEV, "Enter %s, count %Zd\n", __func__, count);
  2226. /* Initialize write operation */
  2227. if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
  2228. if (tape->chrdev_dir == IDETAPE_DIR_READ)
  2229. idetape_discard_read_pipeline(drive, 1);
  2230. if (tape->merge_stage || tape->merge_stage_size) {
  2231. printk(KERN_ERR "ide-tape: merge_stage_size "
  2232. "should be 0 now\n");
  2233. tape->merge_stage_size = 0;
  2234. }
  2235. tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
  2236. if (!tape->merge_stage)
  2237. return -ENOMEM;
  2238. tape->chrdev_dir = IDETAPE_DIR_WRITE;
  2239. idetape_init_merge_stage(tape);
  2240. /*
  2241. * Issue a write 0 command to ensure that DSC handshake is
  2242. * switched from completion mode to buffer available mode. No
  2243. * point in issuing this if DSC overlap isn't supported, some
  2244. * drives (Seagate STT3401A) will return an error.
  2245. */
  2246. if (drive->dsc_overlap) {
  2247. ssize_t retval = idetape_queue_rw_tail(drive,
  2248. REQ_IDETAPE_WRITE, 0,
  2249. tape->merge_stage->bh);
  2250. if (retval < 0) {
  2251. __idetape_kfree_stage(tape->merge_stage);
  2252. tape->merge_stage = NULL;
  2253. tape->chrdev_dir = IDETAPE_DIR_NONE;
  2254. return retval;
  2255. }
  2256. }
  2257. }
  2258. if (count == 0)
  2259. return (0);
  2260. if (tape->merge_stage_size) {
  2261. if (tape->merge_stage_size >= tape->stage_size) {
  2262. printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
  2263. tape->merge_stage_size = 0;
  2264. }
  2265. actually_written = min((unsigned int)
  2266. (tape->stage_size - tape->merge_stage_size),
  2267. (unsigned int)count);
  2268. if (idetape_copy_stage_from_user(tape, buf, actually_written))
  2269. ret = -EFAULT;
  2270. buf += actually_written;
  2271. tape->merge_stage_size += actually_written;
  2272. count -= actually_written;
  2273. if (tape->merge_stage_size == tape->stage_size) {
  2274. ssize_t retval;
  2275. tape->merge_stage_size = 0;
  2276. retval = idetape_add_chrdev_write_request(drive, ctl);
  2277. if (retval <= 0)
  2278. return (retval);
  2279. }
  2280. }
  2281. while (count >= tape->stage_size) {
  2282. ssize_t retval;
  2283. if (idetape_copy_stage_from_user(tape, buf, tape->stage_size))
  2284. ret = -EFAULT;
  2285. buf += tape->stage_size;
  2286. count -= tape->stage_size;
  2287. retval = idetape_add_chrdev_write_request(drive, ctl);
  2288. actually_written += tape->stage_size;
  2289. if (retval <= 0)
  2290. return (retval);
  2291. }
  2292. if (count) {
  2293. actually_written += count;
  2294. if (idetape_copy_stage_from_user(tape, buf, count))
  2295. ret = -EFAULT;
  2296. tape->merge_stage_size += count;
  2297. }
  2298. return ret ? ret : actually_written;
  2299. }
  2300. static int idetape_write_filemark(ide_drive_t *drive)
  2301. {
  2302. struct ide_atapi_pc pc;
  2303. /* Write a filemark */
  2304. idetape_create_write_filemark_cmd(drive, &pc, 1);
  2305. if (idetape_queue_pc_tail(drive, &pc)) {
  2306. printk(KERN_ERR "ide-tape: Couldn't write a filemark\n");
  2307. return -EIO;
  2308. }
  2309. return 0;
  2310. }
  2311. /*
  2312. * Called from idetape_chrdev_ioctl when the general mtio MTIOCTOP ioctl is
  2313. * requested.
  2314. *
  2315. * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
  2316. * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
  2317. * usually not supported (it is supported in the rare case in which we crossed
  2318. * the filemark during our read-ahead pipelined operation mode).
  2319. *
  2320. * The following commands are currently not supported:
  2321. *
  2322. * MTFSS, MTBSS, MTWSM, MTSETDENSITY, MTSETDRVBUFFER, MT_ST_BOOLEANS,
  2323. * MT_ST_WRITE_THRESHOLD.
  2324. */
  2325. static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
  2326. {
  2327. idetape_tape_t *tape = drive->driver_data;
  2328. struct ide_atapi_pc pc;
  2329. int i, retval;
  2330. debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
  2331. mt_op, mt_count);
  2332. /* Commands which need our pipelined read-ahead stages. */
  2333. switch (mt_op) {
  2334. case MTFSF:
  2335. case MTFSFM:
  2336. case MTBSF:
  2337. case MTBSFM:
  2338. if (!mt_count)
  2339. return 0;
  2340. return idetape_space_over_filemarks(drive, mt_op, mt_count);
  2341. default:
  2342. break;
  2343. }
  2344. switch (mt_op) {
  2345. case MTWEOF:
  2346. if (tape->write_prot)
  2347. return -EACCES;
  2348. idetape_discard_read_pipeline(drive, 1);
  2349. for (i = 0; i < mt_count; i++) {
  2350. retval = idetape_write_filemark(drive);
  2351. if (retval)
  2352. return retval;
  2353. }
  2354. return 0;
  2355. case MTREW:
  2356. idetape_discard_read_pipeline(drive, 0);
  2357. if (idetape_rewind_tape(drive))
  2358. return -EIO;
  2359. return 0;
  2360. case MTLOAD:
  2361. idetape_discard_read_pipeline(drive, 0);
  2362. idetape_create_load_unload_cmd(drive, &pc,
  2363. IDETAPE_LU_LOAD_MASK);
  2364. return idetape_queue_pc_tail(drive, &pc);
  2365. case MTUNLOAD:
  2366. case MTOFFL:
  2367. /*
  2368. * If door is locked, attempt to unlock before
  2369. * attempting to eject.
  2370. */
  2371. if (tape->door_locked) {
  2372. if (idetape_create_prevent_cmd(drive, &pc, 0))
  2373. if (!idetape_queue_pc_tail(drive, &pc))
  2374. tape->door_locked = DOOR_UNLOCKED;
  2375. }
  2376. idetape_discard_read_pipeline(drive, 0);
  2377. idetape_create_load_unload_cmd(drive, &pc,
  2378. !IDETAPE_LU_LOAD_MASK);
  2379. retval = idetape_queue_pc_tail(drive, &pc);
  2380. if (!retval)
  2381. clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
  2382. return retval;
  2383. case MTNOP:
  2384. idetape_discard_read_pipeline(drive, 0);
  2385. return idetape_flush_tape_buffers(drive);
  2386. case MTRETEN:
  2387. idetape_discard_read_pipeline(drive, 0);
  2388. idetape_create_load_unload_cmd(drive, &pc,
  2389. IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
  2390. return idetape_queue_pc_tail(drive, &pc);
  2391. case MTEOM:
  2392. idetape_create_space_cmd(&pc, 0, IDETAPE_SPACE_TO_EOD);
  2393. return idetape_queue_pc_tail(drive, &pc);
  2394. case MTERASE:
  2395. (void)idetape_rewind_tape(drive);
  2396. idetape_create_erase_cmd(&pc);
  2397. return idetape_queue_pc_tail(drive, &pc);
  2398. case MTSETBLK:
  2399. if (mt_count) {
  2400. if (mt_count < tape->blk_size ||
  2401. mt_count % tape->blk_size)
  2402. return -EIO;
  2403. tape->user_bs_factor = mt_count / tape->blk_size;
  2404. clear_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
  2405. } else
  2406. set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
  2407. return 0;
  2408. case MTSEEK:
  2409. idetape_discard_read_pipeline(drive, 0);
  2410. return idetape_position_tape(drive,
  2411. mt_count * tape->user_bs_factor, tape->partition, 0);
  2412. case MTSETPART:
  2413. idetape_discard_read_pipeline(drive, 0);
  2414. return idetape_position_tape(drive, 0, mt_count, 0);
  2415. case MTFSR:
  2416. case MTBSR:
  2417. case MTLOCK:
  2418. if (!idetape_create_prevent_cmd(drive, &pc, 1))
  2419. return 0;
  2420. retval = idetape_queue_pc_tail(drive, &pc);
  2421. if (retval)
  2422. return retval;
  2423. tape->door_locked = DOOR_EXPLICITLY_LOCKED;
  2424. return 0;
  2425. case MTUNLOCK:
  2426. if (!idetape_create_prevent_cmd(drive, &pc, 0))
  2427. return 0;
  2428. retval = idetape_queue_pc_tail(drive, &pc);
  2429. if (retval)
  2430. return retval;
  2431. tape->door_locked = DOOR_UNLOCKED;
  2432. return 0;
  2433. default:
  2434. printk(KERN_ERR "ide-tape: MTIO operation %d not supported\n",
  2435. mt_op);
  2436. return -EIO;
  2437. }
  2438. }
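/*
 * Userspace-side sketch (illustrative only, not driver code): the MTIOCTOP
 * path above is normally reached through the standard mtio interface, e.g.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mtio.h>
 *
 *	struct mtop op = { .mt_op = MTWEOF, .mt_count = 1 };
 *	if (ioctl(fd, MTIOCTOP, &op) < 0)
 *		perror("MTIOCTOP");
 *
 * where fd refers to one of the ide-tape character nodes (/dev/ht* or the
 * non-rewinding /dev/nht*).
 */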
  2439. /*
  2440. * Our character device ioctls. General mtio.h magnetic io commands are
  2441. * supported here, and not in the corresponding block interface. Our own
  2442. * ide-tape ioctls are supported on both interfaces.
  2443. */
  2444. static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
  2445. unsigned int cmd, unsigned long arg)
  2446. {
  2447. struct ide_tape_obj *tape = ide_tape_f(file);
  2448. ide_drive_t *drive = tape->drive;
  2449. struct mtop mtop;
  2450. struct mtget mtget;
  2451. struct mtpos mtpos;
  2452. int block_offset = 0, position = tape->first_frame;
  2453. void __user *argp = (void __user *)arg;
  2454. debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
  2455. if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
  2456. idetape_empty_write_pipeline(drive);
  2457. idetape_flush_tape_buffers(drive);
  2458. }
  2459. if (cmd == MTIOCGET || cmd == MTIOCPOS) {
  2460. block_offset = idetape_pipeline_size(drive) /
  2461. (tape->blk_size * tape->user_bs_factor);
  2462. position = idetape_read_position(drive);
  2463. if (position < 0)
  2464. return -EIO;
  2465. }
  2466. switch (cmd) {
  2467. case MTIOCTOP:
  2468. if (copy_from_user(&mtop, argp, sizeof(struct mtop)))
  2469. return -EFAULT;
  2470. return idetape_mtioctop(drive, mtop.mt_op, mtop.mt_count);
  2471. case MTIOCGET:
  2472. memset(&mtget, 0, sizeof(struct mtget));
  2473. mtget.mt_type = MT_ISSCSI2;
  2474. mtget.mt_blkno = position / tape->user_bs_factor - block_offset;
  2475. mtget.mt_dsreg =
  2476. ((tape->blk_size * tape->user_bs_factor)
  2477. << MT_ST_BLKSIZE_SHIFT) & MT_ST_BLKSIZE_MASK;
  2478. if (tape->drv_write_prot)
  2479. mtget.mt_gstat |= GMT_WR_PROT(0xffffffff);
  2480. if (copy_to_user(argp, &mtget, sizeof(struct mtget)))
  2481. return -EFAULT;
  2482. return 0;
  2483. case MTIOCPOS:
  2484. mtpos.mt_blkno = position / tape->user_bs_factor - block_offset;
  2485. if (copy_to_user(argp, &mtpos, sizeof(struct mtpos)))
  2486. return -EFAULT;
  2487. return 0;
  2488. default:
  2489. if (tape->chrdev_dir == IDETAPE_DIR_READ)
  2490. idetape_discard_read_pipeline(drive, 1);
  2491. return idetape_blkdev_ioctl(drive, cmd, arg);
  2492. }
  2493. }
  2494. /*
  2495. * Do a mode sense page 0 with block descriptor and if it succeeds set the tape
  2496. * block size with the reported value.
  2497. */
  2498. static void ide_tape_get_bsize_from_bdesc(ide_drive_t *drive)
  2499. {
  2500. idetape_tape_t *tape = drive->driver_data;
  2501. struct ide_atapi_pc pc;
  2502. idetape_create_mode_sense_cmd(&pc, IDETAPE_BLOCK_DESCRIPTOR);
  2503. if (idetape_queue_pc_tail(drive, &pc)) {
  2504. printk(KERN_ERR "ide-tape: Can't get block descriptor\n");
  2505. if (tape->blk_size == 0) {
  2506. printk(KERN_WARNING "ide-tape: Cannot deal with zero "
  2507. "block size, assuming 32k\n");
  2508. tape->blk_size = 32768;
  2509. }
  2510. return;
  2511. }
  2512. tape->blk_size = (pc.buf[4 + 5] << 16) +
  2513. (pc.buf[4 + 6] << 8) +
  2514. pc.buf[4 + 7];
  2515. tape->drv_write_prot = (pc.buf[2] & 0x80) >> 7;
  2516. }
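/*
 * Layout note for the parsing above (descriptive): after the 4-byte mode
 * parameter header, the 8-byte block descriptor carries the block length in
 * its last three bytes, hence buf[4 + 5], buf[4 + 6] and buf[4 + 7] are
 * assembled big-endian into blk_size.  Bit 7 of buf[2] (the device-specific
 * parameter, WP) is what we report back as drv_write_prot.
 */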
  2517. static int idetape_chrdev_open(struct inode *inode, struct file *filp)
  2518. {
  2519. unsigned int minor = iminor(inode), i = minor & ~0xc0;
  2520. ide_drive_t *drive;
  2521. idetape_tape_t *tape;
  2522. struct ide_atapi_pc pc;
  2523. int retval;
  2524. if (i >= MAX_HWIFS * MAX_DRIVES)
  2525. return -ENXIO;
  2526. tape = ide_tape_chrdev_get(i);
  2527. if (!tape)
  2528. return -ENXIO;
  2529. debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
  2530. /*
  2531. * We really want to do nonseekable_open(inode, filp); here, but some
  2532. * versions of tar incorrectly call lseek on tapes and bail out if that
  2533. * fails. So we disallow pread() and pwrite(), but permit lseeks.
  2534. */
  2535. filp->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
  2536. drive = tape->drive;
  2537. filp->private_data = tape;
  2538. if (test_and_set_bit(IDETAPE_FLAG_BUSY, &tape->flags)) {
  2539. retval = -EBUSY;
  2540. goto out_put_tape;
  2541. }
  2542. retval = idetape_wait_ready(drive, 60 * HZ);
  2543. if (retval) {
  2544. clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
  2545. printk(KERN_ERR "ide-tape: %s: drive not ready\n", tape->name);
  2546. goto out_put_tape;
  2547. }
  2548. idetape_read_position(drive);
  2549. if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
  2550. (void)idetape_rewind_tape(drive);
  2551. if (tape->chrdev_dir != IDETAPE_DIR_READ)
  2552. clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
  2553. /* Read block size and write protect status from drive. */
  2554. ide_tape_get_bsize_from_bdesc(drive);
  2555. /* Set write protect flag if device is opened as read-only. */
  2556. if ((filp->f_flags & O_ACCMODE) == O_RDONLY)
  2557. tape->write_prot = 1;
  2558. else
  2559. tape->write_prot = tape->drv_write_prot;
  2560. /* Make sure drive isn't write protected if user wants to write. */
  2561. if (tape->write_prot) {
  2562. if ((filp->f_flags & O_ACCMODE) == O_WRONLY ||
  2563. (filp->f_flags & O_ACCMODE) == O_RDWR) {
  2564. clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
  2565. retval = -EROFS;
  2566. goto out_put_tape;
  2567. }
  2568. }
  2569. /* Lock the tape drive door so user can't eject. */
  2570. if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
  2571. if (idetape_create_prevent_cmd(drive, &pc, 1)) {
  2572. if (!idetape_queue_pc_tail(drive, &pc)) {
  2573. if (tape->door_locked != DOOR_EXPLICITLY_LOCKED)
  2574. tape->door_locked = DOOR_LOCKED;
  2575. }
  2576. }
  2577. }
  2578. return 0;
  2579. out_put_tape:
  2580. ide_tape_put(tape);
  2581. return retval;
  2582. }
  2583. static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
  2584. {
  2585. idetape_tape_t *tape = drive->driver_data;
  2586. idetape_empty_write_pipeline(drive);
  2587. tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
  2588. if (tape->merge_stage != NULL) {
  2589. idetape_pad_zeros(drive, tape->blk_size *
  2590. (tape->user_bs_factor - 1));
  2591. __idetape_kfree_stage(tape->merge_stage);
  2592. tape->merge_stage = NULL;
  2593. }
  2594. idetape_write_filemark(drive);
  2595. idetape_flush_tape_buffers(drive);
  2596. idetape_flush_tape_buffers(drive);
  2597. }
  2598. static int idetape_chrdev_release(struct inode *inode, struct file *filp)
  2599. {
  2600. struct ide_tape_obj *tape = ide_tape_f(filp);
  2601. ide_drive_t *drive = tape->drive;
  2602. struct ide_atapi_pc pc;
  2603. unsigned int minor = iminor(inode);
  2604. lock_kernel();
  2605. tape = drive->driver_data;
  2606. debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
  2607. if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
  2608. idetape_write_release(drive, minor);
  2609. if (tape->chrdev_dir == IDETAPE_DIR_READ) {
  2610. if (minor < 128)
  2611. idetape_discard_read_pipeline(drive, 1);
  2612. else
  2613. idetape_wait_for_pipeline(drive);
  2614. }
  2615. if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
  2616. (void) idetape_rewind_tape(drive);
  2617. if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
  2618. if (tape->door_locked == DOOR_LOCKED) {
  2619. if (idetape_create_prevent_cmd(drive, &pc, 0)) {
  2620. if (!idetape_queue_pc_tail(drive, &pc))
  2621. tape->door_locked = DOOR_UNLOCKED;
  2622. }
  2623. }
  2624. }
  2625. clear_bit(IDETAPE_FLAG_BUSY, &tape->flags);
  2626. ide_tape_put(tape);
  2627. unlock_kernel();
  2628. return 0;
  2629. }
  2630. /*
  2631. * check the contents of the ATAPI IDENTIFY command results. We return:
  2632. *
  2633. * 1 - If the tape can be supported by us, based on the information we have so
  2634. * far.
  2635. *
2636. * 0 - If this tape drive is not currently supported by us.
  2637. */
  2638. static int idetape_identify_device(ide_drive_t *drive)
  2639. {
  2640. u8 gcw[2], protocol, device_type, removable, packet_size;
  2641. if (drive->id_read == 0)
  2642. return 1;
  2643. *((unsigned short *) &gcw) = drive->id->config;
  2644. protocol = (gcw[1] & 0xC0) >> 6;
  2645. device_type = gcw[1] & 0x1F;
  2646. removable = !!(gcw[0] & 0x80);
  2647. packet_size = gcw[0] & 0x3;
  2648. /* Check that we can support this device */
  2649. if (protocol != 2)
  2650. printk(KERN_ERR "ide-tape: Protocol (0x%02x) is not ATAPI\n",
  2651. protocol);
  2652. else if (device_type != 1)
  2653. printk(KERN_ERR "ide-tape: Device type (0x%02x) is not set "
  2654. "to tape\n", device_type);
  2655. else if (!removable)
  2656. printk(KERN_ERR "ide-tape: The removable flag is not set\n");
  2657. else if (packet_size != 0) {
  2658. printk(KERN_ERR "ide-tape: Packet size (0x%02x) is not 12"
  2659. " bytes\n", packet_size);
  2660. } else
  2661. return 1;
  2662. return 0;
  2663. }
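
/*
 * For reference, the IDENTIFY "general configuration" word
 * (drive->id->config) as decoded above; the gcw[0]/gcw[1] split assumes
 * the little-endian byte order the driver already relies on:
 *
 *	bits 15..14 (gcw[1] & 0xC0)	protocol, 2 = ATAPI
 *	bits 12..8  (gcw[1] & 0x1F)	device type, 1 = sequential access (tape)
 *	bit  7      (gcw[0] & 0x80)	removable medium
 *	bits 1..0   (gcw[0] & 0x03)	packet size, 0 = 12-byte command packets
 */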

static void idetape_get_inquiry_results(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc pc;
	char fw_rev[6], vendor_id[10], product_id[18];

	idetape_create_inquiry_cmd(&pc);
	if (idetape_queue_pc_tail(drive, &pc)) {
		printk(KERN_ERR "ide-tape: %s: can't get INQUIRY results\n",
				tape->name);
		return;
	}
	memcpy(vendor_id, &pc.buf[8], 8);
	memcpy(product_id, &pc.buf[16], 16);
	memcpy(fw_rev, &pc.buf[32], 4);

	ide_fixstring(vendor_id, 10, 0);
	ide_fixstring(product_id, 18, 0);
	ide_fixstring(fw_rev, 6, 0);

	printk(KERN_INFO "ide-tape: %s <-> %s: %s %s rev %s\n",
			drive->name, tape->name, vendor_id, product_id, fw_rev);
}

/*
 * Ask the tape about its various parameters. In particular, we will adjust our
 * data transfer buffer size to the recommended value as returned by the tape.
 */
static void idetape_get_mode_sense_results(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;
	struct ide_atapi_pc pc;
	u8 *caps;
	u16 speed, max_speed;

	idetape_create_mode_sense_cmd(&pc, IDETAPE_CAPABILITIES_PAGE);
	if (idetape_queue_pc_tail(drive, &pc)) {
		printk(KERN_ERR "ide-tape: Can't get tape parameters - assuming"
				" some default values\n");
		tape->blk_size = 512;
		put_unaligned(52, (u16 *)&tape->caps[12]);
		put_unaligned(540, (u16 *)&tape->caps[14]);
		put_unaligned(6*52, (u16 *)&tape->caps[16]);
		return;
	}
	caps = pc.buf + 4 + pc.buf[3];

	/* convert to host order and save for later use */
	speed = be16_to_cpu(*(u16 *)&caps[14]);
	max_speed = be16_to_cpu(*(u16 *)&caps[8]);

	put_unaligned(max_speed, (u16 *)&caps[8]);
	put_unaligned(be16_to_cpu(*(u16 *)&caps[12]), (u16 *)&caps[12]);
	put_unaligned(speed, (u16 *)&caps[14]);
	put_unaligned(be16_to_cpu(*(u16 *)&caps[16]), (u16 *)&caps[16]);

	if (!speed) {
		printk(KERN_INFO "ide-tape: %s: invalid tape speed "
				"(assuming 650KB/sec)\n", drive->name);
		put_unaligned(650, (u16 *)&caps[14]);
	}
	if (!max_speed) {
		printk(KERN_INFO "ide-tape: %s: invalid max_speed "
				"(assuming 650KB/sec)\n", drive->name);
		put_unaligned(650, (u16 *)&caps[8]);
	}

	memcpy(&tape->caps, caps, 20);
	if (caps[7] & 0x02)
		tape->blk_size = 512;
	else if (caps[7] & 0x04)
		tape->blk_size = 1024;
}
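
/*
 * Layout of the cached capabilities page (tape->caps[]) as this driver
 * interprets it, summarized from the accesses in this file; the 16-bit
 * fields are stored in host order after the conversion above:
 *
 *	caps[7]		block-size flags (0x02 = 512 bytes, 0x04 = 1024 bytes)
 *	caps[8..9]	maximum supported speed, in KB/s
 *	caps[12..13]	continuous transfer limit in blocks (the "ctl" used to
 *			size the stage buffer in idetape_setup())
 *	caps[14..15]	current speed, in KB/s
 *	caps[16..17]	drive buffer size, in 512-byte units
 */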

#ifdef CONFIG_IDE_PROC_FS
static void idetape_add_settings(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
			1, 2, (u16 *)&tape->caps[16], NULL);
	ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
			tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
	ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
			tape->stage_size / 1024, 1, &tape->max_stages, NULL);
	ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
			tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
	ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
			0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
			NULL);
	ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
			0xffff, tape->stage_size / 1024, 1,
			&tape->nr_pending_stages, NULL);
	ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
			1, 1, (u16 *)&tape->caps[14], NULL);
	ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
			1024, &tape->stage_size, NULL);
	ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
			IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
			NULL);
	ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
			1, &drive->dsc_overlap, NULL);
	ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
			1, 1, &tape->avg_speed, NULL);
	ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
			1, &tape->debug_mask, NULL);
}
#else
static inline void idetape_add_settings(ide_drive_t *drive) { ; }
#endif

/*
 * The function below is called to:
 *
 * 1. Initialize our various state variables.
 * 2. Ask the tape for its capabilities.
 * 3. Allocate a buffer which will be used for data transfer. The buffer size
 * is chosen based on the recommendation which we received in step 2.
 *
 * Note that at this point ide.c already assigned us an irq, so that we can
 * queue requests here and wait for their completion.
 */
static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
{
	unsigned long t1, tmid, tn, t;
	int speed;
	int stage_size;
	u8 gcw[2];
	struct sysinfo si;
	u16 *ctl = (u16 *)&tape->caps[12];

	spin_lock_init(&tape->lock);
	drive->dsc_overlap = 1;
	if (drive->hwif->host_flags & IDE_HFLAG_NO_DSC) {
		printk(KERN_INFO "ide-tape: %s: disabling DSC overlap\n",
				tape->name);
		drive->dsc_overlap = 0;
	}
	/* Seagate Travan drives do not support DSC overlap. */
	if (strstr(drive->id->model, "Seagate STT3401"))
		drive->dsc_overlap = 0;

	tape->minor = minor;
	tape->name[0] = 'h';
	tape->name[1] = 't';
	tape->name[2] = '0' + minor;
	tape->chrdev_dir = IDETAPE_DIR_NONE;
	tape->pc = tape->pc_stack;
	*((unsigned short *) &gcw) = drive->id->config;

	/* Command packet DRQ type */
	if (((gcw[0] & 0x60) >> 5) == 1)
		set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);

	tape->min_pipeline = 10;
	tape->max_pipeline = 10;
	tape->max_stages   = 10;

	idetape_get_inquiry_results(drive);
	idetape_get_mode_sense_results(drive);
	ide_tape_get_bsize_from_bdesc(drive);
	tape->user_bs_factor = 1;
	tape->stage_size = *ctl * tape->blk_size;
	while (tape->stage_size > 0xffff) {
		printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
		*ctl /= 2;
		tape->stage_size = *ctl * tape->blk_size;
	}
	stage_size = tape->stage_size;
	tape->pages_per_stage = stage_size / PAGE_SIZE;
	if (stage_size % PAGE_SIZE) {
		tape->pages_per_stage++;
		tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;
	}

	/* Select the "best" DSC read/write polling freq and pipeline size. */
	speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);

	tape->max_stages = speed * 1000 * 10 / tape->stage_size;

	/* Limit memory use for pipeline to 10% of physical memory */
	si_meminfo(&si);
	if (tape->max_stages * tape->stage_size >
			si.totalram * si.mem_unit / 10)
		tape->max_stages =
			si.totalram * si.mem_unit / (10 * tape->stage_size);

	tape->max_stages   = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
	tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
	tape->max_pipeline =
		min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
	if (tape->max_stages == 0) {
		tape->max_stages   = 1;
		tape->min_pipeline = 1;
		tape->max_pipeline = 1;
	}

	t1 = (tape->stage_size * HZ) / (speed * 1000);
	tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
	tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);

	if (tape->max_stages)
		t = tn;
	else
		t = t1;

	/*
	 * Ensure that the number we got makes sense; limit it within
	 * IDETAPE_DSC_RW_MIN and IDETAPE_DSC_RW_MAX.
	 */
	tape->best_dsc_rw_freq = max_t(unsigned long,
				min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
				IDETAPE_DSC_RW_MIN);
	printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
		"%dkB pipeline, %lums tDSC%s\n",
		drive->name, tape->name, *(u16 *)&tape->caps[14],
		(*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
		tape->stage_size / 1024,
		tape->max_stages * tape->stage_size / 1024,
		tape->best_dsc_rw_freq * 1000 / HZ,
		drive->using_dma ? ", DMA" : "");

	idetape_add_settings(drive);
}
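
/*
 * Worked example of the DSC polling calculation above (a sketch only; the
 * concrete numbers depend on HZ and IDETAPE_FIFO_THRESHOLD, both assumed
 * here). With a 600 KB/s drive, a 32 KB stage and HZ = 250:
 *
 *	t1 = (32768 * 250) / (600 * 1000)	= ~13 jiffies per stage
 *	tn = IDETAPE_FIFO_THRESHOLD * t1	jiffies to fill the threshold
 *
 * Since max_stages is forced to at least 1, t = tn is always chosen, and
 * best_dsc_rw_freq is then clamped into the
 * [IDETAPE_DSC_RW_MIN, IDETAPE_DSC_RW_MAX] window before it is used for
 * DSC polling.
 */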

static void ide_tape_remove(ide_drive_t *drive)
{
	idetape_tape_t *tape = drive->driver_data;

	ide_proc_unregister_driver(drive, tape->driver);

	ide_unregister_region(tape->disk);

	ide_tape_put(tape);
}

static void ide_tape_release(struct kref *kref)
{
	struct ide_tape_obj *tape = to_ide_tape(kref);
	ide_drive_t *drive = tape->drive;
	struct gendisk *g = tape->disk;

	BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);

	drive->dsc_overlap = 0;
	drive->driver_data = NULL;
	device_destroy(idetape_sysfs_class, MKDEV(IDETAPE_MAJOR, tape->minor));
	device_destroy(idetape_sysfs_class,
			MKDEV(IDETAPE_MAJOR, tape->minor + 128));
	idetape_devs[tape->minor] = NULL;
	g->private_data = NULL;
	put_disk(g);
	kfree(tape);
}

#ifdef CONFIG_IDE_PROC_FS
static int proc_idetape_read_name
	(char *page, char **start, off_t off, int count, int *eof, void *data)
{
	ide_drive_t *drive = (ide_drive_t *) data;
	idetape_tape_t *tape = drive->driver_data;
	char *out = page;
	int len;

	len = sprintf(out, "%s\n", tape->name);
	PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
}

static ide_proc_entry_t idetape_proc[] = {
	{ "capacity",	S_IFREG|S_IRUGO,	proc_ide_read_capacity,	NULL },
	{ "name",	S_IFREG|S_IRUGO,	proc_idetape_read_name,	NULL },
	{ NULL, 0, NULL, NULL }
};
#endif

static int ide_tape_probe(ide_drive_t *);

static ide_driver_t idetape_driver = {
	.gen_driver = {
		.owner		= THIS_MODULE,
		.name		= "ide-tape",
		.bus		= &ide_bus_type,
	},
	.probe			= ide_tape_probe,
	.remove			= ide_tape_remove,
	.version		= IDETAPE_VERSION,
	.media			= ide_tape,
	.supports_dsc_overlap	= 1,
	.do_request		= idetape_do_request,
	.end_request		= idetape_end_request,
	.error			= __ide_error,
	.abort			= __ide_abort,
#ifdef CONFIG_IDE_PROC_FS
	.proc			= idetape_proc,
#endif
};

/* Our character device supporting functions, passed to register_chrdev. */
static const struct file_operations idetape_fops = {
	.owner		= THIS_MODULE,
	.read		= idetape_chrdev_read,
	.write		= idetape_chrdev_write,
	.ioctl		= idetape_chrdev_ioctl,
	.open		= idetape_chrdev_open,
	.release	= idetape_chrdev_release,
};
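
/*
 * These file operations back the /dev/ht* and /dev/nht* character nodes
 * registered under IDETAPE_MAJOR in idetape_init(), while the
 * block_device_operations below serve the generic block-device node
 * registered through ide_register_region() in ide_tape_probe().
 */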

static int idetape_open(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ide_tape_obj *tape;

	tape = ide_tape_get(disk);
	if (!tape)
		return -ENXIO;

	return 0;
}

static int idetape_release(struct inode *inode, struct file *filp)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct ide_tape_obj *tape = ide_tape_g(disk);

	ide_tape_put(tape);

	return 0;
}

static int idetape_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct block_device *bdev = inode->i_bdev;
	struct ide_tape_obj *tape = ide_tape_g(bdev->bd_disk);
	ide_drive_t *drive = tape->drive;
	int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);

	if (err == -EINVAL)
		err = idetape_blkdev_ioctl(drive, cmd, arg);
	return err;
}

static struct block_device_operations idetape_block_ops = {
	.owner		= THIS_MODULE,
	.open		= idetape_open,
	.release	= idetape_release,
	.ioctl		= idetape_ioctl,
};

static int ide_tape_probe(ide_drive_t *drive)
{
	idetape_tape_t *tape;
	struct gendisk *g;
	int minor;

	if (!strstr("ide-tape", drive->driver_req))
		goto failed;
	if (!drive->present)
		goto failed;
	if (drive->media != ide_tape)
		goto failed;
	if (!idetape_identify_device(drive)) {
		printk(KERN_ERR "ide-tape: %s: not supported by this version of"
				" the driver\n", drive->name);
		goto failed;
	}
	if (drive->scsi) {
		printk(KERN_INFO "ide-tape: passing drive %s to ide-scsi"
				" emulation.\n", drive->name);
		goto failed;
	}
	tape = kzalloc(sizeof(idetape_tape_t), GFP_KERNEL);
	if (tape == NULL) {
		printk(KERN_ERR "ide-tape: %s: Can't allocate a tape struct\n",
				drive->name);
		goto failed;
	}

	g = alloc_disk(1 << PARTN_BITS);
	if (!g)
		goto out_free_tape;

	ide_init_disk(g, drive);

	ide_proc_register_driver(drive, &idetape_driver);

	kref_init(&tape->kref);

	tape->drive = drive;
	tape->driver = &idetape_driver;
	tape->disk = g;

	g->private_data = &tape->driver;

	drive->driver_data = tape;

	/* Pick the first unused slot in idetape_devs[] as the minor number. */
	mutex_lock(&idetape_ref_mutex);
	for (minor = 0; idetape_devs[minor]; minor++)
		;
	idetape_devs[minor] = tape;
	mutex_unlock(&idetape_ref_mutex);

	idetape_setup(drive, tape, minor);

	device_create(idetape_sysfs_class, &drive->gendev,
			MKDEV(IDETAPE_MAJOR, minor), "%s", tape->name);
	device_create(idetape_sysfs_class, &drive->gendev,
			MKDEV(IDETAPE_MAJOR, minor + 128), "n%s", tape->name);

	g->fops = &idetape_block_ops;
	ide_register_region(g);

	return 0;

out_free_tape:
	kfree(tape);
failed:
	return -ENODEV;
}

static void __exit idetape_exit(void)
{
	driver_unregister(&idetape_driver.gen_driver);
	class_destroy(idetape_sysfs_class);
	unregister_chrdev(IDETAPE_MAJOR, "ht");
}

static int __init idetape_init(void)
{
	int error = 1;

	idetape_sysfs_class = class_create(THIS_MODULE, "ide_tape");
	if (IS_ERR(idetape_sysfs_class)) {
		idetape_sysfs_class = NULL;
		printk(KERN_ERR "Unable to create sysfs class for ide tapes\n");
		error = -EBUSY;
		goto out;
	}

	if (register_chrdev(IDETAPE_MAJOR, "ht", &idetape_fops)) {
		printk(KERN_ERR "ide-tape: Failed to register chrdev"
				" interface\n");
		error = -EBUSY;
		goto out_free_class;
	}

	error = driver_register(&idetape_driver.gen_driver);
	if (error)
		goto out_free_chrdev;

	return 0;

out_free_chrdev:
	unregister_chrdev(IDETAPE_MAJOR, "ht");
out_free_class:
	class_destroy(idetape_sysfs_class);
out:
	return error;
}

MODULE_ALIAS("ide:*m-tape*");
module_init(idetape_init);
module_exit(idetape_exit);
MODULE_ALIAS_CHARDEV_MAJOR(IDETAPE_MAJOR);
MODULE_DESCRIPTION("ATAPI Streaming TAPE Driver");
MODULE_LICENSE("GPL");