libata-core.c 94 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058
  1. /*
  2. libata-core.c - helper library for ATA
  3. Copyright 2003-2004 Red Hat, Inc. All rights reserved.
  4. Copyright 2003-2004 Jeff Garzik
  5. The contents of this file are subject to the Open
  6. Software License version 1.1 that can be found at
  7. http://www.opensource.org/licenses/osl-1.1.txt and is included herein
  8. by reference.
  9. Alternatively, the contents of this file may be used under the terms
  10. of the GNU General Public License version 2 (the "GPL") as distributed
  11. in the kernel source COPYING file, in which case the provisions of
  12. the GPL are applicable instead of the above. If you wish to allow
  13. the use of your version of this file only under the terms of the
  14. GPL and not to allow others to use your version of this file under
  15. the OSL, indicate your decision by deleting the provisions above and
  16. replace them with the notice and other provisions required by the GPL.
  17. If you do not delete the provisions above, a recipient may use your
  18. version of this file under either the OSL or the GPL.
  19. */
  20. #include <linux/config.h>
  21. #include <linux/kernel.h>
  22. #include <linux/module.h>
  23. #include <linux/pci.h>
  24. #include <linux/init.h>
  25. #include <linux/list.h>
  26. #include <linux/mm.h>
  27. #include <linux/highmem.h>
  28. #include <linux/spinlock.h>
  29. #include <linux/blkdev.h>
  30. #include <linux/delay.h>
  31. #include <linux/timer.h>
  32. #include <linux/interrupt.h>
  33. #include <linux/completion.h>
  34. #include <linux/suspend.h>
  35. #include <linux/workqueue.h>
  36. #include <scsi/scsi.h>
  37. #include "scsi.h"
  38. #include "scsi_priv.h"
  39. #include <scsi/scsi_host.h>
  40. #include <linux/libata.h>
  41. #include <asm/io.h>
  42. #include <asm/semaphore.h>
  43. #include <asm/byteorder.h>
  44. #include "libata.h"
  45. static unsigned int ata_busy_sleep (struct ata_port *ap,
  46. unsigned long tmout_pat,
  47. unsigned long tmout);
  48. static void ata_set_mode(struct ata_port *ap);
  49. static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
  50. static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
  51. static int fgb(u32 bitmap);
  52. static int ata_choose_xfer_mode(struct ata_port *ap,
  53. u8 *xfer_mode_out,
  54. unsigned int *xfer_shift_out);
  55. static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
  56. static void __ata_qc_complete(struct ata_queued_cmd *qc);
  57. static unsigned int ata_unique_id = 1;
  58. static struct workqueue_struct *ata_wq;
  59. MODULE_AUTHOR("Jeff Garzik");
  60. MODULE_DESCRIPTION("Library module for ATA devices");
  61. MODULE_LICENSE("GPL");
  62. MODULE_VERSION(DRV_VERSION);
  63. /**
  64. * ata_tf_load - send taskfile registers to host controller
  65. * @ap: Port to which output is sent
  66. * @tf: ATA taskfile register set
  67. *
  68. * Outputs ATA taskfile to standard ATA host controller.
  69. *
  70. * LOCKING:
  71. * Inherited from caller.
  72. */
  73. static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
  74. {
  75. struct ata_ioports *ioaddr = &ap->ioaddr;
  76. unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
  77. if (tf->ctl != ap->last_ctl) {
  78. outb(tf->ctl, ioaddr->ctl_addr);
  79. ap->last_ctl = tf->ctl;
  80. ata_wait_idle(ap);
  81. }
  82. if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
  83. outb(tf->hob_feature, ioaddr->feature_addr);
  84. outb(tf->hob_nsect, ioaddr->nsect_addr);
  85. outb(tf->hob_lbal, ioaddr->lbal_addr);
  86. outb(tf->hob_lbam, ioaddr->lbam_addr);
  87. outb(tf->hob_lbah, ioaddr->lbah_addr);
  88. VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
  89. tf->hob_feature,
  90. tf->hob_nsect,
  91. tf->hob_lbal,
  92. tf->hob_lbam,
  93. tf->hob_lbah);
  94. }
  95. if (is_addr) {
  96. outb(tf->feature, ioaddr->feature_addr);
  97. outb(tf->nsect, ioaddr->nsect_addr);
  98. outb(tf->lbal, ioaddr->lbal_addr);
  99. outb(tf->lbam, ioaddr->lbam_addr);
  100. outb(tf->lbah, ioaddr->lbah_addr);
  101. VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
  102. tf->feature,
  103. tf->nsect,
  104. tf->lbal,
  105. tf->lbam,
  106. tf->lbah);
  107. }
  108. if (tf->flags & ATA_TFLAG_DEVICE) {
  109. outb(tf->device, ioaddr->device_addr);
  110. VPRINTK("device 0x%X\n", tf->device);
  111. }
  112. ata_wait_idle(ap);
  113. }
  114. /**
  115. * ata_tf_load_mmio - send taskfile registers to host controller
  116. * @ap: Port to which output is sent
  117. * @tf: ATA taskfile register set
  118. *
  119. * Outputs ATA taskfile to standard ATA host controller using MMIO.
  120. *
  121. * LOCKING:
  122. * Inherited from caller.
  123. */
  124. static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
  125. {
  126. struct ata_ioports *ioaddr = &ap->ioaddr;
  127. unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
  128. if (tf->ctl != ap->last_ctl) {
  129. writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
  130. ap->last_ctl = tf->ctl;
  131. ata_wait_idle(ap);
  132. }
  133. if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
  134. writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
  135. writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
  136. writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
  137. writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
  138. writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
  139. VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
  140. tf->hob_feature,
  141. tf->hob_nsect,
  142. tf->hob_lbal,
  143. tf->hob_lbam,
  144. tf->hob_lbah);
  145. }
  146. if (is_addr) {
  147. writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
  148. writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
  149. writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
  150. writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
  151. writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
  152. VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
  153. tf->feature,
  154. tf->nsect,
  155. tf->lbal,
  156. tf->lbam,
  157. tf->lbah);
  158. }
  159. if (tf->flags & ATA_TFLAG_DEVICE) {
  160. writeb(tf->device, (void __iomem *) ioaddr->device_addr);
  161. VPRINTK("device 0x%X\n", tf->device);
  162. }
  163. ata_wait_idle(ap);
  164. }
  165. void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
  166. {
  167. if (ap->flags & ATA_FLAG_MMIO)
  168. ata_tf_load_mmio(ap, tf);
  169. else
  170. ata_tf_load_pio(ap, tf);
  171. }
  172. /**
  173. * ata_exec_command - issue ATA command to host controller
  174. * @ap: port to which command is being issued
  175. * @tf: ATA taskfile register set
  176. *
  177. * Issues PIO/MMIO write to ATA command register, with proper
  178. * synchronization with interrupt handler / other threads.
  179. *
  180. * LOCKING:
  181. * spin_lock_irqsave(host_set lock)
  182. */
  183. static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
  184. {
  185. DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
  186. outb(tf->command, ap->ioaddr.command_addr);
  187. ata_pause(ap);
  188. }
  189. /**
  190. * ata_exec_command_mmio - issue ATA command to host controller
  191. * @ap: port to which command is being issued
  192. * @tf: ATA taskfile register set
  193. *
  194. * Issues MMIO write to ATA command register, with proper
  195. * synchronization with interrupt handler / other threads.
  196. *
  197. * LOCKING:
  198. * spin_lock_irqsave(host_set lock)
  199. */
  200. static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
  201. {
  202. DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
  203. writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
  204. ata_pause(ap);
  205. }
  206. void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
  207. {
  208. if (ap->flags & ATA_FLAG_MMIO)
  209. ata_exec_command_mmio(ap, tf);
  210. else
  211. ata_exec_command_pio(ap, tf);
  212. }
  213. /**
  214. * ata_exec - issue ATA command to host controller
  215. * @ap: port to which command is being issued
  216. * @tf: ATA taskfile register set
  217. *
  218. * Issues PIO/MMIO write to ATA command register, with proper
  219. * synchronization with interrupt handler / other threads.
  220. *
  221. * LOCKING:
  222. * Obtains host_set lock.
  223. */
  224. static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
  225. {
  226. unsigned long flags;
  227. DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
  228. spin_lock_irqsave(&ap->host_set->lock, flags);
  229. ap->ops->exec_command(ap, tf);
  230. spin_unlock_irqrestore(&ap->host_set->lock, flags);
  231. }
  232. /**
  233. * ata_tf_to_host - issue ATA taskfile to host controller
  234. * @ap: port to which command is being issued
  235. * @tf: ATA taskfile register set
  236. *
  237. * Issues ATA taskfile register set to ATA host controller,
  238. * with proper synchronization with interrupt handler and
  239. * other threads.
  240. *
  241. * LOCKING:
  242. * Obtains host_set lock.
  243. */
  244. static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
  245. {
  246. ap->ops->tf_load(ap, tf);
  247. ata_exec(ap, tf);
  248. }
  249. /**
  250. * ata_tf_to_host_nolock - issue ATA taskfile to host controller
  251. * @ap: port to which command is being issued
  252. * @tf: ATA taskfile register set
  253. *
  254. * Issues ATA taskfile register set to ATA host controller,
  255. * with proper synchronization with interrupt handler and
  256. * other threads.
  257. *
  258. * LOCKING:
  259. * spin_lock_irqsave(host_set lock)
  260. */
  261. void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
  262. {
  263. ap->ops->tf_load(ap, tf);
  264. ap->ops->exec_command(ap, tf);
  265. }
  266. /**
  267. * ata_tf_read - input device's ATA taskfile shadow registers
  268. * @ap: Port from which input is read
  269. * @tf: ATA taskfile register set for storing input
  270. *
  271. * Reads ATA taskfile registers for currently-selected device
  272. * into @tf.
  273. *
  274. * LOCKING:
  275. * Inherited from caller.
  276. */
  277. static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
  278. {
  279. struct ata_ioports *ioaddr = &ap->ioaddr;
  280. tf->nsect = inb(ioaddr->nsect_addr);
  281. tf->lbal = inb(ioaddr->lbal_addr);
  282. tf->lbam = inb(ioaddr->lbam_addr);
  283. tf->lbah = inb(ioaddr->lbah_addr);
  284. tf->device = inb(ioaddr->device_addr);
  285. if (tf->flags & ATA_TFLAG_LBA48) {
  286. outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
  287. tf->hob_feature = inb(ioaddr->error_addr);
  288. tf->hob_nsect = inb(ioaddr->nsect_addr);
  289. tf->hob_lbal = inb(ioaddr->lbal_addr);
  290. tf->hob_lbam = inb(ioaddr->lbam_addr);
  291. tf->hob_lbah = inb(ioaddr->lbah_addr);
  292. }
  293. }
  294. /**
  295. * ata_tf_read_mmio - input device's ATA taskfile shadow registers
  296. * @ap: Port from which input is read
  297. * @tf: ATA taskfile register set for storing input
  298. *
  299. * Reads ATA taskfile registers for currently-selected device
  300. * into @tf via MMIO.
  301. *
  302. * LOCKING:
  303. * Inherited from caller.
  304. */
  305. static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
  306. {
  307. struct ata_ioports *ioaddr = &ap->ioaddr;
  308. tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
  309. tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
  310. tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
  311. tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
  312. tf->device = readb((void __iomem *)ioaddr->device_addr);
  313. if (tf->flags & ATA_TFLAG_LBA48) {
  314. writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
  315. tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
  316. tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
  317. tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
  318. tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
  319. tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
  320. }
  321. }
  322. void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
  323. {
  324. if (ap->flags & ATA_FLAG_MMIO)
  325. ata_tf_read_mmio(ap, tf);
  326. else
  327. ata_tf_read_pio(ap, tf);
  328. }
  329. /**
  330. * ata_check_status_pio - Read device status reg & clear interrupt
  331. * @ap: port where the device is
  332. *
  333. * Reads ATA taskfile status register for currently-selected device
  334. * and return it's value. This also clears pending interrupts
  335. * from this device
  336. *
  337. * LOCKING:
  338. * Inherited from caller.
  339. */
  340. static u8 ata_check_status_pio(struct ata_port *ap)
  341. {
  342. return inb(ap->ioaddr.status_addr);
  343. }
  344. /**
  345. * ata_check_status_mmio - Read device status reg & clear interrupt
  346. * @ap: port where the device is
  347. *
  348. * Reads ATA taskfile status register for currently-selected device
  349. * via MMIO and return it's value. This also clears pending interrupts
  350. * from this device
  351. *
  352. * LOCKING:
  353. * Inherited from caller.
  354. */
  355. static u8 ata_check_status_mmio(struct ata_port *ap)
  356. {
  357. return readb((void __iomem *) ap->ioaddr.status_addr);
  358. }
  359. u8 ata_check_status(struct ata_port *ap)
  360. {
  361. if (ap->flags & ATA_FLAG_MMIO)
  362. return ata_check_status_mmio(ap);
  363. return ata_check_status_pio(ap);
  364. }
  365. u8 ata_altstatus(struct ata_port *ap)
  366. {
  367. if (ap->ops->check_altstatus)
  368. return ap->ops->check_altstatus(ap);
  369. if (ap->flags & ATA_FLAG_MMIO)
  370. return readb((void __iomem *)ap->ioaddr.altstatus_addr);
  371. return inb(ap->ioaddr.altstatus_addr);
  372. }
  373. u8 ata_chk_err(struct ata_port *ap)
  374. {
  375. if (ap->ops->check_err)
  376. return ap->ops->check_err(ap);
  377. if (ap->flags & ATA_FLAG_MMIO) {
  378. return readb((void __iomem *) ap->ioaddr.error_addr);
  379. }
  380. return inb(ap->ioaddr.error_addr);
  381. }
  382. /**
  383. * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
  384. * @tf: Taskfile to convert
  385. * @fis: Buffer into which data will output
  386. * @pmp: Port multiplier port
  387. *
  388. * Converts a standard ATA taskfile to a Serial ATA
  389. * FIS structure (Register - Host to Device).
  390. *
  391. * LOCKING:
  392. * Inherited from caller.
  393. */
  394. void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
  395. {
  396. fis[0] = 0x27; /* Register - Host to Device FIS */
  397. fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
  398. bit 7 indicates Command FIS */
  399. fis[2] = tf->command;
  400. fis[3] = tf->feature;
  401. fis[4] = tf->lbal;
  402. fis[5] = tf->lbam;
  403. fis[6] = tf->lbah;
  404. fis[7] = tf->device;
  405. fis[8] = tf->hob_lbal;
  406. fis[9] = tf->hob_lbam;
  407. fis[10] = tf->hob_lbah;
  408. fis[11] = tf->hob_feature;
  409. fis[12] = tf->nsect;
  410. fis[13] = tf->hob_nsect;
  411. fis[14] = 0;
  412. fis[15] = tf->ctl;
  413. fis[16] = 0;
  414. fis[17] = 0;
  415. fis[18] = 0;
  416. fis[19] = 0;
  417. }
  418. /**
  419. * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
  420. * @fis: Buffer from which data will be input
  421. * @tf: Taskfile to output
  422. *
  423. * Converts a standard ATA taskfile to a Serial ATA
  424. * FIS structure (Register - Host to Device).
  425. *
  426. * LOCKING:
  427. * Inherited from caller.
  428. */
  429. void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
  430. {
  431. tf->command = fis[2]; /* status */
  432. tf->feature = fis[3]; /* error */
  433. tf->lbal = fis[4];
  434. tf->lbam = fis[5];
  435. tf->lbah = fis[6];
  436. tf->device = fis[7];
  437. tf->hob_lbal = fis[8];
  438. tf->hob_lbam = fis[9];
  439. tf->hob_lbah = fis[10];
  440. tf->nsect = fis[12];
  441. tf->hob_nsect = fis[13];
  442. }
  443. /**
  444. * ata_prot_to_cmd - determine which read/write opcodes to use
  445. * @protocol: ATA_PROT_xxx taskfile protocol
  446. * @lba48: true is lba48 is present
  447. *
  448. * Given necessary input, determine which read/write commands
  449. * to use to transfer data.
  450. *
  451. * LOCKING:
  452. * None.
  453. */
  454. static int ata_prot_to_cmd(int protocol, int lba48)
  455. {
  456. int rcmd = 0, wcmd = 0;
  457. switch (protocol) {
  458. case ATA_PROT_PIO:
  459. if (lba48) {
  460. rcmd = ATA_CMD_PIO_READ_EXT;
  461. wcmd = ATA_CMD_PIO_WRITE_EXT;
  462. } else {
  463. rcmd = ATA_CMD_PIO_READ;
  464. wcmd = ATA_CMD_PIO_WRITE;
  465. }
  466. break;
  467. case ATA_PROT_DMA:
  468. if (lba48) {
  469. rcmd = ATA_CMD_READ_EXT;
  470. wcmd = ATA_CMD_WRITE_EXT;
  471. } else {
  472. rcmd = ATA_CMD_READ;
  473. wcmd = ATA_CMD_WRITE;
  474. }
  475. break;
  476. default:
  477. return -1;
  478. }
  479. return rcmd | (wcmd << 8);
  480. }
  481. /**
  482. * ata_dev_set_protocol - set taskfile protocol and r/w commands
  483. * @dev: device to examine and configure
  484. *
  485. * Examine the device configuration, after we have
  486. * read the identify-device page and configured the
  487. * data transfer mode. Set internal state related to
  488. * the ATA taskfile protocol (pio, pio mult, dma, etc.)
  489. * and calculate the proper read/write commands to use.
  490. *
  491. * LOCKING:
  492. * caller.
  493. */
  494. static void ata_dev_set_protocol(struct ata_device *dev)
  495. {
  496. int pio = (dev->flags & ATA_DFLAG_PIO);
  497. int lba48 = (dev->flags & ATA_DFLAG_LBA48);
  498. int proto, cmd;
  499. if (pio)
  500. proto = dev->xfer_protocol = ATA_PROT_PIO;
  501. else
  502. proto = dev->xfer_protocol = ATA_PROT_DMA;
  503. cmd = ata_prot_to_cmd(proto, lba48);
  504. if (cmd < 0)
  505. BUG();
  506. dev->read_cmd = cmd & 0xff;
  507. dev->write_cmd = (cmd >> 8) & 0xff;
  508. }
/* Human-readable names for transfer modes.  The ordering is
 * significant: entries are indexed by bit position (UDMA modes
 * first, then MWDMA, then PIO) by ata_mode_string() and
 * ata_dev_set_mode() -- do not reorder.
 */
static const char * xfer_mode_str[] = {
	"UDMA/16",
	"UDMA/25",
	"UDMA/33",
	"UDMA/44",
	"UDMA/66",
	"UDMA/100",
	"UDMA/133",
	"UDMA7",
	"MWDMA0",
	"MWDMA1",
	"MWDMA2",
	"PIO0",
	"PIO1",
	"PIO2",
	"PIO3",
	"PIO4",
};
/**
 *	ata_mode_string - convert xfer mode bitmask to string
 *	@mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int mask)
{
	int i;

	/* scan UDMA bits, highest first.
	 * NOTE(review): the literal range 7..0 assumes
	 * ATA_SHIFT_UDMA == 0 -- confirm against the header.
	 */
	for (i = 7; i >= 0; i--)
		if (mask & (1 << i))
			goto out;

	/* then MWDMA bits 2..0 */
	for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
		if (mask & (1 << i))
			goto out;

	/* then PIO bits 4..0 */
	for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
		if (mask & (1 << i))
			goto out;

	return "<n/a>";

out:
	/* bit position maps directly onto xfer_mode_str[] index */
	return xfer_mode_str[i];
}
  557. /**
  558. * ata_pio_devchk - PATA device presence detection
  559. * @ap: ATA channel to examine
  560. * @device: Device to examine (starting at zero)
  561. *
  562. * This technique was originally described in
  563. * Hale Landis's ATADRVR (www.ata-atapi.com), and
  564. * later found its way into the ATA/ATAPI spec.
  565. *
  566. * Write a pattern to the ATA shadow registers,
  567. * and if a device is present, it will respond by
  568. * correctly storing and echoing back the
  569. * ATA shadow register contents.
  570. *
  571. * LOCKING:
  572. * caller.
  573. */
  574. static unsigned int ata_pio_devchk(struct ata_port *ap,
  575. unsigned int device)
  576. {
  577. struct ata_ioports *ioaddr = &ap->ioaddr;
  578. u8 nsect, lbal;
  579. ap->ops->dev_select(ap, device);
  580. outb(0x55, ioaddr->nsect_addr);
  581. outb(0xaa, ioaddr->lbal_addr);
  582. outb(0xaa, ioaddr->nsect_addr);
  583. outb(0x55, ioaddr->lbal_addr);
  584. outb(0x55, ioaddr->nsect_addr);
  585. outb(0xaa, ioaddr->lbal_addr);
  586. nsect = inb(ioaddr->nsect_addr);
  587. lbal = inb(ioaddr->lbal_addr);
  588. if ((nsect == 0x55) && (lbal == 0xaa))
  589. return 1; /* we found a device */
  590. return 0; /* nothing found */
  591. }
  592. /**
  593. * ata_mmio_devchk - PATA device presence detection
  594. * @ap: ATA channel to examine
  595. * @device: Device to examine (starting at zero)
  596. *
  597. * This technique was originally described in
  598. * Hale Landis's ATADRVR (www.ata-atapi.com), and
  599. * later found its way into the ATA/ATAPI spec.
  600. *
  601. * Write a pattern to the ATA shadow registers,
  602. * and if a device is present, it will respond by
  603. * correctly storing and echoing back the
  604. * ATA shadow register contents.
  605. *
  606. * LOCKING:
  607. * caller.
  608. */
  609. static unsigned int ata_mmio_devchk(struct ata_port *ap,
  610. unsigned int device)
  611. {
  612. struct ata_ioports *ioaddr = &ap->ioaddr;
  613. u8 nsect, lbal;
  614. ap->ops->dev_select(ap, device);
  615. writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
  616. writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
  617. writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
  618. writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
  619. writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
  620. writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
  621. nsect = readb((void __iomem *) ioaddr->nsect_addr);
  622. lbal = readb((void __iomem *) ioaddr->lbal_addr);
  623. if ((nsect == 0x55) && (lbal == 0xaa))
  624. return 1; /* we found a device */
  625. return 0; /* nothing found */
  626. }
  627. /**
  628. * ata_devchk - PATA device presence detection
  629. * @ap: ATA channel to examine
  630. * @device: Device to examine (starting at zero)
  631. *
  632. * Dispatch ATA device presence detection, depending
  633. * on whether we are using PIO or MMIO to talk to the
  634. * ATA shadow registers.
  635. *
  636. * LOCKING:
  637. * caller.
  638. */
  639. static unsigned int ata_devchk(struct ata_port *ap,
  640. unsigned int device)
  641. {
  642. if (ap->flags & ATA_FLAG_MMIO)
  643. return ata_mmio_devchk(ap, device);
  644. return ata_pio_devchk(ap, device);
  645. }
  646. /**
  647. * ata_dev_classify - determine device type based on ATA-spec signature
  648. * @tf: ATA taskfile register set for device to be identified
  649. *
  650. * Determine from taskfile register contents whether a device is
  651. * ATA or ATAPI, as per "Signature and persistence" section
  652. * of ATA/PI spec (volume 1, sect 5.14).
  653. *
  654. * LOCKING:
  655. * None.
  656. *
  657. * RETURNS:
  658. * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
  659. * the event of failure.
  660. */
  661. unsigned int ata_dev_classify(struct ata_taskfile *tf)
  662. {
  663. /* Apple's open source Darwin code hints that some devices only
  664. * put a proper signature into the LBA mid/high registers,
  665. * So, we only check those. It's sufficient for uniqueness.
  666. */
  667. if (((tf->lbam == 0) && (tf->lbah == 0)) ||
  668. ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
  669. DPRINTK("found ATA device by sig\n");
  670. return ATA_DEV_ATA;
  671. }
  672. if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
  673. ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
  674. DPRINTK("found ATAPI device by sig\n");
  675. return ATA_DEV_ATAPI;
  676. }
  677. DPRINTK("unknown device\n");
  678. return ATA_DEV_UNKNOWN;
  679. }
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	The raw Error register value read from the device.
 */
static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	/* error must be read before the taskfile snapshot */
	err = ata_chk_err(ap);
	ap->ops->tf_read(ap, &tf);

	/* assume absent until proven otherwise below */
	dev->class = ATA_DEV_NONE;

	/* see if device passed diags.
	 * NOTE(review): 0x01 is the "device passed" diagnostic code;
	 * 0x81 on device 0 presumably means "dev 0 passed, dev 1
	 * failed" -- confirm against the diagnostic code table.
	 */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return err;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);
	if (class == ATA_DEV_UNKNOWN)
		return err;
	/* status of 0 with an ATA signature: treat as not present */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return err;

	dev->class = class;

	return err;
}
  724. /**
  725. * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
  726. * @id: IDENTIFY DEVICE results we will examine
  727. * @s: string into which data is output
  728. * @ofs: offset into identify device page
  729. * @len: length of string to return. must be an even number.
  730. *
  731. * The strings in the IDENTIFY DEVICE page are broken up into
  732. * 16-bit chunks. Run through the string, and output each
  733. * 8-bit chunk linearly, regardless of platform.
  734. *
  735. * LOCKING:
  736. * caller.
  737. */
  738. void ata_dev_id_string(u16 *id, unsigned char *s,
  739. unsigned int ofs, unsigned int len)
  740. {
  741. unsigned int c;
  742. while (len > 0) {
  743. c = id[ofs] >> 8;
  744. *s = c;
  745. s++;
  746. c = id[ofs] & 0xff;
  747. *s = c;
  748. s++;
  749. ofs++;
  750. len -= 2;
  751. }
  752. }
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	No-op dev_select hook for controllers where explicit device
 *	selection is unnecessary (e.g. one device per port).
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}
  756. /**
  757. * ata_std_dev_select - Select device 0/1 on ATA bus
  758. * @ap: ATA channel to manipulate
  759. * @device: ATA device (numbered from zero) to select
  760. *
  761. * Use the method defined in the ATA specification to
  762. * make either device 0, or device 1, active on the
  763. * ATA channel.
  764. *
  765. * LOCKING:
  766. * caller.
  767. */
  768. void ata_std_dev_select (struct ata_port *ap, unsigned int device)
  769. {
  770. u8 tmp;
  771. if (device == 0)
  772. tmp = ATA_DEVICE_OBS;
  773. else
  774. tmp = ATA_DEVICE_OBS | ATA_DEV1;
  775. if (ap->flags & ATA_FLAG_MMIO) {
  776. writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
  777. } else {
  778. outb(tmp, ap->ioaddr.device_addr);
  779. }
  780. ata_pause(ap); /* needed; also flushes, for mmio */
  781. }
  782. /**
  783. * ata_dev_select - Select device 0/1 on ATA bus
  784. * @ap: ATA channel to manipulate
  785. * @device: ATA device (numbered from zero) to select
  786. * @wait: non-zero to wait for Status register BSY bit to clear
  787. * @can_sleep: non-zero if context allows sleeping
  788. *
  789. * Use the method defined in the ATA specification to
  790. * make either device 0, or device 1, active on the
  791. * ATA channel.
  792. *
  793. * This is a high-level version of ata_std_dev_select(),
  794. * which additionally provides the services of inserting
  795. * the proper pauses and status polling, where needed.
  796. *
  797. * LOCKING:
  798. * caller.
  799. */
  800. void ata_dev_select(struct ata_port *ap, unsigned int device,
  801. unsigned int wait, unsigned int can_sleep)
  802. {
  803. VPRINTK("ENTER, ata%u: device %u, wait %u\n",
  804. ap->id, device, wait);
  805. if (wait)
  806. ata_wait_idle(ap);
  807. ap->ops->dev_select(ap, device);
  808. if (wait) {
  809. if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
  810. msleep(150);
  811. ata_wait_idle(ap);
  812. }
  813. }
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@dev: Device whose IDENTIFY DEVICE page we will dump
 *
 *	Dump selected 16-bit words from a detected device's
 *	IDENTIFY PAGE page.  Words are shown after the byte-swap
 *	performed in ata_dev_identify(), i.e. in CPU order.
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(struct ata_device *dev)
{
	/* capability, field-validity and DMA/PIO mode words */
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		dev->id[49],
		dev->id[53],
		dev->id[63],
		dev->id[64],
		dev->id[75]);
	/* version and command-set support words */
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		dev->id[80],
		dev->id[81],
		dev->id[82],
		dev->id[83],
		dev->id[84]);
	/* UDMA modes and hardware-reset result */
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		dev->id[88],
		dev->id[93]);
}
/**
 *	ata_dev_identify - obtain IDENTIFY x DEVICE page
 *	@ap: port on which device we wish to probe resides
 *	@device: device bus address, starting at zero
 *
 *	Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
 *	command, and read back the 512-byte device information page.
 *	The device information page is fed to us via the standard
 *	PIO-IN protocol, but we hand-code it here. (TODO: investigate
 *	using standard PIO-IN paths)
 *
 *	After reading the device information page, we use several
 *	bits of information from it to initialize data structures
 *	that will be used during the lifetime of the ata_device.
 *	Other data from the info page is used to disqualify certain
 *	older ATA devices we do not wish to support.
 *
 *	On any failure, dev->class is bumped to the corresponding
 *	_UNSUP value so the device is subsequently ignored.
 *
 *	LOCKING:
 *	Inherited from caller.  Some functions called by this function
 *	obtain the host_set lock.
 */
static void ata_dev_identify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	unsigned int i;
	u16 tmp;
	unsigned long xfer_modes;
	u8 status;
	unsigned int using_edd;
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, device);
		return;
	}

	/* EDD (EXECUTE DEVICE DIAGNOSTIC) was the reset method only when
	 * neither SRST nor SATA reset was used
	 */
	if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);

	assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
		dev->class == ATA_DEV_NONE);

	ata_dev_select(ap, device, 1, 1); /* select device 0/1 */

	/* build a single-sector PIO-IN command reading into dev->id */
	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
	qc->dma_dir = DMA_FROM_DEVICE;
	qc->tf.protocol = ATA_PROT_PIO;
	qc->nsect = 1;

retry:
	/* opcode depends on the class guessed so far; may be revised
	 * below if the device rejects IDENTIFY DEVICE
	 */
	if (dev->class == ATA_DEV_ATA) {
		qc->tf.command = ATA_CMD_ID_ATA;
		DPRINTK("do ATA identify\n");
	} else {
		qc->tf.command = ATA_CMD_ID_ATAPI;
		DPRINTK("do ATAPI identify\n");
	}

	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		goto err_out;
	else
		wait_for_completion(&wait);

	status = ata_chk_status(ap);
	if (status & ATA_ERR) {
		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
			u8 err = ata_chk_err(ap);
			if (err & ATA_ABORTED) {
				dev->class = ATA_DEV_ATAPI;
				/* rewind the qc's transfer state before reuse */
				qc->cursg = 0;
				qc->cursg_ofs = 0;
				qc->cursect = 0;
				qc->nsect = 1;
				goto retry;
			}
		}
		goto err_out;
	}

	/* identify data arrives little-endian; fix up for this CPU */
	swap_buf_le16(dev->id, ATA_ID_WORDS);

	/* print device capabilities */
	printk(KERN_DEBUG "ata%u: dev %u cfg "
	       "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
	       ap->id, device, dev->id[49],
	       dev->id[82], dev->id[83], dev->id[84],
	       dev->id[85], dev->id[86], dev->id[87],
	       dev->id[88]);

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* we require LBA and DMA support (bits 8 & 9 of word 49) */
	if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
		printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
		goto err_out_nosup;
	}

	/* quick-n-dirty find max transfer mode; for printk only */
	xfer_modes = dev->id[ATA_ID_UDMA_MODES];
	if (!xfer_modes)
		xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
	if (!xfer_modes) {
		xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
		xfer_modes |= (0x7 << ATA_SHIFT_PIO);
	}

	ata_dump_id(dev);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(dev->id))	/* sanity check */
			goto err_out_nosup;

		/* find highest supported major version bit */
		tmp = dev->id[ATA_ID_MAJOR_VER];
		for (i = 14; i >= 1; i--)
			if (tmp & (1 << i))
				break;

		/* we require at least ATA-3 */
		if (i < 3) {
			printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
			goto err_out_nosup;
		}

		if (ata_id_has_lba48(dev->id)) {
			dev->flags |= ATA_DFLAG_LBA48;
			/* word 100: 48-bit native max address */
			dev->n_sectors = ata_id_u64(dev->id, 100);
		} else {
			/* word 60: 28-bit addressable sectors */
			dev->n_sectors = ata_id_u32(dev->id, 60);
		}

		ap->host->max_cmd_len = 16;

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
		       ap->id, device,
		       ata_mode_string(xfer_modes),
		       (unsigned long long)dev->n_sectors,
		       dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
	}

	/* ATAPI-specific feature tests */
	else {
		if (ata_id_is_ata(dev->id))	/* sanity check */
			goto err_out_nosup;

		rc = atapi_cdb_len(dev->id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			goto err_out_nosup;
		}
		ap->cdb_len = (unsigned int) rc;
		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
		       ap->id, device,
		       ata_mode_string(xfer_modes));
	}

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, device);
err_out:
	dev->class++;	/* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
	DPRINTK("EXIT, err\n");
}
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Resets the PHY/bus, identifies each present device, runs the
 *	driver's optional dev_config hook, then programs transfer
 *	modes via ata_set_mode().  On any failure the port is
 *	disabled.
 *
 *	LOCKING:
 *	NOTE(review): original comment left this blank; probing
 *	appears to be serialized by the caller -- confirm.
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */
static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int i, found = 0;

	ap->ops->phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		ata_dev_identify(ap, i);
		if (ata_dev_present(&ap->device[i])) {
			found = 1;
			/* give the LLDD a chance to tweak the device */
			if (ap->ops->dev_config)
				ap->ops->dev_config(ap, &ap->device[i]);
		}
	}

	if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		goto err_out_disable;

	ata_set_mode(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
err_out:
	return -1;
}
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Clears ATA_FLAG_PORT_DISABLED so @ap will be probed and used.
 *	Counterpart of ata_port_disable().
 *
 *	LOCKING:
 *	None (single flag update).  NOTE(review): callers appear to
 *	serialize via the probe path -- confirm.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING: None.  Serialized during ata_bus_probe().
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		udelay(400);			/* FIXME: a guess */
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary.
	 * NOTE(review): (sstatus & 0xf) presumably reads the DET field
	 * of SStatus; 1 would then mean "device detected but no phy
	 * communication yet" -- confirm against the SATA SCR spec.
	 */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else {
		sstatus = scr_read(ap, SCR_STATUS);
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, sstatus);
		ata_port_disable(ap);
	}

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	/* device present: wait for it to drop BSY, else give up */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
  1112. /**
  1113. * sata_phy_reset - Reset SATA bus.
  1114. * @ap: SATA port associated with target SATA PHY.
  1115. *
  1116. * This function resets the SATA bus, and then probes
  1117. * the bus for devices.
  1118. *
  1119. * LOCKING: None. Serialized during ata_bus_probe().
  1120. *
  1121. */
  1122. void sata_phy_reset(struct ata_port *ap)
  1123. {
  1124. __sata_phy_reset(ap);
  1125. if (ap->flags & ATA_FLAG_PORT_DISABLED)
  1126. return;
  1127. ata_bus_reset(ap);
  1128. }
  1129. /**
  1130. * ata_port_disable - Disable port.
  1131. * @ap: Port to be disabled.
  1132. *
  1133. * Modify @ap data structure such that the system
  1134. * thinks that the entire port is disabled, and should
  1135. * never attempt to probe or communicate with devices
  1136. * on this port.
  1137. *
  1138. * LOCKING: host_set lock, or some other form of
  1139. * serialization.
  1140. */
  1141. void ata_port_disable(struct ata_port *ap)
  1142. {
  1143. ap->device[0].class = ATA_DEV_NONE;
  1144. ap->device[1].class = ATA_DEV_NONE;
  1145. ap->flags |= ATA_FLAG_PORT_DISABLED;
  1146. }
/* Map each transfer-mode class (identified by its bit-shift group)
 * to the XFER_* code of mode 0 in that class; consumed by
 * base_from_shift() to convert bit positions into SET FEATURES
 * xfer-mode values.
 */
static struct {
	unsigned int shift;
	u8 base;
} xfer_mode_classes[] = {
	{ ATA_SHIFT_UDMA, XFER_UDMA_0 },
	{ ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_PIO, XFER_PIO_0 },
};
  1155. static inline u8 base_from_shift(unsigned int shift)
  1156. {
  1157. int i;
  1158. for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
  1159. if (xfer_mode_classes[i].shift == shift)
  1160. return xfer_mode_classes[i].base;
  1161. return 0xff;
  1162. }
/* Finalize one device's transfer mode: issue SET FEATURES - XFER to
 * the device and report the configured mode.  No-op for absent
 * devices or disabled ports.
 */
static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	int ofs, idx;
	u8 base;

	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		return;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	/* map xfer_mode back to an xfer_mode_str[] index:
	 * (mode - class base) gives the mode number within the class,
	 * and adding the class shift yields the global bit position
	 */
	base = base_from_shift(dev->xfer_shift);
	ofs = dev->xfer_mode - base;
	idx = ofs + dev->xfer_shift;
	WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));

	DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
		idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno, xfer_mode_str[idx]);
}
  1181. static int ata_host_set_pio(struct ata_port *ap)
  1182. {
  1183. unsigned int mask;
  1184. int x, i;
  1185. u8 base, xfer_mode;
  1186. mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
  1187. x = fgb(mask);
  1188. if (x < 0) {
  1189. printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
  1190. return -1;
  1191. }
  1192. base = base_from_shift(ATA_SHIFT_PIO);
  1193. xfer_mode = base + x;
  1194. DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
  1195. (int)base, (int)xfer_mode, mask, x);
  1196. for (i = 0; i < ATA_MAX_DEVICES; i++) {
  1197. struct ata_device *dev = &ap->device[i];
  1198. if (ata_dev_present(dev)) {
  1199. dev->pio_mode = xfer_mode;
  1200. dev->xfer_mode = xfer_mode;
  1201. dev->xfer_shift = ATA_SHIFT_PIO;
  1202. if (ap->ops->set_piomode)
  1203. ap->ops->set_piomode(ap, dev);
  1204. }
  1205. }
  1206. return 0;
  1207. }
  1208. static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
  1209. unsigned int xfer_shift)
  1210. {
  1211. int i;
  1212. for (i = 0; i < ATA_MAX_DEVICES; i++) {
  1213. struct ata_device *dev = &ap->device[i];
  1214. if (ata_dev_present(dev)) {
  1215. dev->dma_mode = xfer_mode;
  1216. dev->xfer_mode = xfer_mode;
  1217. dev->xfer_shift = xfer_shift;
  1218. if (ap->ops->set_dmamode)
  1219. ap->ops->set_dmamode(ap, dev);
  1220. }
  1221. }
  1222. }
/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *	On any failure, the port is disabled.
 *
 *	LOCKING: None.  Serialized during ata_bus_probe().
 *
 */
static void ata_set_mode(struct ata_port *ap)
{
	unsigned int i, xfer_shift;
	u8 xfer_mode;
	int rc;

	/* step 1: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 2: choose the best data xfer mode */
	xfer_mode = xfer_shift = 0;
	rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
	if (rc)
		goto err_out;

	/* step 3: if that xfer mode isn't PIO, set host DMA timings */
	if (xfer_shift != ATA_SHIFT_PIO)
		ata_host_set_dma(ap, xfer_mode, xfer_shift);

	/* step 4: update devices' xfer mode */
	ata_dev_set_mode(ap, &ap->device[0]);
	ata_dev_set_mode(ap, &ap->device[1]);

	/* a SET FEATURES failure above may have disabled the port */
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	/* finally cache the read/write opcodes for each device */
	for (i = 0; i < 2; i++) {
		struct ata_device *dev = &ap->device[i];
		ata_dev_set_protocol(dev);
	}

	return;

err_out:
	ata_port_disable(ap);
}
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.  After @tmout_pat a warning is printed;
 *	only after @tmout do we give up.
 *
 *	LOCKING: None.
 *
 *	RETURNS:
 *	Zero when BSY cleared, 1 on overall timeout.
 */
static unsigned int ata_busy_sleep (struct ata_port *ap,
				    unsigned long tmout_pat,
				    unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* quick busy-wait first, then fall back to sleeping polls */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;

	/* phase 1: poll until the "impatience" timeout */
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	/* phase 2: keep polling until the hard timeout */
	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}
/* Wait for the devices found by ata_devchk() to come out of reset:
 * device 0 just needs BSY to clear; device 1 must first respond to
 * register access (nsect/lbal read back as 01/01) before its BSY
 * wait.  Finally re-select the devices.
 */
static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		/* post-reset signature values indicate readiness */
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;	/* give up on device 1 */
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}
/**
 *	ata_bus_edd - reset bus via EXECUTE DEVICE DIAGNOSTIC
 *	@ap: port to reset
 *
 *	Issues the EXECUTE DEVICE DIAGNOSTIC command (with interrupts
 *	disabled via nIEN) to reset the bus, then waits for BSY to
 *	clear.
 *
 *	LOCKING: None.  Serialized during ata_bus_probe().
 *
 *	RETURNS:
 *	Result of ata_busy_sleep(): zero on success, non-zero on
 *	timeout.
 */
static unsigned int ata_bus_edd(struct ata_port *ap)
{
	struct ata_taskfile tf;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.ctl |= ATA_NIEN;
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* do bus reset */
	ata_tf_to_host(ap, &tf);

	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */
	msleep(150);

	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}
/* Perform an ATA software reset (SRST pulse on the Device Control
 * register), then wait for the detected devices to come back via
 * ata_bus_post_reset().  Always returns 0.
 */
static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected.
	 * NOTE(review): the MMIO path delays 20us between writes but
	 * the PIO path only 10us -- presumably intentional, but worth
	 * confirming.
	 */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 */
	msleep(150);

	ata_bus_post_reset(ap, devmask);

	return 0;
}
  1402. /**
  1403. * ata_bus_reset - reset host port and associated ATA channel
  1404. * @ap: port to reset
  1405. *
  1406. * This is typically the first time we actually start issuing
  1407. * commands to the ATA channel. We wait for BSY to clear, then
  1408. * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
  1409. * result. Determine what devices, if any, are on the channel
  1410. * by looking at the device 0/1 error register. Look at the signature
  1411. * stored in each device's taskfile registers, to determine if
  1412. * the device is ATA or ATAPI.
  1413. *
  1414. * LOCKING:
  1415. * Inherited from caller. Some functions called by this function
  1416. * obtain the host_set lock.
  1417. *
  1418. * SIDE EFFECTS:
  1419. * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
  1420. */
void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 exists */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	}

	if (rc)
		goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	err = ata_dev_try_classify(ap, 0);
	/* NOTE(review): 0x81 presumably signals "device 1 failed
	 * diagnostics", making the dev1 classify pointless — confirm
	 * against the EXECUTE DEVICE DIAGNOSTIC error codes. */
	if ((slave_possible) && (err != 0x81))
		ata_dev_try_classify(ap, 1);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}
  1487. static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
  1488. {
  1489. printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
  1490. ap->id, dev->devno);
  1491. }
  1492. static const char * ata_dma_blacklist [] = {
  1493. "WDC AC11000H",
  1494. "WDC AC22100H",
  1495. "WDC AC32500H",
  1496. "WDC AC33100H",
  1497. "WDC AC31600H",
  1498. "WDC AC32100H",
  1499. "WDC AC23200L",
  1500. "Compaq CRD-8241B",
  1501. "CRD-8400B",
  1502. "CRD-8480B",
  1503. "CRD-8482B",
  1504. "CRD-84",
  1505. "SanDisk SDP3B",
  1506. "SanDisk SDP3B-64",
  1507. "SANYO CD-ROM CRD",
  1508. "HITACHI CDR-8",
  1509. "HITACHI CDR-8335",
  1510. "HITACHI CDR-8435",
  1511. "Toshiba CD-ROM XM-6202B",
  1512. "CD-532E-A",
  1513. "E-IDE CD-ROM CR-840",
  1514. "CD-ROM Drive/F5A",
  1515. "WPI CDD-820",
  1516. "SAMSUNG CD-ROM SC-148C",
  1517. "SAMSUNG CD-ROM SC",
  1518. "SanDisk SDP3B-64",
  1519. "SAMSUNG CD-ROM SN-124",
  1520. "ATAPI CD-ROM DRIVE 40X MAXIMUM",
  1521. "_NEC DV5800A",
  1522. };
/* Return 1 if @dev's IDENTIFY model string is on ata_dma_blacklist[].
 *
 * NOTE(review): strncmp() below compares only strlen(model) bytes, so a
 * model string that is a strict prefix of a longer blacklist entry also
 * matches — confirm this is intended rather than exact-match.
 */
static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
{
	unsigned char model_num[40];
	char *s;
	unsigned int len;
	int i;

	/* extract the model string from the IDENTIFY data */
	ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
			  sizeof(model_num));
	s = &model_num[0];
	len = strnlen(s, sizeof(model_num));

	/* ATAPI specifies that empty space is blank-filled; remove blanks */
	while ((len > 0) && (s[len - 1] == ' ')) {
		len--;
		s[len] = 0;
	}

	for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
		if (!strncmp(ata_dma_blacklist[i], s, len))
			return 1;

	return 0;
}
  1543. static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
  1544. {
  1545. struct ata_device *master, *slave;
  1546. unsigned int mask;
  1547. master = &ap->device[0];
  1548. slave = &ap->device[1];
  1549. assert (ata_dev_present(master) || ata_dev_present(slave));
  1550. if (shift == ATA_SHIFT_UDMA) {
  1551. mask = ap->udma_mask;
  1552. if (ata_dev_present(master)) {
  1553. mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
  1554. if (ata_dma_blacklisted(ap, master)) {
  1555. mask = 0;
  1556. ata_pr_blacklisted(ap, master);
  1557. }
  1558. }
  1559. if (ata_dev_present(slave)) {
  1560. mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
  1561. if (ata_dma_blacklisted(ap, slave)) {
  1562. mask = 0;
  1563. ata_pr_blacklisted(ap, slave);
  1564. }
  1565. }
  1566. }
  1567. else if (shift == ATA_SHIFT_MWDMA) {
  1568. mask = ap->mwdma_mask;
  1569. if (ata_dev_present(master)) {
  1570. mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
  1571. if (ata_dma_blacklisted(ap, master)) {
  1572. mask = 0;
  1573. ata_pr_blacklisted(ap, master);
  1574. }
  1575. }
  1576. if (ata_dev_present(slave)) {
  1577. mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
  1578. if (ata_dma_blacklisted(ap, slave)) {
  1579. mask = 0;
  1580. ata_pr_blacklisted(ap, slave);
  1581. }
  1582. }
  1583. }
  1584. else if (shift == ATA_SHIFT_PIO) {
  1585. mask = ap->pio_mask;
  1586. if (ata_dev_present(master)) {
  1587. /* spec doesn't return explicit support for
  1588. * PIO0-2, so we fake it
  1589. */
  1590. u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
  1591. tmp_mode <<= 3;
  1592. tmp_mode |= 0x7;
  1593. mask &= tmp_mode;
  1594. }
  1595. if (ata_dev_present(slave)) {
  1596. /* spec doesn't return explicit support for
  1597. * PIO0-2, so we fake it
  1598. */
  1599. u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
  1600. tmp_mode <<= 3;
  1601. tmp_mode |= 0x7;
  1602. mask &= tmp_mode;
  1603. }
  1604. }
  1605. else {
  1606. mask = 0xffffffff; /* shut up compiler warning */
  1607. BUG();
  1608. }
  1609. return mask;
  1610. }
  1611. /* find greatest bit */
  1612. static int fgb(u32 bitmap)
  1613. {
  1614. unsigned int i;
  1615. int x = -1;
  1616. for (i = 0; i < 32; i++)
  1617. if (bitmap & (1 << i))
  1618. x = i;
  1619. return x;
  1620. }
  1621. /**
  1622. * ata_choose_xfer_mode - attempt to find best transfer mode
  1623. * @ap: Port for which an xfer mode will be selected
  1624. * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
  1625. * @xfer_shift_out: (output) bit shift that selects this mode
  1626. *
  1627. * LOCKING:
  1628. *
  1629. * RETURNS:
  1630. * Zero on success, negative on error.
  1631. */
  1632. static int ata_choose_xfer_mode(struct ata_port *ap,
  1633. u8 *xfer_mode_out,
  1634. unsigned int *xfer_shift_out)
  1635. {
  1636. unsigned int mask, shift;
  1637. int x, i;
  1638. for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
  1639. shift = xfer_mode_classes[i].shift;
  1640. mask = ata_get_mode_mask(ap, shift);
  1641. x = fgb(mask);
  1642. if (x >= 0) {
  1643. *xfer_mode_out = xfer_mode_classes[i].base + x;
  1644. *xfer_shift_out = shift;
  1645. return 0;
  1646. }
  1647. }
  1648. return -1;
  1649. }
  1650. /**
  1651. * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
  1652. * @ap: Port associated with device @dev
  1653. * @dev: Device to which command will be sent
  1654. *
  1655. * Issue SET FEATURES - XFER MODE command to device @dev
  1656. * on port @ap.
  1657. *
  1658. * LOCKING: None. Serialized during ata_bus_probe().
  1659. */
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
{
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	int rc;
	unsigned long flags;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf.command = ATA_CMD_SET_FEATURES;
	qc->tf.feature = SETFEATURES_XFER;
	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.protocol = ATA_PROT_NODATA;
	/* the mode to program is carried in the sector count register */
	qc->tf.nsect = dev->xfer_mode;

	/* noop callback: completion is signalled via qc->waiting and we
	 * block on it below, making this call synchronous */
	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		ata_port_disable(ap);
	else
		wait_for_completion(&wait);

	DPRINTK("EXIT\n");
}
/**
 *	ata_sg_clean - Unmap DMA memory associated with a command
 *	@qc: Command whose DMA memory is to be unmapped
 *
 *	LOCKING:
 */
static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	assert(qc->flags & ATA_QCFLAG_DMAMAP);
	assert(sg != NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		assert(qc->n_elem == 1);

	DPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* SG commands were mapped with dma_map_sg(); single-buffer
	 * commands with dma_map_single() on sg[0] — mirror that here */
	if (qc->flags & ATA_QCFLAG_SG)
		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
	else
		dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
				 sg_dma_len(&sg[0]), dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
  1710. /**
  1711. * ata_fill_sg - Fill PCI IDE PRD table
  1712. * @qc: Metadata associated with taskfile to be transferred
  1713. *
  1714. * Fill PCI IDE PRD (scatter-gather) table with segments
  1715. * associated with the current disk command.
  1716. *
  1717. * LOCKING:
  1718. * spin_lock_irqsave(host_set lock)
  1719. *
  1720. */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg = qc->sg;
	struct ata_port *ap = qc->ap;
	unsigned int idx, nelem;

	assert(sg != NULL);
	assert(qc->n_elem > 0);

	idx = 0;
	for (nelem = qc->n_elem; nelem; nelem--,sg++) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each segment so no PRD entry crosses a 64K
		 * boundary */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			/* NOTE(review): len == 0x10000 is stored as 0 in
			 * flags_len — presumably the PRD encoding for a
			 * full 64K chunk; confirm against the BMIDE spec */
			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last entry written as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
  1754. /**
  1755. * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
  1756. * @qc: Metadata associated with taskfile to check
  1757. *
  1758. * Allow low-level driver to filter ATA PACKET commands, returning
  1759. * a status indicating whether or not it is OK to use DMA for the
  1760. * supplied PACKET command.
  1761. *
  1762. * LOCKING:
  1763. * RETURNS: 0 when ATAPI DMA can be used
  1764. * nonzero otherwise
  1765. */
  1766. int ata_check_atapi_dma(struct ata_queued_cmd *qc)
  1767. {
  1768. struct ata_port *ap = qc->ap;
  1769. int rc = 0; /* Assume ATAPI DMA is OK by default */
  1770. if (ap->ops->check_atapi_dma)
  1771. rc = ap->ops->check_atapi_dma(qc);
  1772. return rc;
  1773. }
  1774. /**
  1775. * ata_qc_prep - Prepare taskfile for submission
  1776. * @qc: Metadata associated with taskfile to be prepared
  1777. *
  1778. * Prepare ATA taskfile for submission.
  1779. *
  1780. * LOCKING:
  1781. * spin_lock_irqsave(host_set lock)
  1782. */
  1783. void ata_qc_prep(struct ata_queued_cmd *qc)
  1784. {
  1785. if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  1786. return;
  1787. ata_fill_sg(qc);
  1788. }
  1789. void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
  1790. {
  1791. struct scatterlist *sg;
  1792. qc->flags |= ATA_QCFLAG_SINGLE;
  1793. memset(&qc->sgent, 0, sizeof(qc->sgent));
  1794. qc->sg = &qc->sgent;
  1795. qc->n_elem = 1;
  1796. qc->buf_virt = buf;
  1797. sg = qc->sg;
  1798. sg->page = virt_to_page(buf);
  1799. sg->offset = (unsigned long) buf & ~PAGE_MASK;
  1800. sg->length = buflen;
  1801. }
  1802. void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
  1803. unsigned int n_elem)
  1804. {
  1805. qc->flags |= ATA_QCFLAG_SG;
  1806. qc->sg = sg;
  1807. qc->n_elem = n_elem;
  1808. }
/**
 *	ata_sg_setup_one - DMA-map the single memory buffer of a command
 *	@qc: Command with a single buffer (ATA_QCFLAG_SINGLE)
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on DMA mapping error.
 */
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->sg;
	dma_addr_t dma_address;

	/* map the flat buffer set up by ata_sg_init_one() */
	dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address))
		return -1;

	/* record the mapping in the one-entry scatterlist */
	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
/**
 *	ata_sg_setup - DMA-map the scatterlist of a command
 *	@qc: Command with a scatter/gather table (ATA_QCFLAG_SG)
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative when no elements could be mapped.
 */
  1845. static int ata_sg_setup(struct ata_queued_cmd *qc)
  1846. {
  1847. struct ata_port *ap = qc->ap;
  1848. struct scatterlist *sg = qc->sg;
  1849. int n_elem, dir;
  1850. VPRINTK("ENTER, ata%u\n", ap->id);
  1851. assert(qc->flags & ATA_QCFLAG_SG);
  1852. dir = qc->dma_dir;
  1853. n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
  1854. if (n_elem < 1)
  1855. return -1;
  1856. DPRINTK("%d sg elements mapped\n", n_elem);
  1857. qc->n_elem = n_elem;
  1858. return 0;
  1859. }
/**
 *	ata_pio_poll - poll for BSY to clear during PIO
 *	@ap: port on which the PIO command is executing
 *
 *	LOCKING:
 *
 *	RETURNS:
 *	Delay (in jiffies) before polling again, or zero when the
 *	state machine should advance (including on timeout).
 */
static unsigned long ata_pio_poll(struct ata_port *ap)
{
	u8 status;
	unsigned int poll_state = PIO_ST_UNKNOWN;
	unsigned int reg_state = PIO_ST_UNKNOWN;
	const unsigned int tmout_state = PIO_ST_TMOUT;

	/* map the current state to its polling/non-polling counterpart */
	switch (ap->pio_task_state) {
	case PIO_ST:
	case PIO_ST_POLL:
		poll_state = PIO_ST_POLL;
		reg_state = PIO_ST;
		break;
	case PIO_ST_LAST:
	case PIO_ST_LAST_POLL:
		poll_state = PIO_ST_LAST_POLL;
		reg_state = PIO_ST_LAST;
		break;
	default:
		BUG();
		break;
	}

	status = ata_chk_status(ap);
	if (status & ATA_BUSY) {
		/* still busy: give up after the deadline, otherwise
		 * stay in the polling state and ask for a short pause */
		if (time_after(jiffies, ap->pio_task_timeout)) {
			ap->pio_task_state = tmout_state;
			return 0;
		}
		ap->pio_task_state = poll_state;
		return ATA_SHORT_PAUSE;
	}

	/* BSY cleared: resume normal processing */
	ap->pio_task_state = reg_state;
	return 0;
}
/**
 *	ata_pio_complete - finish a PIO command after its last data block
 *	@ap: port on which the command is executing
 *
 *	LOCKING:
 *	None (called from workqueue context via ata_pio_task)
 */
static void ata_pio_complete (struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 drv_stat;

	/*
	 * This is purely hueristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, fall back to
	 * PIO_ST_POLL state.
	 */
	drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
	if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
		msleep(2);
		drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
		if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
			ap->pio_task_state = PIO_ST_LAST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
			return;
		}
	}

	/* device idle; any error bits mean the command failed */
	drv_stat = ata_wait_idle(ap);
	if (!ata_ok(drv_stat)) {
		ap->pio_task_state = PIO_ST_ERR;
		return;
	}

	qc = ata_qc_from_tag(ap, ap->active_tag);
	assert(qc != NULL);

	/* success: back to idle, re-enable irqs, complete the command */
	ap->pio_task_state = PIO_ST_IDLE;

	ata_irq_on(ap);

	ata_qc_complete(qc, drv_stat);
}
  1941. void swap_buf_le16(u16 *buf, unsigned int buf_words)
  1942. {
  1943. #ifdef __BIG_ENDIAN
  1944. unsigned int i;
  1945. for (i = 0; i < buf_words; i++)
  1946. buf[i] = le16_to_cpu(buf[i]);
  1947. #endif /* __BIG_ENDIAN */
  1948. }
/* Transfer @buflen bytes between @buf and the MMIO data register as
 * 16-bit little-endian words.  An odd trailing byte (buflen & 1) is
 * not transferred.
 */
static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
			       unsigned int buflen, int write_data)
{
	unsigned int i;
	unsigned int words = buflen >> 1;
	u16 *buf16 = (u16 *) buf;
	void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;

	/* the data register is little-endian on the wire; convert each
	 * word (le16_to_cpu/cpu_to_le16 are the same byte swap) */
	if (write_data) {
		for (i = 0; i < words; i++)
			writew(le16_to_cpu(buf16[i]), mmio);
	} else {
		for (i = 0; i < words; i++)
			buf16[i] = cpu_to_le16(readw(mmio));
	}
}
  1964. static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
  1965. unsigned int buflen, int write_data)
  1966. {
  1967. unsigned int dwords = buflen >> 1;
  1968. if (write_data)
  1969. outsw(ap->ioaddr.data_addr, buf, dwords);
  1970. else
  1971. insw(ap->ioaddr.data_addr, buf, dwords);
  1972. }
  1973. static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
  1974. unsigned int buflen, int do_write)
  1975. {
  1976. if (ap->flags & ATA_FLAG_MMIO)
  1977. ata_mmio_data_xfer(ap, buf, buflen, do_write);
  1978. else
  1979. ata_pio_data_xfer(ap, buf, buflen, do_write);
  1980. }
  1981. static void ata_pio_sector(struct ata_queued_cmd *qc)
  1982. {
  1983. int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
  1984. struct scatterlist *sg = qc->sg;
  1985. struct ata_port *ap = qc->ap;
  1986. struct page *page;
  1987. unsigned int offset;
  1988. unsigned char *buf;
  1989. if (qc->cursect == (qc->nsect - 1))
  1990. ap->pio_task_state = PIO_ST_LAST;
  1991. page = sg[qc->cursg].page;
  1992. offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
  1993. /* get the current page and offset */
  1994. page = nth_page(page, (offset >> PAGE_SHIFT));
  1995. offset %= PAGE_SIZE;
  1996. buf = kmap(page) + offset;
  1997. qc->cursect++;
  1998. qc->cursg_ofs++;
  1999. if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
  2000. qc->cursg++;
  2001. qc->cursg_ofs = 0;
  2002. }
  2003. DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
  2004. /* do the actual data transfer */
  2005. do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
  2006. ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
  2007. kunmap(page);
  2008. }
/* Transfer @bytes bytes of an ATAPI PIO command, walking the
 * scatterlist page by page (never crossing a page boundary per
 * ata_data_xfer call).
 */
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this chunk finishes the command: completion path runs next */
	if (qc->curbytes == qc->nbytes - bytes)
		ap->pio_task_state = PIO_ST_LAST;

next_sg:
	sg = &qc->sg[qc->cursg];

next_page:
	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* at most to the end of this sg entry... */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	buf = kmap(page) + offset;

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	/* do the actual data transfer */
	ata_data_xfer(ap, buf, count, do_write);

	kunmap(page);

	/* NOTE(review): if the device ever requests more bytes than the
	 * scatterlist holds, cursg walks past the end — this assumes
	 * bytes never exceeds what remains of qc->nbytes; confirm. */
	if (bytes) {
		if (qc->cursg_ofs < sg->length)
			goto next_page;
		goto next_sg;
	}
}
/* Read the interrupt reason and byte count from the taskfile,
 * sanity-check them, and transfer that many bytes via
 * __atapi_pio_bytes().  On a protocol mismatch the PIO state machine
 * is put into PIO_ST_ERR.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	ap->ops->tf_read(ap, &qc->tf);
	ireason = qc->tf.nsect;		/* interrupt reason register */
	bc_lo = qc->tf.lbam;		/* byte count low/high */
	bc_hi = qc->tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
	       ap->id, dev->devno);
	ap->pio_task_state = PIO_ST_ERR;
}
/**
 *	ata_pio_block - transfer one block (sector or ATAPI chunk) via PIO
 *	@ap: port on which the PIO command is executing
 *
 *	LOCKING:
 */
static void ata_pio_block(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 status;

	/*
	 * This is purely hueristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, fall back to
	 * PIO_ST_POLL state.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ap->pio_task_state = PIO_ST_POLL;
			ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
			return;
		}
	}

	qc = ata_qc_from_tag(ap, ap->active_tag);
	assert(qc != NULL);

	if (is_atapi_taskfile(&qc->tf)) {
		/* no more data to transfer or unsupported ATAPI command */
		if ((status & ATA_DRQ) == 0) {
			ap->pio_task_state = PIO_ST_IDLE;

			ata_irq_on(ap);

			ata_qc_complete(qc, status);
			return;
		}

		atapi_pio_bytes(qc);
	} else {
		/* handle BSY=0, DRQ=0 as error */
		if ((status & ATA_DRQ) == 0) {
			ap->pio_task_state = PIO_ST_ERR;
			return;
		}

		ata_pio_sector(qc);
	}
}
  2121. static void ata_pio_error(struct ata_port *ap)
  2122. {
  2123. struct ata_queued_cmd *qc;
  2124. u8 drv_stat;
  2125. qc = ata_qc_from_tag(ap, ap->active_tag);
  2126. assert(qc != NULL);
  2127. drv_stat = ata_chk_status(ap);
  2128. printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
  2129. ap->id, drv_stat);
  2130. ap->pio_task_state = PIO_ST_IDLE;
  2131. ata_irq_on(ap);
  2132. ata_qc_complete(qc, drv_stat | ATA_ERR);
  2133. }
/* Workqueue entry point driving the PIO state machine: dispatch on
 * pio_task_state, then requeue ourselves (with a delay when polling)
 * unless the machine has reached a terminal state.
 */
static void ata_pio_task(void *_data)
{
	struct ata_port *ap = _data;
	unsigned long timeout = 0;

	switch (ap->pio_task_state) {
	case PIO_ST_IDLE:
		return;

	case PIO_ST:
		ata_pio_block(ap);
		break;

	case PIO_ST_LAST:
		ata_pio_complete(ap);
		break;

	case PIO_ST_POLL:
	case PIO_ST_LAST_POLL:
		timeout = ata_pio_poll(ap);
		break;

	case PIO_ST_TMOUT:
	case PIO_ST_ERR:
		ata_pio_error(ap);
		return;
	}

	/* NOTE(review): this requeues even when the handler above moved
	 * the state to PIO_ST_IDLE; the next run simply returns, so it
	 * costs one extra work item per completed command. */
	if (timeout)
		queue_delayed_work(ata_wq, &ap->pio_task,
				   timeout);
	else
		queue_work(ata_wq, &ap->pio_task);
}
/* Synchronously issue an ATAPI REQUEST SENSE to @dev, filling
 * cmd->sense_buffer.  Called from the timeout path (ata_qc_timeout).
 */
static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
				struct scsi_cmnd *cmd)
{
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int rc;

	DPRINTK("ATAPI request sense\n");

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	/* FIXME: is this needed? */
	memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));

	/* sense data comes back into the SCSI command's sense buffer */
	ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
	qc->dma_dir = DMA_FROM_DEVICE;

	memset(&qc->cdb, 0, ap->cdb_len);
	qc->cdb[0] = REQUEST_SENSE;
	qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;

	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.command = ATA_CMD_PACKET;
	qc->tf.protocol = ATA_PROT_ATAPI;
	/* 8KB in lbam/lbah — presumably the per-DRQ byte count limit
	 * of the PACKET protocol; confirm */
	qc->tf.lbam = (8 * 1024) & 0xff;
	qc->tf.lbah = (8 * 1024) >> 8;
	qc->nbytes = SCSI_SENSE_BUFFERSIZE;

	/* noop callback; we block on qc->waiting below */
	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		ata_port_disable(ap);
	else
		wait_for_completion(&wait);

	DPRINTK("EXIT\n");
}
  2196. /**
  2197. * ata_qc_timeout - Handle timeout of queued command
  2198. * @qc: Command that timed out
  2199. *
  2200. * Some part of the kernel (currently, only the SCSI layer)
  2201. * has noticed that the active command on port @ap has not
  2202. * completed after a specified length of time. Handle this
  2203. * condition by disabling DMA (if necessary) and completing
  2204. * transactions, with error if necessary.
  2205. *
  2206. * This also handles the case of the "lost interrupt", where
  2207. * for some reason (possibly hardware bug, possibly driver bug)
  2208. * an interrupt was not delivered to the driver, even though the
  2209. * transaction completed successfully.
  2210. *
  2211. * LOCKING:
  2212. */
static void ata_qc_timeout(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	u8 host_stat = 0, drv_stat;

	DPRINTK("ENTER\n");

	/* FIXME: doesn't this conflict with timeout handling? */
	if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
		struct scsi_cmnd *cmd = qc->scsicmd;

		if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {

			/* finish completing original command */
			__ata_qc_complete(qc);

			/* fetch sense data and report CHECK CONDITION
			 * instead of a plain timeout */
			atapi_request_sense(ap, dev, cmd);

			cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
			scsi_finish_command(cmd);

			goto out;
		}
	}

	/* hack alert! We cannot use the supplied completion
	 * function from inside the ->eh_strategy_handler() thread.
	 * libata is the only user of ->eh_strategy_handler() in
	 * any kernel, so the default scsi_done() assumes it is
	 * not being called from the SCSI EH.
	 */
	qc->scsidone = scsi_finish_command;

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		host_stat = ap->ops->bmdma_status(ap);

		/* before we do anything else, clear DMA-Start bit */
		ap->ops->bmdma_stop(ap);

		/* fall through */

	default:
		ata_altstatus(ap);
		drv_stat = ata_chk_status(ap);

		/* ack bmdma irq events */
		ap->ops->irq_clear(ap);

		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
		       ap->id, qc->tf.command, drv_stat, host_stat);

		/* complete taskfile transaction */
		ata_qc_complete(qc, drv_stat);
		break;
	}
out:
	DPRINTK("EXIT\n");
}
  2259. /**
  2260. * ata_eng_timeout - Handle timeout of queued command
  2261. * @ap: Port on which timed-out command is active
  2262. *
  2263. * Some part of the kernel (currently, only the SCSI layer)
  2264. * has noticed that the active command on port @ap has not
  2265. * completed after a specified length of time. Handle this
  2266. * condition by disabling DMA (if necessary) and completing
  2267. * transactions, with error if necessary.
  2268. *
  2269. * This also handles the case of the "lost interrupt", where
  2270. * for some reason (possibly hardware bug, possibly driver bug)
  2271. * an interrupt was not delivered to the driver, even though the
  2272. * transaction completed successfully.
  2273. *
  2274. * LOCKING:
  2275. * Inherited from SCSI layer (none, can sleep)
  2276. */
  2277. void ata_eng_timeout(struct ata_port *ap)
  2278. {
  2279. struct ata_queued_cmd *qc;
  2280. DPRINTK("ENTER\n");
  2281. qc = ata_qc_from_tag(ap, ap->active_tag);
  2282. if (!qc) {
  2283. printk(KERN_ERR "ata%u: BUG: timeout without command\n",
  2284. ap->id);
  2285. goto out;
  2286. }
  2287. ata_qc_timeout(qc);
  2288. out:
  2289. DPRINTK("EXIT\n");
  2290. }
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port from which a free command slot (tag) is requested
 *
 *	LOCKING:
 */
  2298. static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
  2299. {
  2300. struct ata_queued_cmd *qc = NULL;
  2301. unsigned int i;
  2302. for (i = 0; i < ATA_MAX_QUEUE; i++)
  2303. if (!test_and_set_bit(i, &ap->qactive)) {
  2304. qc = ata_qc_from_tag(ap, i);
  2305. break;
  2306. }
  2307. if (qc)
  2308. qc->tag = i;
  2309. return qc;
  2310. }
  2311. /**
  2312. * ata_qc_new_init - Request an available ATA command, and initialize it
  2313. * @ap: Port associated with device @dev
  2314. * @dev: Device from whom we request an available command structure
  2315. *
  2316. * LOCKING:
  2317. */
  2318. struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
  2319. struct ata_device *dev)
  2320. {
  2321. struct ata_queued_cmd *qc;
  2322. qc = ata_qc_new(ap);
  2323. if (qc) {
  2324. qc->sg = NULL;
  2325. qc->flags = 0;
  2326. qc->scsicmd = NULL;
  2327. qc->ap = ap;
  2328. qc->dev = dev;
  2329. qc->cursect = qc->cursg = qc->cursg_ofs = 0;
  2330. qc->nsect = 0;
  2331. qc->nbytes = qc->curbytes = 0;
  2332. ata_tf_init(ap, &qc->tf, dev->devno);
  2333. if (dev->flags & ATA_DFLAG_LBA48)
  2334. qc->tf.flags |= ATA_TFLAG_LBA48;
  2335. }
  2336. return qc;
  2337. }
/* No-op completion callback for internally-issued commands whose
 * issuers block on qc->waiting instead; returning zero tells
 * ata_qc_complete() to proceed with __ata_qc_complete(). */
static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
{
	return 0;
}
/* Final stage of command teardown: poison/release the tag, clear the
 * port's active_tag if this was the active command, and wake any
 * synchronous waiter. */
static void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag, do_clear = 0;

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		if (tag == ap->active_tag)
			ap->active_tag = ATA_TAG_POISON;
		qc->tag = ATA_TAG_POISON;	/* poison to catch reuse */
		do_clear = 1;
	}

	if (qc->waiting) {
		struct completion *waiting = qc->waiting;
		qc->waiting = NULL;
		complete(waiting);
	}

	/* release the tag last, after the qc is fully torn down */
	if (likely(do_clear))
		clear_bit(tag, &ap->qactive);
}
  2362. /**
  2363. * ata_qc_free - free unused ata_queued_cmd
  2364. * @qc: Command to complete
  2365. *
  2366. * Designed to free unused ata_queued_cmd object
  2367. * in case something prevents using it.
  2368. *
  2369. * LOCKING:
  2370. *
  2371. */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
	assert(qc->waiting == NULL);	/* nothing should be waiting */

	/* skip the completion callback entirely; just release the tag */
	__ata_qc_complete(qc);
}
/**
 * ata_qc_complete - Complete an active ATA command
 * @qc: Command to complete
 * @drv_stat: ATA status register contents
 *
 * Unmaps any DMA buffers, runs the command's completion callback,
 * and (unless the callback claims the command) releases it.
 *
 * LOCKING:
 *
 */
void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
{
	int rc;

	assert(qc != NULL);	/* ata_qc_from_tag _might_ return NULL */
	assert(qc->flags & ATA_QCFLAG_ACTIVE);

	/* undo the DMA mapping set up at issue time, if any */
	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* call completion callback */
	rc = qc->complete_fn(qc, drv_stat);
	qc->flags &= ~ATA_QCFLAG_ACTIVE;

	/* if callback indicates not to complete command (non-zero),
	 * return immediately
	 */
	if (rc != 0)
		return;

	__ata_qc_complete(qc);

	VPRINTK("EXIT\n");
}
  2404. static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
  2405. {
  2406. struct ata_port *ap = qc->ap;
  2407. switch (qc->tf.protocol) {
  2408. case ATA_PROT_DMA:
  2409. case ATA_PROT_ATAPI_DMA:
  2410. return 1;
  2411. case ATA_PROT_ATAPI:
  2412. case ATA_PROT_PIO:
  2413. case ATA_PROT_PIO_MULT:
  2414. if (ap->flags & ATA_FLAG_PIO_DMA)
  2415. return 1;
  2416. /* fall through */
  2417. default:
  2418. return 0;
  2419. }
  2420. /* never reached */
  2421. }
  2422. /**
  2423. * ata_qc_issue - issue taskfile to device
  2424. * @qc: command to issue to device
  2425. *
  2426. * Prepare an ATA command to submission to device.
  2427. * This includes mapping the data into a DMA-able
  2428. * area, filling in the S/G table, and finally
  2429. * writing the taskfile to hardware, starting the command.
  2430. *
  2431. * LOCKING:
  2432. * spin_lock_irqsave(host_set lock)
  2433. *
  2434. * RETURNS:
  2435. * Zero on success, negative on error.
  2436. */
  2437. int ata_qc_issue(struct ata_queued_cmd *qc)
  2438. {
  2439. struct ata_port *ap = qc->ap;
  2440. if (ata_should_dma_map(qc)) {
  2441. if (qc->flags & ATA_QCFLAG_SG) {
  2442. if (ata_sg_setup(qc))
  2443. goto err_out;
  2444. } else if (qc->flags & ATA_QCFLAG_SINGLE) {
  2445. if (ata_sg_setup_one(qc))
  2446. goto err_out;
  2447. }
  2448. } else {
  2449. qc->flags &= ~ATA_QCFLAG_DMAMAP;
  2450. }
  2451. ap->ops->qc_prep(qc);
  2452. qc->ap->active_tag = qc->tag;
  2453. qc->flags |= ATA_QCFLAG_ACTIVE;
  2454. return ap->ops->qc_issue(qc);
  2455. err_out:
  2456. return -1;
  2457. }
/**
 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 * @qc: command to issue to device
 *
 * Using various libata functions and hooks, this function
 * starts an ATA command. ATA commands are grouped into
 * classes called "protocols", and issuing each type of protocol
 * is slightly different.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 *
 * RETURNS:
 * Zero on success, negative on error.
 */
int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* select the target device on the bus before touching registers */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		/* no data phase: just write the taskfile, irq completes it */
		ata_tf_to_host_nolock(ap, &qc->tf);
		break;

	case ATA_PROT_DMA:
		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		break;

	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
		ata_qc_set_polling(qc);
		ata_tf_to_host_nolock(ap, &qc->tf);
		ap->pio_task_state = PIO_ST;
		queue_work(ata_wq, &ap->pio_task);
		break;

	case ATA_PROT_ATAPI:
		/* polled ATAPI: packet_task sends the CDB, then polls PIO */
		ata_qc_set_polling(qc);
		ata_tf_to_host_nolock(ap, &qc->tf);
		queue_work(ata_wq, &ap->packet_task);
		break;

	case ATA_PROT_ATAPI_NODATA:
		/* CDB is sent from packet_task; completion comes via irq */
		ata_tf_to_host_nolock(ap, &qc->tf);
		queue_work(ata_wq, &ap->packet_task);
		break;

	case ATA_PROT_ATAPI_DMA:
		/* bmdma_start happens later, from packet_task, after CDB */
		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		queue_work(ata_wq, &ap->packet_task);
		break;

	default:
		WARN_ON(1);
		return -1;
	}

	return 0;
}
/**
 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO)
 * @qc: Info associated with this ATA transaction.
 *
 * Programs the PRD table address and transfer direction into the
 * bus-master registers, then writes the ATA command; the transfer
 * itself is started later by ata_bmdma_start_mmio().
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = readb(mmio + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	/* ATA_DMA_WR means "DMA engine writes to memory", i.e. a device
	 * read, hence it is set when the taskfile is NOT a write
	 */
	if (!rw)
		dmactl |= ATA_DMA_WR;
	writeb(dmactl, mmio + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}
/**
 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO)
 * @qc: Info associated with this ATA transaction.
 *
 * Sets the Start bit in the bus-master command register; the
 * controller takes over and raises an interrupt on completion.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expected, so I think it is best to not add a readb()
	 * without first all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
}
/**
 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
 * @qc: Info associated with this ATA transaction.
 *
 * Port-I/O twin of ata_bmdma_setup_mmio(): programs the PRD table
 * address and direction, then issues the ATA command.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	/* ATA_DMA_WR = engine writes to memory = device read */
	if (!rw)
		dmactl |= ATA_DMA_WR;
	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}
/**
 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
 * @qc: Info associated with this ATA transaction.
 *
 * Sets the Start bit in the bus-master command register via port I/O.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
  2603. void ata_bmdma_start(struct ata_queued_cmd *qc)
  2604. {
  2605. if (qc->ap->flags & ATA_FLAG_MMIO)
  2606. ata_bmdma_start_mmio(qc);
  2607. else
  2608. ata_bmdma_start_pio(qc);
  2609. }
  2610. void ata_bmdma_setup(struct ata_queued_cmd *qc)
  2611. {
  2612. if (qc->ap->flags & ATA_FLAG_MMIO)
  2613. ata_bmdma_setup_mmio(qc);
  2614. else
  2615. ata_bmdma_setup_pio(qc);
  2616. }
/* Acknowledge a BMDMA interrupt by writing the status register's
 * current value back to itself (the irq bit is write-1-to-clear).
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
		writeb(readb(mmio), mmio);
	} else {
		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
		outb(inb(addr), addr);
	}
}
  2627. u8 ata_bmdma_status(struct ata_port *ap)
  2628. {
  2629. u8 host_stat;
  2630. if (ap->flags & ATA_FLAG_MMIO) {
  2631. void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
  2632. host_stat = readb(mmio + ATA_DMA_STATUS);
  2633. } else
  2634. host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
  2635. return host_stat;
  2636. }
/* Clear the BMDMA Start bit, halting the DMA engine, then perform the
 * spec-mandated dummy status read before the next command.
 */
void ata_bmdma_stop(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

		/* clear start/stop bit */
		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		       mmio + ATA_DMA_CMD);
	} else {
		/* clear start/stop bit */
		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	}

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	  /* dummy read */
}
  2652. /**
  2653. * ata_host_intr - Handle host interrupt for given (port, task)
  2654. * @ap: Port on which interrupt arrived (possibly...)
  2655. * @qc: Taskfile currently active in engine
  2656. *
  2657. * Handle host interrupt for given queued command. Currently,
  2658. * only DMA interrupts are handled. All other commands are
  2659. * handled via polling with interrupts disabled (nIEN bit).
  2660. *
  2661. * LOCKING:
  2662. * spin_lock_irqsave(host_set lock)
  2663. *
  2664. * RETURNS:
  2665. * One if interrupt was handled, zero if not (shared irq).
  2666. */
  2667. inline unsigned int ata_host_intr (struct ata_port *ap,
  2668. struct ata_queued_cmd *qc)
  2669. {
  2670. u8 status, host_stat;
  2671. switch (qc->tf.protocol) {
  2672. case ATA_PROT_DMA:
  2673. case ATA_PROT_ATAPI_DMA:
  2674. case ATA_PROT_ATAPI:
  2675. /* check status of DMA engine */
  2676. host_stat = ap->ops->bmdma_status(ap);
  2677. VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
  2678. /* if it's not our irq... */
  2679. if (!(host_stat & ATA_DMA_INTR))
  2680. goto idle_irq;
  2681. /* before we do anything else, clear DMA-Start bit */
  2682. ap->ops->bmdma_stop(ap);
  2683. /* fall through */
  2684. case ATA_PROT_ATAPI_NODATA:
  2685. case ATA_PROT_NODATA:
  2686. /* check altstatus */
  2687. status = ata_altstatus(ap);
  2688. if (status & ATA_BUSY)
  2689. goto idle_irq;
  2690. /* check main status, clearing INTRQ */
  2691. status = ata_chk_status(ap);
  2692. if (unlikely(status & ATA_BUSY))
  2693. goto idle_irq;
  2694. DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
  2695. ap->id, qc->tf.protocol, status);
  2696. /* ack bmdma irq events */
  2697. ap->ops->irq_clear(ap);
  2698. /* complete taskfile transaction */
  2699. ata_qc_complete(qc, status);
  2700. break;
  2701. default:
  2702. goto idle_irq;
  2703. }
  2704. return 1; /* irq handled */
  2705. idle_irq:
  2706. ap->stats.idle_irq++;
  2707. #ifdef ATA_IRQ_TRAP
  2708. if ((ap->stats.idle_irq % 1000) == 0) {
  2709. handled = 1;
  2710. ata_irq_ack(ap, 0); /* debug trap */
  2711. printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
  2712. }
  2713. #endif
  2714. return 0; /* irq not handled */
  2715. }
/**
 * ata_interrupt - Default ATA host interrupt handler
 * @irq: irq line
 * @dev_instance: pointer to our host information structure
 * @regs: unused
 *
 * Walks every enabled port on the host set and forwards the
 * interrupt to ata_host_intr() for any port whose active command
 * expects interrupt-driven completion.
 *
 * LOCKING:
 *
 * RETURNS:
 * IRQ_HANDLED if any port claimed the interrupt, IRQ_NONE otherwise.
 */
irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host_set->lock, flags);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap;

		ap = host_set->ports[i];
		if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
			struct ata_queued_cmd *qc;

			/* only dispatch if the active command is still
			 * live and was issued with interrupts enabled
			 * (nIEN clear); polled commands are skipped
			 */
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	return IRQ_RETVAL(handled);
}
/**
 * atapi_packet_task - Write CDB bytes to hardware
 * @_data: Port to which ATAPI device is attached.
 *
 * When device has indicated its readiness to accept
 * a CDB, this function is called.  Send the CDB.
 * If DMA is to be performed, exit immediately.
 * Otherwise, we are in polling mode, so poll
 * status until operation succeeds or fails.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
static void atapi_packet_task(void *_data)
{
	struct ata_port *ap = _data;
	struct ata_queued_cmd *qc;
	u8 status;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	assert(qc != NULL);
	assert(qc->flags & ATA_QCFLAG_ACTIVE);

	/* sleep-wait for BSY to clear */
	DPRINTK("busy wait\n");
	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
		goto err_out;

	/* make sure DRQ is set */
	status = ata_chk_status(ap);
	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
		goto err_out;

	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	assert(ap->cdb_len >= 12);
	ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);

	/* if we are DMA'ing, irq handler takes over from here */
	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */

	/* non-data commands are also handled via irq */
	else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
		/* do nothing */
	}

	/* PIO commands are handled by polling */
	else {
		ap->pio_task_state = PIO_ST;
		queue_work(ata_wq, &ap->pio_task);
	}

	return;

err_out:
	/* report the command as failed with the error bit set */
	ata_qc_complete(qc, ATA_ERR);
}
/* Default port_start hook: allocate the coherent PRD (physical region
 * descriptor) table used by BMDMA.  Returns 0 or -ENOMEM.
 */
int ata_port_start (struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;

	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);

	return 0;
}
/* Default port_stop hook: release the PRD table allocated by
 * ata_port_start().
 */
void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
}
/**
 * ata_host_remove - Unregister SCSI host structure with upper layers
 * @ap: Port to unregister
 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
 *
 * LOCKING:
 */
static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
	struct Scsi_Host *sh = ap->host;

	DPRINTK("ENTER\n");

	/* detach from the SCSI midlayer first, then stop the port */
	if (do_unregister)
		scsi_remove_host(sh);

	ap->ops->port_stop(ap);
}
  2827. /**
  2828. * ata_host_init - Initialize an ata_port structure
  2829. * @ap: Structure to initialize
  2830. * @host: associated SCSI mid-layer structure
  2831. * @host_set: Collection of hosts to which @ap belongs
  2832. * @ent: Probe information provided by low-level driver
  2833. * @port_no: Port number associated with this ata_port
  2834. *
  2835. * LOCKING:
  2836. *
  2837. */
  2838. static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
  2839. struct ata_host_set *host_set,
  2840. struct ata_probe_ent *ent, unsigned int port_no)
  2841. {
  2842. unsigned int i;
  2843. host->max_id = 16;
  2844. host->max_lun = 1;
  2845. host->max_channel = 1;
  2846. host->unique_id = ata_unique_id++;
  2847. host->max_cmd_len = 12;
  2848. scsi_set_device(host, ent->dev);
  2849. scsi_assign_lock(host, &host_set->lock);
  2850. ap->flags = ATA_FLAG_PORT_DISABLED;
  2851. ap->id = host->unique_id;
  2852. ap->host = host;
  2853. ap->ctl = ATA_DEVCTL_OBS;
  2854. ap->host_set = host_set;
  2855. ap->port_no = port_no;
  2856. ap->hard_port_no =
  2857. ent->legacy_mode ? ent->hard_port_no : port_no;
  2858. ap->pio_mask = ent->pio_mask;
  2859. ap->mwdma_mask = ent->mwdma_mask;
  2860. ap->udma_mask = ent->udma_mask;
  2861. ap->flags |= ent->host_flags;
  2862. ap->ops = ent->port_ops;
  2863. ap->cbl = ATA_CBL_NONE;
  2864. ap->active_tag = ATA_TAG_POISON;
  2865. ap->last_ctl = 0xFF;
  2866. INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
  2867. INIT_WORK(&ap->pio_task, ata_pio_task, ap);
  2868. for (i = 0; i < ATA_MAX_DEVICES; i++)
  2869. ap->device[i].devno = i;
  2870. #ifdef ATA_IRQ_TRAP
  2871. ap->stats.unhandled_irq = 1;
  2872. ap->stats.idle_irq = 1;
  2873. #endif
  2874. memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
  2875. }
/**
 * ata_host_add - Attach low-level ATA driver to system
 * @ent: Information provided by low-level driver
 * @host_set: Collections of ports to which we add
 * @port_no: Port number associated with this host
 *
 * Allocates a Scsi_Host with an ata_port embedded in its hostdata,
 * initializes it and runs the driver's port_start hook.
 *
 * LOCKING:
 *
 * RETURNS:
 * New ata_port on success, NULL on failure.
 */
static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
				      struct ata_host_set *host_set,
				      unsigned int port_no)
{
	struct Scsi_Host *host;
	struct ata_port *ap;
	int rc;

	DPRINTK("ENTER\n");
	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!host)
		return NULL;

	/* the ata_port lives inside the Scsi_Host's hostdata area */
	ap = (struct ata_port *) &host->hostdata[0];

	ata_host_init(ap, host, host_set, ent, port_no);

	rc = ap->ops->port_start(ap);
	if (rc)
		goto err_out;

	return ap;

err_out:
	scsi_host_put(host);
	return NULL;
}
/**
 * ata_device_add - Register a probed host controller with libata
 * @ent: Probe information supplied by the low-level driver
 *
 * Allocates the host_set container, creates and probes each port,
 * requests the (shared) irq, and registers the ports with the SCSI
 * midlayer.
 *
 * LOCKING:
 *
 * RETURNS:
 * Number of ports registered on success, 0 on failure.
 */
int ata_device_add(struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses) */
	host_set = kmalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
				 "bmdma 0x%lX irq %lu\n",
			ap->id,
			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
			ata_mode_string(xfer_mode_mask),
			ap->ioaddr.cmd_addr,
			ap->ioaddr.ctl_addr,
			ap->ioaddr.bmdma_addr,
			ent->irq);

		/* drain any pending status/irq state before enabling irq */
		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		count++;
	}

	if (!count) {
		kfree(host_set);
		return 0;
	}

	/* obtain irq, that is shared between channels */
	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			DRV_NAME, host_set))
		goto err_out;

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		int rc;

		ap = host_set->ports[i];

		DPRINTK("ata%u: probe begin\n", ap->id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: probe end\n", ap->id);

		if (rc) {
			/* FIXME: do something useful here?
			 * Current libata behavior will
			 * tear down everything when
			 * the module is removed
			 * or the h/w is unplugged.
			 */
		}

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
			       ap->id);
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];

		scsi_scan_host(ap->host);
	}

	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out:
	/* NOTE(review): this tears down ports via ata_host_remove(..., 1)
	 * which calls scsi_remove_host() even though scsi_add_host() has
	 * not run yet on this path -- looks suspect, verify against the
	 * SCSI midlayer before relying on this error path.
	 */
	for (i = 0; i < count; i++) {
		ata_host_remove(host_set->ports[i], 1);
		scsi_host_put(host_set->ports[i]->host);
	}
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}
/**
 * ata_scsi_release - SCSI layer callback hook for host unload
 * @host: libata host to be unloaded
 *
 * Performs all duties necessary to shut down a libata port...
 * Kill port kthread, disable port, and release resources.
 *
 * LOCKING:
 * Inherited from SCSI layer.
 *
 * RETURNS:
 * One.
 */
int ata_scsi_release(struct Scsi_Host *host)
{
	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	/* stop the port without unregistering; the SCSI layer is
	 * already mid-teardown when this hook runs
	 */
	ata_host_remove(ap, 0);

	DPRINTK("EXIT\n");
	return 1;
}
  3037. /**
  3038. * ata_std_ports - initialize ioaddr with standard port offsets.
  3039. * @ioaddr: IO address structure to be initialized
  3040. */
  3041. void ata_std_ports(struct ata_ioports *ioaddr)
  3042. {
  3043. ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
  3044. ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
  3045. ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
  3046. ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
  3047. ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
  3048. ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
  3049. ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
  3050. ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
  3051. ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
  3052. ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
  3053. }
  3054. static struct ata_probe_ent *
  3055. ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
  3056. {
  3057. struct ata_probe_ent *probe_ent;
  3058. probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
  3059. if (!probe_ent) {
  3060. printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
  3061. kobject_name(&(dev->kobj)));
  3062. return NULL;
  3063. }
  3064. memset(probe_ent, 0, sizeof(*probe_ent));
  3065. INIT_LIST_HEAD(&probe_ent->node);
  3066. probe_ent->dev = dev;
  3067. probe_ent->sht = port->sht;
  3068. probe_ent->host_flags = port->host_flags;
  3069. probe_ent->pio_mask = port->pio_mask;
  3070. probe_ent->mwdma_mask = port->mwdma_mask;
  3071. probe_ent->udma_mask = port->udma_mask;
  3072. probe_ent->port_ops = port->port_ops;
  3073. return probe_ent;
  3074. }
  3075. #ifdef CONFIG_PCI
/* Build a probe entry for a PCI IDE controller in native mode: both
 * channels share one probe entry; register blocks come from PCI BARs
 * 0-4 (BAR 4 holds the 16-byte bus-master block, 8 bytes per channel).
 */
struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
{
	struct ata_probe_ent *probe_ent =
		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
	if (!probe_ent)
		return NULL;

	probe_ent->n_ports = 2;
	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;

	/* primary channel: BAR 0 command block, BAR 1 control block */
	probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
	probe_ent->port[0].altstatus_addr =
	probe_ent->port[0].ctl_addr =
		pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);

	/* secondary channel: BAR 2/3, second half of the bmdma block */
	probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
	probe_ent->port[1].altstatus_addr =
	probe_ent->port[1].ctl_addr =
		pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
	probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;

	ata_std_ports(&probe_ent->port[0]);
	ata_std_ports(&probe_ent->port[1]);

	return probe_ent;
}
/* Build probe entries for a PCI IDE controller in legacy mode: one
 * probe entry per channel, using the fixed ISA-era addresses
 * (0x1f0/irq14 and 0x170/irq15).  The secondary entry is returned
 * through @ppe2; the caller owns and frees both.
 */
static struct ata_probe_ent *
ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
			 struct ata_probe_ent **ppe2)
{
	struct ata_probe_ent *probe_ent, *probe_ent2;

	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
	if (!probe_ent)
		return NULL;
	probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
	if (!probe_ent2) {
		kfree(probe_ent);
		return NULL;
	}

	probe_ent->n_ports = 1;
	probe_ent->irq = 14;

	probe_ent->hard_port_no = 0;
	probe_ent->legacy_mode = 1;

	probe_ent2->n_ports = 1;
	probe_ent2->irq = 15;

	probe_ent2->hard_port_no = 1;
	probe_ent2->legacy_mode = 1;

	/* primary channel at the traditional 0x1f0/0x3f6 addresses */
	probe_ent->port[0].cmd_addr = 0x1f0;
	probe_ent->port[0].altstatus_addr =
	probe_ent->port[0].ctl_addr = 0x3f6;
	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);

	/* secondary channel at 0x170/0x376, second half of bmdma block */
	probe_ent2->port[0].cmd_addr = 0x170;
	probe_ent2->port[0].altstatus_addr =
	probe_ent2->port[0].ctl_addr = 0x376;
	probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;

	ata_std_ports(&probe_ent->port[0]);
	ata_std_ports(&probe_ent2->port[0]);

	*ppe2 = probe_ent2;
	return probe_ent;
}
/**
 * ata_pci_init_one - Initialize/register PCI IDE host controller
 * @pdev: Controller to be initialized
 * @port_info: Information from low-level host driver
 * @n_ports: Number of ports attached to host controller
 *
 * Detects legacy vs. native mode from the PCI programming-interface
 * byte, claims the required regions, and registers one device per
 * usable channel.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, negative errno on failure.
 */
int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
		      unsigned int n_ports)
{
	struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
	struct ata_port_info *port[2];
	u8 tmp8, mask;
	/* legacy_mode bits: (1 << 3) = controller is in legacy mode,
	 * (1 << 0)/(1 << 1) = primary/secondary legacy region claimed
	 */
	unsigned int legacy_mode = 0;
	int disable_dev_on_err = 1;
	int rc;

	DPRINTK("ENTER\n");

	port[0] = port_info[0];
	if (n_ports > 1)
		port[1] = port_info[1];
	else
		port[1] = port[0];

	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		/* TODO: support transitioning to native mode? */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		/* prog-if bits 0 and 2: channel 0/1 in native mode */
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = (1 << 3);
	}

	/* FIXME... */
	if ((!legacy_mode) && (n_ports > 1)) {
		printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
		return -EINVAL;
	}

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		/* someone else owns the BARs; don't disable their device */
		disable_dev_on_err = 0;
		goto err_out;
	}

	if (legacy_mode) {
		/* try to claim the primary legacy region; if another
		 * libata instance already holds it, treat it as ours
		 */
		if (!request_region(0x1f0, 8, "libata")) {
			struct resource *conflict, res;
			res.start = 0x1f0;
			res.end = 0x1f0 + 8 - 1;
			conflict = ____request_resource(&ioport_resource, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= (1 << 0);
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
			}
		} else
			legacy_mode |= (1 << 0);

		/* same dance for the secondary legacy region */
		if (!request_region(0x170, 8, "libata")) {
			struct resource *conflict, res;
			res.start = 0x170;
			res.end = 0x170 + 8 - 1;
			conflict = ____request_resource(&ioport_resource, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= (1 << 1);
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
			}
		} else
			legacy_mode |= (1 << 1);
	}

	/* we have legacy mode, but all ports are unavailable */
	if (legacy_mode == (1 << 3)) {
		rc = -EBUSY;
		goto err_out_regions;
	}

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	if (legacy_mode) {
		probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
	} else
		probe_ent = ata_pci_init_native_mode(pdev, port);
	if (!probe_ent) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	pci_set_master(pdev);

	/* FIXME: check ata_device_add return */
	if (legacy_mode) {
		/* register only the channels whose regions we won */
		if (legacy_mode & (1 << 0))
			ata_device_add(probe_ent);
		if (legacy_mode & (1 << 1))
			ata_device_add(probe_ent2);
	} else
		ata_device_add(probe_ent);

	/* probe entries are only needed during registration */
	kfree(probe_ent);
	kfree(probe_ent2);

	return 0;

err_out_regions:
	if (legacy_mode & (1 << 0))
		release_region(0x1f0, 8);
	if (legacy_mode & (1 << 1))
		release_region(0x170, 8);
	pci_release_regions(pdev);
err_out:
	if (disable_dev_on_err)
		pci_disable_device(pdev);
	return rc;
}
/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that
 * hot-unplug or module unload event has occurred.
 * Handle this by unregistering all objects associated
 * with this PCI device.  Free those objects.  Then finally
 * release PCI resources and disable device.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host_set *host_set = dev_get_drvdata(dev);
	struct ata_port *ap;
	unsigned int i;

	/* first pass: detach every port from the SCSI midlayer */
	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];

		scsi_remove_host(ap->host);
	}

	free_irq(host_set->irq, host_set);
	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);

	/* second pass: stop each port and release its resources */
	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];

		ata_scsi_release(ap->host);

		/* give back legacy I/O regions claimed in init_one */
		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}

		scsi_host_put(ap->host);
	}

	kfree(host_set);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}
  3297. /* move to PCI subsystem */
  3298. int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
  3299. {
  3300. unsigned long tmp = 0;
  3301. switch (bits->width) {
  3302. case 1: {
  3303. u8 tmp8 = 0;
  3304. pci_read_config_byte(pdev, bits->reg, &tmp8);
  3305. tmp = tmp8;
  3306. break;
  3307. }
  3308. case 2: {
  3309. u16 tmp16 = 0;
  3310. pci_read_config_word(pdev, bits->reg, &tmp16);
  3311. tmp = tmp16;
  3312. break;
  3313. }
  3314. case 4: {
  3315. u32 tmp32 = 0;
  3316. pci_read_config_dword(pdev, bits->reg, &tmp32);
  3317. tmp = tmp32;
  3318. break;
  3319. }
  3320. default:
  3321. return -EINVAL;
  3322. }
  3323. tmp &= bits->mask;
  3324. return (tmp == bits->val) ? 1 : 0;
  3325. }
  3326. #endif /* CONFIG_PCI */
/**
 *	ata_init - libata module initialization
 *
 *	Creates the "ata" workqueue used by libata and announces the
 *	driver version.
 *
 *	LOCKING:
 *	None (module init context).
 *
 *	RETURNS:
 *	Zero on success, -ENOMEM if the workqueue cannot be created.
 */
  3335. static int __init ata_init(void)
  3336. {
  3337. ata_wq = create_workqueue("ata");
  3338. if (!ata_wq)
  3339. return -ENOMEM;
  3340. printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
  3341. return 0;
  3342. }
/* ata_exit - module unload: tear down the workqueue created by ata_init(). */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
}
module_init(ata_init);
module_exit(ata_exit);

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers. As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* SCSI glue and port registration */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
/* command setup, issue and completion */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
/* taskfile and device-register access helpers */
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_chk_err);
EXPORT_SYMBOL_GPL(ata_exec_command);
/* port lifecycle and interrupt handling */
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
/* BMDMA (bus-master DMA) helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
/* probing and reset */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_port_disable);
/* SCSI midlayer entry points */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_id_string);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

#ifdef CONFIG_PCI
/* PCI bus glue */
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#endif /* CONFIG_PCI */