
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc. All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 *
 * --> Investigate problems with PCI Message Signalled Interrupts (MSI).
 *
 * --> Cache frequently-accessed registers in mv_port_priv to reduce overhead.
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *     Quite often, especially with PCI Message Signalled Interrupts (MSI),
 *     the overhead reduced by interrupt mitigation is not worth the
 *     latency cost.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *     mode to cross-connect two Linux boxes with Marvell cards? If so,
 *     creating LibATA target mode support would be very interesting.
 *
 *     Target mode, for those without docs, is the ability to directly
 *     connect two SATA ports.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME "sata_mv"
#define DRV_VERSION "1.20"
enum {
        /* BAR's are enumerated in terms of pci_resource_start() terms */
        MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
        MV_IO_BAR = 2, /* offset 0x18: IO space */
        MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */

        MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
        MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */

        MV_PCI_REG_BASE = 0,
        MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
        MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
        MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
        MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
        MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
        MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

        MV_SATAHC0_REG_BASE = 0x20000,
        MV_FLASH_CTL_OFS = 0x1046c,
        MV_GPIO_PORT_CTL_OFS = 0x104f0,
        MV_RESET_CFG_OFS = 0x180d8,

        MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
        MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
        MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

        MV_MAX_Q_DEPTH = 32,
        MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

        /* CRQB needs alignment on a 1KB boundary. Size == 1KB
         * CRPB needs alignment on a 256B boundary. Size == 256B
         * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
         */
        MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
        MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
        MV_MAX_SG_CT = 256,
        MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),

        /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
        MV_PORT_HC_SHIFT = 2,
        MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
        /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
        MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */

        /* Host Flags */
        MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
        MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
        /* SoC integrated controllers, no PCI interface */
        MV_FLAG_SOC = (1 << 28),

        MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                          ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
                          ATA_FLAG_PIO_POLLING,
        MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,

        CRQB_FLAG_READ = (1 << 0),
        CRQB_TAG_SHIFT = 1,
        CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
        CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
        CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
        CRQB_CMD_ADDR_SHIFT = 8,
        CRQB_CMD_CS = (0x2 << 11),
        CRQB_CMD_LAST = (1 << 15),

        CRPB_FLAG_STATUS_SHIFT = 8,
        CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
        CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */

        EPRD_FLAG_END_OF_TBL = (1 << 31),

        /* PCI interface registers */
        PCI_COMMAND_OFS = 0xc00,
        PCI_COMMAND_MRDTRIG = (1 << 7), /* PCI Master Read Trigger */

        PCI_MAIN_CMD_STS_OFS = 0xd30,
        STOP_PCI_MASTER = (1 << 2),
        PCI_MASTER_EMPTY = (1 << 3),
        GLOB_SFT_RST = (1 << 4),

        MV_PCI_MODE_OFS = 0xd00,
        MV_PCI_MODE_MASK = 0x30,

        MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
        MV_PCI_DISC_TIMER = 0xd04,
        MV_PCI_MSI_TRIGGER = 0xc38,
        MV_PCI_SERR_MASK = 0xc28,
        MV_PCI_XBAR_TMOUT_OFS = 0x1d04,
        MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
        MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
        MV_PCI_ERR_ATTRIBUTE = 0x1d48,
        MV_PCI_ERR_COMMAND = 0x1d50,

        PCI_IRQ_CAUSE_OFS = 0x1d58,
        PCI_IRQ_MASK_OFS = 0x1d5c,
        PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */

        PCIE_IRQ_CAUSE_OFS = 0x1900,
        PCIE_IRQ_MASK_OFS = 0x1910,
        PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */

        /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
        PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
        PCI_HC_MAIN_IRQ_MASK_OFS = 0x1d64,
        SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
        SOC_HC_MAIN_IRQ_MASK_OFS = 0x20024,
        ERR_IRQ = (1 << 0), /* shift by port # */
        DONE_IRQ = (1 << 1), /* shift by port # */
        HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
        HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
        PCI_ERR = (1 << 18),
        TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
        TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
        PORTS_0_3_COAL_DONE = (1 << 8),
        PORTS_4_7_COAL_DONE = (1 << 17),
        PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
        GPIO_INT = (1 << 22),
        SELF_INT = (1 << 23),
        TWSI_INT = (1 << 24),
        HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
        HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
        HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
        HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
                               PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
                               PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
                               HC_MAIN_RSVD),
        HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
                                 HC_MAIN_RSVD_5),
        HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

        /* SATAHC registers */
        HC_CFG_OFS = 0,

        HC_IRQ_CAUSE_OFS = 0x14,
        DMA_IRQ = (1 << 0), /* shift by port # */
        HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
        DEV_IRQ = (1 << 8), /* shift by port # */

        /* Shadow block registers */
        SHD_BLK_OFS = 0x100,
        SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */

        /* SATA registers */
        SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
        SATA_ACTIVE_OFS = 0x350,
        SATA_FIS_IRQ_CAUSE_OFS = 0x364,

        LTMODE_OFS = 0x30c,
        LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */

        PHY_MODE3 = 0x310,
        PHY_MODE4 = 0x314,
        PHY_MODE2 = 0x330,
        SATA_IFCTL_OFS = 0x344,
        SATA_TESTCTL_OFS = 0x348,
        SATA_IFSTAT_OFS = 0x34c,
        VENDOR_UNIQUE_FIS_OFS = 0x35c,

        FISCFG_OFS = 0x360,
        FISCFG_WAIT_DEV_ERR = (1 << 8), /* wait for host on DevErr */
        FISCFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */

        MV5_PHY_MODE = 0x74,
        MV5_LTMODE_OFS = 0x30,
        MV5_PHY_CTL_OFS = 0x0C,
        SATA_INTERFACE_CFG_OFS = 0x050,

        MV_M2_PREAMP_MASK = 0x7e0,

        /* Port registers */
        EDMA_CFG_OFS = 0,
        EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
        EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
        EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
        EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
        EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
        EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
        EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */

        EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
        EDMA_ERR_IRQ_MASK_OFS = 0xc,
        EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
        EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
        EDMA_ERR_DEV = (1 << 2), /* device error */
        EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
        EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
        EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
        EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
        EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
        EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
        EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transport layer irq */
        EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
        EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
        EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
        EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
        EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
        EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
        EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
        EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
        EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
        EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
        EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
        EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
        EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
        EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
        EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
        EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
        EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
        EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
        EDMA_ERR_OVERRUN_5 = (1 << 5),
        EDMA_ERR_UNDERRUN_5 = (1 << 6),

        EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
                                 EDMA_ERR_LNK_CTRL_RX_1 |
                                 EDMA_ERR_LNK_CTRL_RX_3 |
                                 EDMA_ERR_LNK_CTRL_TX,

        EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
                         EDMA_ERR_PRD_PAR |
                         EDMA_ERR_DEV_DCON |
                         EDMA_ERR_DEV_CON |
                         EDMA_ERR_SERR |
                         EDMA_ERR_SELF_DIS |
                         EDMA_ERR_CRQB_PAR |
                         EDMA_ERR_CRPB_PAR |
                         EDMA_ERR_INTRL_PAR |
                         EDMA_ERR_IORDY |
                         EDMA_ERR_LNK_CTRL_RX_2 |
                         EDMA_ERR_LNK_DATA_RX |
                         EDMA_ERR_LNK_DATA_TX |
                         EDMA_ERR_TRANS_PROTO,

        EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
                           EDMA_ERR_PRD_PAR |
                           EDMA_ERR_DEV_DCON |
                           EDMA_ERR_DEV_CON |
                           EDMA_ERR_OVERRUN_5 |
                           EDMA_ERR_UNDERRUN_5 |
                           EDMA_ERR_SELF_DIS_5 |
                           EDMA_ERR_CRQB_PAR |
                           EDMA_ERR_CRPB_PAR |
                           EDMA_ERR_INTRL_PAR |
                           EDMA_ERR_IORDY,

        EDMA_REQ_Q_BASE_HI_OFS = 0x10,
        EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
        EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
        EDMA_REQ_Q_PTR_SHIFT = 5,

        EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
        EDMA_RSP_Q_IN_PTR_OFS = 0x20,
        EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
        EDMA_RSP_Q_PTR_SHIFT = 3,

        EDMA_CMD_OFS = 0x28, /* EDMA command register */
        EDMA_EN = (1 << 0), /* enable EDMA */
        EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
        EDMA_RESET = (1 << 2), /* reset eng/trans/link/phy */

        EDMA_STATUS_OFS = 0x30, /* EDMA engine status */
        EDMA_STATUS_CACHE_EMPTY = (1 << 6), /* GenIIe command cache empty */
        EDMA_STATUS_IDLE = (1 << 7), /* GenIIe EDMA enabled/idle */

        EDMA_IORDY_TMOUT_OFS = 0x34,
        EDMA_ARB_CFG_OFS = 0x38,

        EDMA_HALTCOND_OFS = 0x60, /* GenIIe halt conditions */

        GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */

        /* Host private flags (hp_flags) */
        MV_HP_FLAG_MSI = (1 << 0),
        MV_HP_ERRATA_50XXB0 = (1 << 1),
        MV_HP_ERRATA_50XXB2 = (1 << 2),
        MV_HP_ERRATA_60X1B2 = (1 << 3),
        MV_HP_ERRATA_60X1C0 = (1 << 4),
        MV_HP_ERRATA_XX42A0 = (1 << 5),
        MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
        MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
        MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
        MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
        MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */

        /* Port private flags (pp_flags) */
        MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
        MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
#define WINDOW_BASE(i) (0x20034 + ((i) << 4))
enum {
        /* DMA boundary 0xffff is required by the s/g splitting
         * we need on /length/ in mv_fill_sg().
         */
        MV_DMA_BOUNDARY = 0xffffU,

        /* mask of register bits containing lower 32 bits
         * of EDMA request queue DMA address
         */
        EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

        /* ditto, for response queue */
        EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};
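
/*
 * Illustration of the masks above (not used by the code): the CRQB ring
 * is 1KB-aligned (mv_set_edma_ptrs() checks crqb_dma & 0x3ff), so its
 * low 10 bits are zero and EDMA_REQ_Q_BASE_LO_MASK (0xfffffc00) leaves
 * exactly enough room for the 5-bit queue index at EDMA_REQ_Q_PTR_SHIFT.
 * Likewise the 256B-aligned CRPB ring frees bits 7:3 for its index.
 */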
enum chip_type {
        chip_504x,
        chip_508x,
        chip_5080,
        chip_604x,
        chip_608x,
        chip_6042,
        chip_7042,
        chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
        __le32 sg_addr;
        __le32 sg_addr_hi;
        __le16 ctrl_flags;
        __le16 ata_cmd[11];
};

struct mv_crqb_iie {
        __le32 addr;
        __le32 addr_hi;
        __le32 flags;
        __le32 len;
        __le32 ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
        __le16 id;
        __le16 flags;
        __le32 tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
        __le32 addr;
        __le32 flags_size;
        __le32 addr_hi;
        __le32 reserved;
};

struct mv_port_priv {
        struct mv_crqb *crqb;
        dma_addr_t crqb_dma;
        struct mv_crpb *crpb;
        dma_addr_t crpb_dma;
        struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
        dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
        unsigned int req_idx;
        unsigned int resp_idx;
        u32 pp_flags;
};

struct mv_port_signal {
        u32 amps;
        u32 pre;
};

struct mv_host_priv {
        u32 hp_flags;
        struct mv_port_signal signal[8];
        const struct mv_hw_ops *ops;
        int n_ports;
        void __iomem *base;
        void __iomem *main_irq_cause_addr;
        void __iomem *main_irq_mask_addr;
        u32 irq_cause_ofs;
        u32 irq_mask_ofs;
        u32 unmask_all_irqs;
        /*
         * These consistent DMA memory pools give us guaranteed
         * alignment for hardware-accessed data structures,
         * and less memory waste in accomplishing the alignment.
         */
        struct dma_pool *crqb_pool;
        struct dma_pool *crpb_pool;
        struct dma_pool *sg_tbl_pool;
};

struct mv_hw_ops {
        void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
        void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio);
        int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
        void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
        void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                           unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
                            void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
                        unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);

static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
                               void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
                               void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
                           void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
                               void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
                             unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
                            unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
                        unsigned long deadline);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
        ATA_BASE_SHT(DRV_NAME),
        .sg_tablesize = MV_MAX_SG_CT / 2,
        .dma_boundary = MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue = MV_MAX_Q_DEPTH - 1,
        .sg_tablesize = MV_MAX_SG_CT / 2,
        .dma_boundary = MV_DMA_BOUNDARY,
};
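
/*
 * Worked example of the halving above (illustration only): with
 * MV_MAX_SG_CT == 256, .sg_tablesize is 128; each of those 128
 * segments can be split at most once at a 64K boundary by
 * mv_fill_sg(), so the worst case still fits the 256-entry ePRD table.
 */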
static struct ata_port_operations mv5_ops = {
        .inherits = &ata_sff_port_ops,

        .qc_defer = mv_qc_defer,
        .qc_prep = mv_qc_prep,
        .qc_issue = mv_qc_issue,

        .freeze = mv_eh_freeze,
        .thaw = mv_eh_thaw,
        .hardreset = mv_hardreset,
        .error_handler = ata_std_error_handler, /* avoid SFF EH */
        .post_internal_cmd = ATA_OP_NULL,

        .scr_read = mv5_scr_read,
        .scr_write = mv5_scr_write,

        .port_start = mv_port_start,
        .port_stop = mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
        .inherits = &mv5_ops,
        .dev_config = mv6_dev_config,
        .scr_read = mv_scr_read,
        .scr_write = mv_scr_write,

        .pmp_hardreset = mv_pmp_hardreset,
        .pmp_softreset = mv_softreset,
        .softreset = mv_softreset,
        .error_handler = sata_pmp_error_handler,
};

static struct ata_port_operations mv_iie_ops = {
        .inherits = &mv6_ops,
        .dev_config = ATA_OP_NULL,
        .qc_prep = mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
        { /* chip_504x */
                .flags = MV_COMMON_FLAGS,
                .pio_mask = 0x1f, /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv5_ops,
        },
        { /* chip_508x */
                .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask = 0x1f, /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv5_ops,
        },
        { /* chip_5080 */
                .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
                .pio_mask = 0x1f, /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv5_ops,
        },
        { /* chip_604x */
                .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                         ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
                         ATA_FLAG_NCQ,
                .pio_mask = 0x1f, /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv6_ops,
        },
        { /* chip_608x */
                .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                         ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
                         ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
                .pio_mask = 0x1f, /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv6_ops,
        },
        { /* chip_6042 */
                .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                         ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
                         ATA_FLAG_NCQ,
                .pio_mask = 0x1f, /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv_iie_ops,
        },
        { /* chip_7042 */
                .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                         ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
                         ATA_FLAG_NCQ,
                .pio_mask = 0x1f, /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv_iie_ops,
        },
        { /* chip_soc */
                .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
                         ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
                         ATA_FLAG_NCQ | MV_FLAG_SOC,
                .pio_mask = 0x1f, /* pio0-4 */
                .udma_mask = ATA_UDMA6,
                .port_ops = &mv_iie_ops,
        },
};

static const struct pci_device_id mv_pci_tbl[] = {
        { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
        { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
        { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
        /* RocketRAID 1740/174x have different identifiers */
        { PCI_VDEVICE(TTI, 0x1740), chip_508x },
        { PCI_VDEVICE(TTI, 0x1742), chip_508x },

        { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
        { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
        { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
        { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

        { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

        /* Adaptec 1430SA */
        { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

        /* Marvell 7042 support */
        { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

        /* Highpoint RocketRAID PCIe series */
        { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
        { PCI_VDEVICE(TTI, 0x2310), chip_7042 },

        { } /* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
        .phy_errata = mv5_phy_errata,
        .enable_leds = mv5_enable_leds,
        .read_preamp = mv5_read_preamp,
        .reset_hc = mv5_reset_hc,
        .reset_flash = mv5_reset_flash,
        .reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
        .phy_errata = mv6_phy_errata,
        .enable_leds = mv6_enable_leds,
        .read_preamp = mv6_read_preamp,
        .reset_hc = mv6_reset_hc,
        .reset_flash = mv6_reset_flash,
        .reset_bus = mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
        .phy_errata = mv6_phy_errata,
        .enable_leds = mv_soc_enable_leds,
        .read_preamp = mv_soc_read_preamp,
        .reset_hc = mv_soc_reset_hc,
        .reset_flash = mv_soc_reset_flash,
        .reset_bus = mv_soc_reset_bus,
};

/*
 * Functions
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
        writel(data, addr);
        (void) readl(addr); /* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
        return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
        return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)    \
{                                                               \
        shift = mv_hc_from_port(port) * HC_SHIFT;               \
        hardport = mv_hardport_from_port(port);                 \
        shift += hardport * 2;                                  \
}
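
/*
 * Worked example for the macro above (illustration only): for port == 5,
 * mv_hc_from_port(5) == 1, so shift starts at HC_SHIFT == 9; hardport is
 * (5 & MV_PORT_MASK) == 1, so shift becomes 9 + 1*2 == 11.  ERR_IRQ and
 * DONE_IRQ for port 5 are thus bits 11 and 12 of the main_irq_cause and
 * main_irq_mask registers.
 */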
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
        return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
                                                 unsigned int port)
{
        return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
        return mv_hc_base_from_port(base, port) +
               MV_SATAHC_ARBTR_REG_SZ +
               (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
        void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
        unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

        return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
        struct mv_host_priv *hpriv = host->private_data;
        return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
        return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
        return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
                             struct mv_host_priv *hpriv,
                             struct mv_port_priv *pp)
{
        u32 index;

        /*
         * initialize request queue
         */
        pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
        index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

        WARN_ON(pp->crqb_dma & 0x3ff);
        writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
        writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
                writelfl((pp->crqb_dma & 0xffffffff) | index,
                         port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
        else
                writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

        /*
         * initialize response queue
         */
        pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
        index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

        WARN_ON(pp->crpb_dma & 0xff);
        writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
                writelfl((pp->crpb_dma & 0xffffffff) | index,
                         port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
        else
                writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
        writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
                 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
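
/*
 * Note on the register layout used above (illustration only): the request
 * queue in-pointer register holds BASE_LO and the index together, i.e.
 * (crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | (req_idx << EDMA_REQ_Q_PTR_SHIFT).
 * The 32-slot ring of 32-byte CRQBs spans exactly 1KB, so the index bits
 * (9:5) and the base bits (31:10) never collide.
 */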
/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command being started
 *
 * If the engine is already running in the wrong mode (NCQ vs.
 * non-NCQ), stop it first; then configure and enable eDMA if it
 * is not already enabled.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
                         struct mv_port_priv *pp, u8 protocol)
{
        int want_ncq = (protocol == ATA_PROT_NCQ);

        if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
                int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
                if (want_ncq != using_ncq)
                        mv_stop_edma(ap);
        }
        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
                struct mv_host_priv *hpriv = ap->host->private_data;
                int hardport = mv_hardport_from_port(ap->port_no);
                void __iomem *hc_mmio = mv_hc_base_from_port(
                                        mv_host_base(ap->host), hardport);
                u32 hc_irq_cause, ipending;

                /* clear EDMA event indicators, if any */
                writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

                /* clear EDMA interrupt indicator, if any */
                hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
                ipending = (DEV_IRQ | DMA_IRQ) << hardport;
                if (hc_irq_cause & ipending) {
                        writelfl(hc_irq_cause & ~ipending,
                                 hc_mmio + HC_IRQ_CAUSE_OFS);
                }

                mv_edma_cfg(ap, want_ncq);

                /* clear FIS IRQ Cause */
                writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

                mv_set_edma_ptrs(port_mmio, hpriv, pp);

                writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
                pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
        }
}
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
        const int per_loop = 5, timeout = (15 * 1000 / per_loop);
        int i;

        /*
         * Wait for the EDMA engine to finish transactions in progress.
         */
        for (i = 0; i < timeout; ++i) {
                u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
                if ((edma_stat & empty_idle) == empty_idle)
                        break;
                udelay(per_loop);
        }
        /* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}

/**
 * mv_stop_edma_engine - Disable eDMA engine
 * @port_mmio: io base address
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
        int i;

        /* Disable eDMA. The disable bit auto clears. */
        writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

        /* Wait for the chip to confirm eDMA is off. */
        for (i = 10000; i > 0; i--) {
                u32 reg = readl(port_mmio + EDMA_CMD_OFS);
                if (!(reg & EDMA_EN))
                        return 0;
                udelay(10);
        }
        return -EIO;
}

static int mv_stop_edma(struct ata_port *ap)
{
        void __iomem *port_mmio = mv_ap_base(ap);
        struct mv_port_priv *pp = ap->private_data;

        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
                return 0;
        pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
        mv_wait_for_edma_empty_idle(ap);
        if (mv_stop_edma_engine(port_mmio)) {
                ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
                return -EIO;
        }
        return 0;
}
#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
        int b, w;

        for (b = 0; b < bytes; ) {
                DPRINTK("%p: ", start + b);
                for (w = 0; b < bytes && w < 4; w++) {
                        printk("%08x ", readl(start + b));
                        b += sizeof(u32);
                }
                printk("\n");
        }
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
        int b, w;
        u32 dw;

        for (b = 0; b < bytes; ) {
                DPRINTK("%02x: ", b);
                for (w = 0; b < bytes && w < 4; w++) {
                        (void) pci_read_config_dword(pdev, b, &dw);
                        printk("%08x ", dw);
                        b += sizeof(u32);
                }
                printk("\n");
        }
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
                             struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
        void __iomem *hc_base = mv_hc_base(mmio_base,
                                           port >> MV_PORT_HC_SHIFT);
        void __iomem *port_base;
        int start_port, num_ports, p, start_hc, num_hcs, hc;

        if (port < 0) {
                start_hc = start_port = 0;
                num_ports = 8; /* should be benign for 4-port devices */
                num_hcs = 2;
        } else {
                start_hc = port >> MV_PORT_HC_SHIFT;
                start_port = port;
                num_ports = num_hcs = 1;
        }
        DPRINTK("All registers for port(s) %u-%u:\n", start_port,
                num_ports > 1 ? num_ports - 1 : start_port);

        if (pdev) {
                DPRINTK("PCI config space regs:\n");
                mv_dump_pci_cfg(pdev, 0x68);
        }
        DPRINTK("PCI regs:\n");
        mv_dump_mem(mmio_base + 0xc00, 0x3c);
        mv_dump_mem(mmio_base + 0xd00, 0x34);
        mv_dump_mem(mmio_base + 0xf00, 0x4);
        mv_dump_mem(mmio_base + 0x1d00, 0x6c);
        for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
                hc_base = mv_hc_base(mmio_base, hc);
                DPRINTK("HC regs (HC %i):\n", hc);
                mv_dump_mem(hc_base, 0x1c);
        }
        for (p = start_port; p < start_port + num_ports; p++) {
                port_base = mv_port_base(mmio_base, p);
                DPRINTK("EDMA regs (port %i):\n", p);
                mv_dump_mem(port_base, 0x54);
                DPRINTK("SATA regs (port %i):\n", p);
                mv_dump_mem(port_base + 0x300, 0x60);
        }
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
        unsigned int ofs;

        switch (sc_reg_in) {
        case SCR_STATUS:
        case SCR_CONTROL:
        case SCR_ERROR:
                ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
                break;
        case SCR_ACTIVE:
                ofs = SATA_ACTIVE_OFS; /* active is not with the others */
                break;
        default:
                ofs = 0xffffffffU;
                break;
        }
        return ofs;
}
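
/*
 * Illustration of the mapping above (not used by the code): SCR_STATUS,
 * SCR_ERROR and SCR_CONTROL are consecutive u32s starting at
 * SATA_STATUS_OFS, so e.g. SCR_CONTROL (2) maps to 0x300 + 2 * 4 == 0x308,
 * while SCR_ACTIVE lives apart at 0x350 and needs the special case.
 */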
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU) {
                *val = readl(mv_ap_base(ap) + ofs);
                return 0;
        } else
                return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
        unsigned int ofs = mv_scr_offset(sc_reg_in);

        if (ofs != 0xffffffffU) {
                writelfl(val, mv_ap_base(ap) + ofs);
                return 0;
        } else
                return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
        /*
         * Deal with Gen-II ("mv6") hardware quirks/restrictions:
         *
         * Gen-II does not support NCQ over a port multiplier
         * (no FIS-based switching).
         *
         * We don't have hob_nsect when doing NCQ commands on Gen-II.
         * See mv_qc_prep() for more info.
         */
        if (adev->flags & ATA_DFLAG_NCQ) {
                if (sata_pmp_attached(adev->link->ap)) {
                        adev->flags &= ~ATA_DFLAG_NCQ;
                        ata_dev_printk(adev, KERN_INFO,
                                "NCQ disabled for command-based switching\n");
                } else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
                        adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
                        ata_dev_printk(adev, KERN_INFO,
                                "max_sectors limited to %u for NCQ\n",
                                adev->max_sectors);
                }
        }
}
static int mv_qc_defer(struct ata_queued_cmd *qc)
{
        struct ata_link *link = qc->dev->link;
        struct ata_port *ap = link->ap;
        struct mv_port_priv *pp = ap->private_data;

        /*
         * If the port is completely idle, then allow the new qc.
         */
        if (ap->nr_active_links == 0)
                return 0;

        if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
                /*
                 * The port is operating in host queuing mode (EDMA).
                 * It can accommodate a new qc if the qc protocol
                 * is compatible with the current host queue mode.
                 */
                if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
                        /*
                         * The host queue (EDMA) is in NCQ mode.
                         * If the new qc is also an NCQ command,
                         * then allow the new qc.
                         */
                        if (qc->tf.protocol == ATA_PROT_NCQ)
                                return 0;
                } else {
                        /*
                         * The host queue (EDMA) is in non-NCQ, DMA mode.
                         * If the new qc is also a non-NCQ, DMA command,
                         * then allow the new qc.
                         */
                        if (qc->tf.protocol == ATA_PROT_DMA)
                                return 0;
                }
        }
        return ATA_DEFER_PORT;
}
static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
{
        u32 old_fiscfg, new_fiscfg, old_ltmode, new_ltmode;

        /*
         * Various bit settings required for operation
         * in FIS-based switching (fbs) mode on GenIIe:
         */
        old_fiscfg = readl(port_mmio + FISCFG_OFS);
        old_ltmode = readl(port_mmio + LTMODE_OFS);
        if (enable_fbs) {
                new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
                new_ltmode = old_ltmode | LTMODE_BIT8;
        } else { /* disable fbs */
                new_fiscfg = old_fiscfg & ~FISCFG_SINGLE_SYNC;
                new_ltmode = old_ltmode & ~LTMODE_BIT8;
        }
        if (new_fiscfg != old_fiscfg)
                writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
        if (new_ltmode != old_ltmode)
                writelfl(new_ltmode, port_mmio + LTMODE_OFS);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
        u32 cfg;
        struct mv_port_priv *pp = ap->private_data;
        struct mv_host_priv *hpriv = ap->host->private_data;
        void __iomem *port_mmio = mv_ap_base(ap);

        /* set up non-NCQ EDMA configuration */
        cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */

        if (IS_GEN_I(hpriv))
                cfg |= (1 << 8); /* enab config burst size mask */
        else if (IS_GEN_II(hpriv))
                cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
        else if (IS_GEN_IIE(hpriv)) {
                cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
                cfg |= (1 << 22); /* enab 4-entry host queue cache */
                if (HAS_PCI(ap->host))
                        cfg |= (1 << 18); /* enab early completion */
                if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
                        cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */

                if (want_ncq && sata_pmp_attached(ap)) {
                        cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
                        mv_config_fbs(port_mmio, 1);
                } else {
                        mv_config_fbs(port_mmio, 0);
                }
        }

        if (want_ncq) {
                cfg |= EDMA_CFG_NCQ;
                pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
        } else
                pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

        writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
        struct mv_host_priv *hpriv = ap->host->private_data;
        struct mv_port_priv *pp = ap->private_data;
        int tag;

        if (pp->crqb) {
                dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
                pp->crqb = NULL;
        }
        if (pp->crpb) {
                dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
                pp->crpb = NULL;
        }
        /*
         * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
         * For later hardware, we have one unique sg_tbl per NCQ tag.
         */
        for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
                if (pp->sg_tbl[tag]) {
                        if (tag == 0 || !IS_GEN_I(hpriv))
                                dma_pool_free(hpriv->sg_tbl_pool,
                                              pp->sg_tbl[tag],
                                              pp->sg_tbl_dma[tag]);
                        pp->sg_tbl[tag] = NULL;
                }
        }
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct mv_host_priv *hpriv = ap->host->private_data;
        struct mv_port_priv *pp;
        int tag;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;
        ap->private_data = pp;

        pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
        if (!pp->crqb)
                return -ENOMEM;
        memset(pp->crqb, 0, MV_CRQB_Q_SZ);

        pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
        if (!pp->crpb)
                goto out_port_free_dma_mem;
        memset(pp->crpb, 0, MV_CRPB_Q_SZ);

        /*
         * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
         * For later hardware, we need one unique sg_tbl per NCQ tag.
         */
        for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
                if (tag == 0 || !IS_GEN_I(hpriv)) {
                        pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
                                        GFP_KERNEL, &pp->sg_tbl_dma[tag]);
                        if (!pp->sg_tbl[tag])
                                goto out_port_free_dma_mem;
                } else {
                        pp->sg_tbl[tag] = pp->sg_tbl[0];
                        pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
                }
        }
        return 0;

out_port_free_dma_mem:
        mv_port_free_dma_mem(ap);
        return -ENOMEM;
}
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
        mv_stop_edma(ap);
        mv_port_free_dma_mem(ap);
}
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
        struct mv_port_priv *pp = qc->ap->private_data;
        struct scatterlist *sg;
        struct mv_sg *mv_sg, *last_sg = NULL;
        unsigned int si;

        mv_sg = pp->sg_tbl[qc->tag];
        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                dma_addr_t addr = sg_dma_address(sg);
                u32 sg_len = sg_dma_len(sg);

                while (sg_len) {
                        u32 offset = addr & 0xffff;
                        u32 len = sg_len;

                        if (offset + sg_len > 0x10000)
                                len = 0x10000 - offset;

                        mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
                        mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
                        mv_sg->flags_size = cpu_to_le32(len & 0xffff);

                        sg_len -= len;
                        addr += len;

                        last_sg = mv_sg;
                        mv_sg++;
                }
        }

        if (likely(last_sg))
                last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
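
/*
 * Worked example of the 64K split above (illustration only): a 0x5000-byte
 * segment at bus address 0x1fff0 has offset 0xfff0, so the first ePRD
 * covers only 0x10000 - 0xfff0 == 0x10 bytes; the loop then emits a second
 * ePRD for the remaining 0x4ff0 bytes starting at 0x20000.  This splitting
 * is why .sg_tablesize is only MV_MAX_SG_CT / 2.
 */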
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		  (last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

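/*
 * In effect, each CRQB ATA command word packs the register value in the
 * low byte, the shadow-register address at CRQB_CMD_ADDR_SHIFT, and the
 * CS strobe, with CRQB_CMD_LAST marking the final word of the sequence.
 */
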
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA. Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA. Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

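/*
 * Unlike the Gen I/II CRQB above, which streams one (address, data) word
 * per shadow register, the Gen IIE block lays the whole taskfile out in
 * four fixed little-endian 32-bit words, so no per-register packing or
 * "last" marker is needed.
 */
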
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
	in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

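/*
 * The request queue is a power-of-two ring: "(req_idx + 1) &
 * MV_MAX_Q_DEPTH_MASK" wraps the software producer index (e.g. with a
 * depth of 32, index 31 wraps back to 0), and the shifted copy is merged
 * with the low queue base address bits when written to the in-pointer
 * register.
 */
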
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;

	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
		return NULL;
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
		qc = NULL;
	return qc;
}

static void mv_unexpected_intr(struct ata_port *ap)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	char *when = "";

	/*
	 * We got a device interrupt from something that
	 * was supposed to be using EDMA or polling.
	 */
	ata_ehi_clear_desc(ehi);
	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		when = " while EDMA enabled";
	} else {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = " while polling";
	}
	ata_ehi_push_desc(ehi, "unexpected device interrupt%s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action |= ATA_EH_RESET;
	ata_port_freeze(ap);
}

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected command (non-NCQ), or NULL
 *
 * Most cases require a full reset of the chip's state machine,
 * which also performs a COMRESET.
 * Also, if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/*
	 * Read and clear the err_cause bits.  This won't actually
	 * clear for some errors (eg. SError), but we will be doing
	 * a hard reset in those cases regardless, which *will* clear it.
	 */
	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);

	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			/*
			 * Ensure that we read our own SCR, not a pmp link SCR:
			 */
			ap->ops->scr_read(ap, SCR_ERROR, &serr);
			/*
			 * Don't clear SError here; leave it for libata-eh:
			 */
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

	if (qc) {
		u8 ata_status;
		u16 edma_status = le16_to_cpu(response->flags);
		/*
		 * edma_status from a response queue entry:
		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
		 *   MSB is saved ATA status from command completion.
		 */
		if (!ncq_enabled) {
			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
			if (err_cause) {
				/*
				 * Error will be seen/handled by mv_err_intr().
				 * So do nothing at all here.
				 */
				return;
			}
		}
		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
		qc->err_mask |= ac_err_mask(ata_status);
		ata_qc_complete(qc);
	} else {
		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
				__func__, tag);
	}
}

static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);

	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* Process new responses since the last time we looked */
	while (in_index != pp->resp_idx) {
		unsigned int tag;
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];

		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;

		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
		} else {
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		}
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
		work_done = true;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

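/*
 * Completion handling thus follows a classic producer/consumer ring:
 * hardware advances the in-pointer as CRPBs complete, software walks
 * resp_idx up to meet it, and only after draining everything does it
 * write the out-pointer back so the EDMA can reuse those slots.
 */
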
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @main_irq_cause: Main interrupt cause register for the chip.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio = NULL;
	u32 hc_irq_cause = 0;
	unsigned int handled = 0, port;

	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		unsigned int shift, hardport, port_cause;
		/*
		 * When we move to the second hc, flag our cached
		 * copies of hc_mmio (and hc_irq_cause) as invalid again.
		 */
		if (port == MV_PORTS_PER_HC)
			hc_mmio = NULL;
		/*
		 * Do nothing if port is not interrupting or is disabled:
		 */
		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
			continue;
		/*
		 * Each hc within the host has its own hc_irq_cause register.
		 * We defer reading it until we know we need it, right now:
		 *
		 * FIXME later: we don't really need to read this register
		 * (some logic changes required below if we go that way),
		 * because it doesn't tell us anything new.  But we do need
		 * to write to it, outside the top of this loop,
		 * to reset the interrupt triggers for next time.
		 */
		if (!hc_mmio) {
			hc_mmio = mv_hc_base_from_port(mmio, port);
			hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
			writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
			handled = 1;
		}
		/*
		 * Process completed CRPB response(s) before other events.
		 */
		pp = ap->private_data;
		if (hc_irq_cause & (DMA_IRQ << hardport)) {
			if (pp->pp_flags & MV_PP_FLAG_EDMA_EN)
				mv_process_crpb_entries(ap, pp);
		}
		/*
		 * Handle chip-reported errors, or continue on to handle PIO.
		 */
		if (unlikely(port_cause & ERR_IRQ)) {
			mv_err_intr(ap, mv_get_active_qc(ap));
		} else if (hc_irq_cause & (DEV_IRQ << hardport)) {
			if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
				struct ata_queued_cmd *qc = mv_get_active_qc(ap);
				if (qc) {
					ata_sff_host_intr(ap, qc);
					continue;
				}
			}
			mv_unexpected_intr(ap);
		}
	}
	return handled;
}

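/*
 * Each port owns a (DONE_IRQ | ERR_IRQ) bit pair in the main cause
 * register; MV_PORT_TO_SHIFT_AND_HARDPORT evidently derives both that
 * bit position and the port's index within its host controller, so one
 * register read is enough to decide whether a port needs any attention.
 */
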
static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
	return 1;	/* handled */
}

/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	u32 main_irq_cause, main_irq_mask;

	spin_lock(&host->lock);
	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	main_irq_mask = readl(hpriv->main_irq_mask_addr);
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if ((main_irq_cause & main_irq_mask) && (main_irq_cause != 0xffffffffU)) {
		if (unlikely((main_irq_cause & PCI_ERR) && HAS_PCI(host)))
			handled = mv_pci_error(host, hpriv->base);
		else
			handled = mv_host_intr(host, main_irq_cause);
	}
	spin_unlock(&host->lock);
	return IRQ_RETVAL(handled);
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

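/*
 * SCR_STATUS, SCR_ERROR and SCR_CONTROL are enumerated 0, 1, 2 in libata
 * and the 50xx PHY block stores them as consecutive 32-bit registers, so
 * the byte offset is simply the enum value times sizeof(u32).
 */
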
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL_OFS);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LTMODE_OFS);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LTMODE_OFS);

		tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE_OFS);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE_OFS);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
	ZERO(PCI_HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

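/*
 * The reset handshake above is deliberately bounded: up to ~1000us
 * waiting for PCI_MASTER_EMPTY, then at most five attempts each to
 * observe GLOB_SFT_RST latch and clear, reporting failure rather than
 * spinning forever on wedged hardware.
 */
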
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG_OFS);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}
#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}

static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
}

static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}

static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}

static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}

static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	u32 main_irq_mask;

	/* FIXME: handle coalescing completion events properly */

	mv_stop_edma(ap);
	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* disable assertion of portN err, done events */
	main_irq_mask = readl(hpriv->main_irq_mask_addr);
	main_irq_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
}

static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 main_irq_mask, hc_irq_cause;

	/* FIXME: handle coalescing completion events properly */

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	main_irq_mask = readl(hpriv->main_irq_mask_addr);
	main_irq_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
}

/**
 * mv_port_init - Perform some early initialization on a single port.
 * @port: libata data structure storing shadow register addresses
 * @port_mmio: base address of the port
 *
 * Initialize shadow register mmio addresses, clear outstanding
 * interrupts on the port, and unmask interrupts for the future
 * start of the port.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}

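/*
 * The shadow register block presents each ATA taskfile register as its
 * own 32-bit word, hence the "sizeof(u32) * ATA_REG_xxx" stride above:
 * e.g. ATA_REG_DATA at +0x00, ATA_REG_ERR at +0x04, and so on, with only
 * control/altstatus living at the separate SHD_CTL_AST_OFS offset.
 */
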
static unsigned int mv_in_pcix_mode(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!HAS_PCI(host) || IS_PCIE(hpriv))
		return 0;	/* not PCI-X capable: SoC or PCIe chip */
	reg = readl(mmio + MV_PCI_MODE_OFS);
	if ((reg & MV_PCI_MODE_MASK) == 0)
		return 0;	/* conventional PCI mode */
	return 1;	/* chip is in PCI-X mode */
}

static int mv_pci_cut_through_okay(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	u32 reg;

	if (!mv_in_pcix_mode(host)) {
		reg = readl(mmio + PCI_COMMAND_OFS);
		if (reg & PCI_COMMAND_MRDTRIG)
			return 0; /* not okay */
	}
	return 1; /* okay */
}

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
			hp_flags |= MV_HP_CUT_THROUGH;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}

/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
	}

	/* global interrupt mask: 0 == mask everything */
	writel(0, hpriv->main_irq_mask_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_irq_mask_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_irq_mask_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_irq_cause_addr),
			readl(hpriv->main_irq_mask_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_irq_mask_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_irq_cause_addr),
			readl(hpriv->main_irq_mask_addr));
	}
done:
	return rc;
}

static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
					    MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
					    MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
					      MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}

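/*
 * These are the device-managed (dmam_*) pool variants: the pools are torn
 * down automatically when the device goes away, which is why neither the
 * error paths above nor the remove path call dma_pool_destroy().
 */
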
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
		       (cs->mbus_attr << 8) |
		       (dram->mbus_dram_target_id << 4) | 1,
		       hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}

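/*
 * Per the write above, each WINDOW_CTRL register packs the window size
 * (as size-1, high 16 bits), the chip-select attribute in bits 15:8, the
 * MBUS target id in bits 7:4, and an enable bit in bit 0, with the
 * matching WINDOW_BASE register holding the window start address.
 */
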
/**
 * mv_platform_probe - handle a positive probe of an SoC Marvell host
 * @pdev: platform device found
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	/* bail out early if the register window could not be mapped */
	if (!hpriv->base)
		return -ENOMEM;
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}

/*
 * mv_platform_remove - unplug a platform interface
 * @pdev: platform device
 *
 * A platform bus SATA device has been unplugged.  Perform the needed
 * cleanup.  Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};

#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

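/*
 * The logic above tries a 64-bit streaming mask first and falls back per
 * mask: if the 64-bit consistent mask fails, descriptor memory drops to
 * 32-bit addressing while data buffers keep full 64-bit addressing; only
 * when the streaming mask itself fails does everything fall back to 32-bit.
 */
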
/**
 * mv_print_info - Dump key info to kernel log for perusal.
 * @host: ATA host to print info about
 *
 * FIXME: complete this.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}

/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}

#endif

static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);