sata_mv.c

  1. /*
  2. * sata_mv.c - Marvell SATA support
  3. *
  4. * Copyright 2005: EMC Corporation, all rights reserved.
  5. * Copyright 2005 Red Hat, Inc. All rights reserved.
  6. *
  7. * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License as published by
  11. * the Free Software Foundation; version 2 of the License.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  21. *
  22. */
  23. #include <linux/kernel.h>
  24. #include <linux/module.h>
  25. #include <linux/pci.h>
  26. #include <linux/init.h>
  27. #include <linux/blkdev.h>
  28. #include <linux/delay.h>
  29. #include <linux/interrupt.h>
  30. #include <linux/dma-mapping.h>
  31. #include <linux/device.h>
  32. #include <scsi/scsi_host.h>
  33. #include <scsi/scsi_cmnd.h>
  34. #include <linux/libata.h>
  35. #define DRV_NAME "sata_mv"
  36. #define DRV_VERSION "0.8"
  37. enum {
38. /* BARs are enumerated in terms of pci_resource_start() */
  39. MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
  40. MV_IO_BAR = 2, /* offset 0x18: IO space */
  41. MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
  42. MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
  43. MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
  44. MV_PCI_REG_BASE = 0,
  45. MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
  46. MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
  47. MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
  48. MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
  49. MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
  50. MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
  51. MV_SATAHC0_REG_BASE = 0x20000,
  52. MV_FLASH_CTL = 0x1046c,
  53. MV_GPIO_PORT_CTL = 0x104f0,
  54. MV_RESET_CFG = 0x180d8,
  55. MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
  56. MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
  57. MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
  58. MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
  59. MV_USE_Q_DEPTH = ATA_DEF_QUEUE,
  60. MV_MAX_Q_DEPTH = 32,
  61. MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
  62. /* CRQB needs alignment on a 1KB boundary. Size == 1KB
  63. * CRPB needs alignment on a 256B boundary. Size == 256B
  64. * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
  65. * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
  66. */
  67. MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
  68. MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
  69. MV_MAX_SG_CT = 176,
  70. MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
  71. MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
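/* Illustrative arithmetic, worked out from the constants above and matching
 * the 4KB figure in the comment: CRQB queue = 32 B * 32 slots = 1024 B,
 * CRPB queue = 8 B * 32 slots = 256 B, ePRD table = 16 B * 176 = 2816 B,
 * so MV_PORT_PRIV_DMA_SZ = 1024 + 256 + 2816 = 4096 B per port.
 */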
  72. MV_PORTS_PER_HC = 4,
  73. /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
  74. MV_PORT_HC_SHIFT = 2,
  75. /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
  76. MV_PORT_MASK = 3,
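/* Example of the mapping above: for port 6, hc = 6 >> MV_PORT_HC_SHIFT = 1
 * and hard port = 6 & MV_PORT_MASK = 2, i.e. the third port of HC1.
 */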
  77. /* Host Flags */
  78. MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
  79. MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
  80. MV_COMMON_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
  81. ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
  82. ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING),
  83. MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
  84. CRQB_FLAG_READ = (1 << 0),
  85. CRQB_TAG_SHIFT = 1,
  86. CRQB_CMD_ADDR_SHIFT = 8,
  87. CRQB_CMD_CS = (0x2 << 11),
  88. CRQB_CMD_LAST = (1 << 15),
  89. CRPB_FLAG_STATUS_SHIFT = 8,
  90. EPRD_FLAG_END_OF_TBL = (1 << 31),
  91. /* PCI interface registers */
  92. PCI_COMMAND_OFS = 0xc00,
  93. PCI_MAIN_CMD_STS_OFS = 0xd30,
  94. STOP_PCI_MASTER = (1 << 2),
  95. PCI_MASTER_EMPTY = (1 << 3),
  96. GLOB_SFT_RST = (1 << 4),
  97. MV_PCI_MODE = 0xd00,
  98. MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
  99. MV_PCI_DISC_TIMER = 0xd04,
  100. MV_PCI_MSI_TRIGGER = 0xc38,
  101. MV_PCI_SERR_MASK = 0xc28,
  102. MV_PCI_XBAR_TMOUT = 0x1d04,
  103. MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
  104. MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
  105. MV_PCI_ERR_ATTRIBUTE = 0x1d48,
  106. MV_PCI_ERR_COMMAND = 0x1d50,
  107. PCI_IRQ_CAUSE_OFS = 0x1d58,
  108. PCI_IRQ_MASK_OFS = 0x1d5c,
  109. PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
  110. HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
  111. HC_MAIN_IRQ_MASK_OFS = 0x1d64,
  112. PORT0_ERR = (1 << 0), /* shift by port # */
  113. PORT0_DONE = (1 << 1), /* shift by port # */
  114. HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
  115. HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
  116. PCI_ERR = (1 << 18),
  117. TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
  118. TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
  119. PORTS_0_3_COAL_DONE = (1 << 8),
  120. PORTS_4_7_COAL_DONE = (1 << 17),
  121. PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
  122. GPIO_INT = (1 << 22),
  123. SELF_INT = (1 << 23),
  124. TWSI_INT = (1 << 24),
  125. HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
  126. HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
  127. HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
  128. PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
  129. HC_MAIN_RSVD),
  130. HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
  131. HC_MAIN_RSVD_5),
  132. /* SATAHC registers */
  133. HC_CFG_OFS = 0,
  134. HC_IRQ_CAUSE_OFS = 0x14,
  135. CRPB_DMA_DONE = (1 << 0), /* shift by port # */
  136. HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
  137. DEV_IRQ = (1 << 8), /* shift by port # */
  138. /* Shadow block registers */
  139. SHD_BLK_OFS = 0x100,
  140. SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
  141. /* SATA registers */
  142. SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
  143. SATA_ACTIVE_OFS = 0x350,
  144. PHY_MODE3 = 0x310,
  145. PHY_MODE4 = 0x314,
  146. PHY_MODE2 = 0x330,
  147. MV5_PHY_MODE = 0x74,
  148. MV5_LT_MODE = 0x30,
  149. MV5_PHY_CTL = 0x0C,
  150. SATA_INTERFACE_CTL = 0x050,
  151. MV_M2_PREAMP_MASK = 0x7e0,
  152. /* Port registers */
  153. EDMA_CFG_OFS = 0,
  154. EDMA_CFG_Q_DEPTH = 0, /* queueing disabled */
  155. EDMA_CFG_NCQ = (1 << 5),
  156. EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
  157. EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
  158. EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
  159. EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
  160. EDMA_ERR_IRQ_MASK_OFS = 0xc,
  161. EDMA_ERR_D_PAR = (1 << 0),
  162. EDMA_ERR_PRD_PAR = (1 << 1),
  163. EDMA_ERR_DEV = (1 << 2),
  164. EDMA_ERR_DEV_DCON = (1 << 3),
  165. EDMA_ERR_DEV_CON = (1 << 4),
  166. EDMA_ERR_SERR = (1 << 5),
  167. EDMA_ERR_SELF_DIS = (1 << 7),
  168. EDMA_ERR_BIST_ASYNC = (1 << 8),
  169. EDMA_ERR_CRBQ_PAR = (1 << 9),
  170. EDMA_ERR_CRPB_PAR = (1 << 10),
  171. EDMA_ERR_INTRL_PAR = (1 << 11),
  172. EDMA_ERR_IORDY = (1 << 12),
  173. EDMA_ERR_LNK_CTRL_RX = (0xf << 13),
  174. EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),
  175. EDMA_ERR_LNK_DATA_RX = (0xf << 17),
  176. EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),
  177. EDMA_ERR_LNK_DATA_TX = (0x1f << 26),
  178. EDMA_ERR_TRANS_PROTO = (1 << 31),
  179. EDMA_ERR_FATAL = (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
  180. EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
  181. EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
  182. EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
  183. EDMA_ERR_LNK_DATA_RX |
  184. EDMA_ERR_LNK_DATA_TX |
  185. EDMA_ERR_TRANS_PROTO),
  186. EDMA_REQ_Q_BASE_HI_OFS = 0x10,
  187. EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
  188. EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
  189. EDMA_REQ_Q_PTR_SHIFT = 5,
  190. EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
  191. EDMA_RSP_Q_IN_PTR_OFS = 0x20,
  192. EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
  193. EDMA_RSP_Q_PTR_SHIFT = 3,
  194. EDMA_CMD_OFS = 0x28,
  195. EDMA_EN = (1 << 0),
  196. EDMA_DS = (1 << 1),
  197. ATA_RST = (1 << 2),
  198. EDMA_IORDY_TMOUT = 0x34,
  199. EDMA_ARB_CFG = 0x38,
  200. /* Host private flags (hp_flags) */
  201. MV_HP_FLAG_MSI = (1 << 0),
  202. MV_HP_ERRATA_50XXB0 = (1 << 1),
  203. MV_HP_ERRATA_50XXB2 = (1 << 2),
  204. MV_HP_ERRATA_60X1B2 = (1 << 3),
  205. MV_HP_ERRATA_60X1C0 = (1 << 4),
  206. MV_HP_ERRATA_XX42A0 = (1 << 5),
  207. MV_HP_50XX = (1 << 6),
  208. MV_HP_GEN_IIE = (1 << 7),
  209. /* Port private flags (pp_flags) */
  210. MV_PP_FLAG_EDMA_EN = (1 << 0),
  211. MV_PP_FLAG_EDMA_DS_ACT = (1 << 1),
  212. };
  213. #define IS_50XX(hpriv) ((hpriv)->hp_flags & MV_HP_50XX)
  214. #define IS_60XX(hpriv) (((hpriv)->hp_flags & MV_HP_50XX) == 0)
  215. #define IS_GEN_I(hpriv) IS_50XX(hpriv)
  216. #define IS_GEN_II(hpriv) IS_60XX(hpriv)
  217. #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
  218. enum {
  219. MV_DMA_BOUNDARY = 0xffffffffU,
  220. EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
  221. EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
  222. };
  223. enum chip_type {
  224. chip_504x,
  225. chip_508x,
  226. chip_5080,
  227. chip_604x,
  228. chip_608x,
  229. chip_6042,
  230. chip_7042,
  231. };
  232. /* Command ReQuest Block: 32B */
  233. struct mv_crqb {
  234. __le32 sg_addr;
  235. __le32 sg_addr_hi;
  236. __le16 ctrl_flags;
  237. __le16 ata_cmd[11];
  238. };
  239. struct mv_crqb_iie {
  240. __le32 addr;
  241. __le32 addr_hi;
  242. __le32 flags;
  243. __le32 len;
  244. __le32 ata_cmd[4];
  245. };
  246. /* Command ResPonse Block: 8B */
  247. struct mv_crpb {
  248. __le16 id;
  249. __le16 flags;
  250. __le32 tmstmp;
  251. };
  252. /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
  253. struct mv_sg {
  254. __le32 addr;
  255. __le32 flags_size;
  256. __le32 addr_hi;
  257. __le32 reserved;
  258. };
  259. struct mv_port_priv {
  260. struct mv_crqb *crqb;
  261. dma_addr_t crqb_dma;
  262. struct mv_crpb *crpb;
  263. dma_addr_t crpb_dma;
  264. struct mv_sg *sg_tbl;
  265. dma_addr_t sg_tbl_dma;
  266. u32 pp_flags;
  267. };
  268. struct mv_port_signal {
  269. u32 amps;
  270. u32 pre;
  271. };
  272. struct mv_host_priv;
  273. struct mv_hw_ops {
  274. void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
  275. unsigned int port);
  276. void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
  277. void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
  278. void __iomem *mmio);
  279. int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
  280. unsigned int n_hc);
  281. void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
  282. void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
  283. };
  284. struct mv_host_priv {
  285. u32 hp_flags;
  286. struct mv_port_signal signal[8];
  287. const struct mv_hw_ops *ops;
  288. };
  289. static void mv_irq_clear(struct ata_port *ap);
  290. static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
  291. static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
  292. static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
  293. static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
  294. static void mv_phy_reset(struct ata_port *ap);
  295. static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
  296. static int mv_port_start(struct ata_port *ap);
  297. static void mv_port_stop(struct ata_port *ap);
  298. static void mv_qc_prep(struct ata_queued_cmd *qc);
  299. static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
  300. static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
  301. static void mv_eng_timeout(struct ata_port *ap);
  302. static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
  303. static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
  304. unsigned int port);
  305. static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
  306. static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
  307. void __iomem *mmio);
  308. static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  309. unsigned int n_hc);
  310. static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
  311. static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
  312. static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
  313. unsigned int port);
  314. static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
  315. static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
  316. void __iomem *mmio);
  317. static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
  318. unsigned int n_hc);
  319. static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
  320. static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
  321. static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
  322. unsigned int port_no);
  323. static void mv_stop_and_reset(struct ata_port *ap);
  324. static struct scsi_host_template mv_sht = {
  325. .module = THIS_MODULE,
  326. .name = DRV_NAME,
  327. .ioctl = ata_scsi_ioctl,
  328. .queuecommand = ata_scsi_queuecmd,
  329. .can_queue = MV_USE_Q_DEPTH,
  330. .this_id = ATA_SHT_THIS_ID,
  331. .sg_tablesize = MV_MAX_SG_CT,
  332. .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
  333. .emulated = ATA_SHT_EMULATED,
  334. .use_clustering = 1,
  335. .proc_name = DRV_NAME,
  336. .dma_boundary = MV_DMA_BOUNDARY,
  337. .slave_configure = ata_scsi_slave_config,
  338. .slave_destroy = ata_scsi_slave_destroy,
  339. .bios_param = ata_std_bios_param,
  340. };
  341. static const struct ata_port_operations mv5_ops = {
  342. .port_disable = ata_port_disable,
  343. .tf_load = ata_tf_load,
  344. .tf_read = ata_tf_read,
  345. .check_status = ata_check_status,
  346. .exec_command = ata_exec_command,
  347. .dev_select = ata_std_dev_select,
  348. .phy_reset = mv_phy_reset,
  349. .cable_detect = ata_cable_sata,
  350. .qc_prep = mv_qc_prep,
  351. .qc_issue = mv_qc_issue,
  352. .data_xfer = ata_data_xfer,
  353. .eng_timeout = mv_eng_timeout,
  354. .irq_clear = mv_irq_clear,
  355. .irq_on = ata_irq_on,
  356. .irq_ack = ata_irq_ack,
  357. .scr_read = mv5_scr_read,
  358. .scr_write = mv5_scr_write,
  359. .port_start = mv_port_start,
  360. .port_stop = mv_port_stop,
  361. };
  362. static const struct ata_port_operations mv6_ops = {
  363. .port_disable = ata_port_disable,
  364. .tf_load = ata_tf_load,
  365. .tf_read = ata_tf_read,
  366. .check_status = ata_check_status,
  367. .exec_command = ata_exec_command,
  368. .dev_select = ata_std_dev_select,
  369. .phy_reset = mv_phy_reset,
  370. .cable_detect = ata_cable_sata,
  371. .qc_prep = mv_qc_prep,
  372. .qc_issue = mv_qc_issue,
  373. .data_xfer = ata_data_xfer,
  374. .eng_timeout = mv_eng_timeout,
  375. .irq_clear = mv_irq_clear,
  376. .irq_on = ata_irq_on,
  377. .irq_ack = ata_irq_ack,
  378. .scr_read = mv_scr_read,
  379. .scr_write = mv_scr_write,
  380. .port_start = mv_port_start,
  381. .port_stop = mv_port_stop,
  382. };
  383. static const struct ata_port_operations mv_iie_ops = {
  384. .port_disable = ata_port_disable,
  385. .tf_load = ata_tf_load,
  386. .tf_read = ata_tf_read,
  387. .check_status = ata_check_status,
  388. .exec_command = ata_exec_command,
  389. .dev_select = ata_std_dev_select,
  390. .phy_reset = mv_phy_reset,
  391. .cable_detect = ata_cable_sata,
  392. .qc_prep = mv_qc_prep_iie,
  393. .qc_issue = mv_qc_issue,
  394. .data_xfer = ata_data_xfer,
  395. .eng_timeout = mv_eng_timeout,
  396. .irq_clear = mv_irq_clear,
  397. .irq_on = ata_irq_on,
  398. .irq_ack = ata_irq_ack,
  399. .scr_read = mv_scr_read,
  400. .scr_write = mv_scr_write,
  401. .port_start = mv_port_start,
  402. .port_stop = mv_port_stop,
  403. };
  404. static const struct ata_port_info mv_port_info[] = {
  405. { /* chip_504x */
  406. .flags = MV_COMMON_FLAGS,
  407. .pio_mask = 0x1f, /* pio0-4 */
  408. .udma_mask = 0x7f, /* udma0-6 */
  409. .port_ops = &mv5_ops,
  410. },
  411. { /* chip_508x */
  412. .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
  413. .pio_mask = 0x1f, /* pio0-4 */
  414. .udma_mask = 0x7f, /* udma0-6 */
  415. .port_ops = &mv5_ops,
  416. },
  417. { /* chip_5080 */
  418. .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
  419. .pio_mask = 0x1f, /* pio0-4 */
  420. .udma_mask = 0x7f, /* udma0-6 */
  421. .port_ops = &mv5_ops,
  422. },
  423. { /* chip_604x */
  424. .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
  425. .pio_mask = 0x1f, /* pio0-4 */
  426. .udma_mask = 0x7f, /* udma0-6 */
  427. .port_ops = &mv6_ops,
  428. },
  429. { /* chip_608x */
  430. .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
  431. MV_FLAG_DUAL_HC),
  432. .pio_mask = 0x1f, /* pio0-4 */
  433. .udma_mask = 0x7f, /* udma0-6 */
  434. .port_ops = &mv6_ops,
  435. },
  436. { /* chip_6042 */
  437. .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
  438. .pio_mask = 0x1f, /* pio0-4 */
  439. .udma_mask = 0x7f, /* udma0-6 */
  440. .port_ops = &mv_iie_ops,
  441. },
  442. { /* chip_7042 */
  443. .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
  444. .pio_mask = 0x1f, /* pio0-4 */
  445. .udma_mask = 0x7f, /* udma0-6 */
  446. .port_ops = &mv_iie_ops,
  447. },
  448. };
  449. static const struct pci_device_id mv_pci_tbl[] = {
  450. { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
  451. { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
  452. { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
  453. { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
  454. { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
  455. { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
  456. { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
  457. { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
  458. { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
  459. { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
  460. { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
  461. /* add Marvell 7042 support */
  462. { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
  463. { } /* terminate list */
  464. };
  465. static struct pci_driver mv_pci_driver = {
  466. .name = DRV_NAME,
  467. .id_table = mv_pci_tbl,
  468. .probe = mv_init_one,
  469. .remove = ata_pci_remove_one,
  470. };
  471. static const struct mv_hw_ops mv5xxx_ops = {
  472. .phy_errata = mv5_phy_errata,
  473. .enable_leds = mv5_enable_leds,
  474. .read_preamp = mv5_read_preamp,
  475. .reset_hc = mv5_reset_hc,
  476. .reset_flash = mv5_reset_flash,
  477. .reset_bus = mv5_reset_bus,
  478. };
  479. static const struct mv_hw_ops mv6xxx_ops = {
  480. .phy_errata = mv6_phy_errata,
  481. .enable_leds = mv6_enable_leds,
  482. .read_preamp = mv6_read_preamp,
  483. .reset_hc = mv6_reset_hc,
  484. .reset_flash = mv6_reset_flash,
  485. .reset_bus = mv_reset_pci_bus,
  486. };
  487. /*
  488. * module options
  489. */
  490. static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
  491. /* move to PCI layer or libata core? */
  492. static int pci_go_64(struct pci_dev *pdev)
  493. {
  494. int rc;
  495. if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
  496. rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
  497. if (rc) {
  498. rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  499. if (rc) {
  500. dev_printk(KERN_ERR, &pdev->dev,
  501. "64-bit DMA enable failed\n");
  502. return rc;
  503. }
  504. }
  505. } else {
  506. rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
  507. if (rc) {
  508. dev_printk(KERN_ERR, &pdev->dev,
  509. "32-bit DMA enable failed\n");
  510. return rc;
  511. }
  512. rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  513. if (rc) {
  514. dev_printk(KERN_ERR, &pdev->dev,
  515. "32-bit consistent DMA enable failed\n");
  516. return rc;
  517. }
  518. }
  519. return rc;
  520. }
  521. /*
  522. * Functions
  523. */
  524. static inline void writelfl(unsigned long data, void __iomem *addr)
  525. {
  526. writel(data, addr);
  527. (void) readl(addr); /* flush to avoid PCI posted write */
  528. }
  529. static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
  530. {
  531. return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
  532. }
  533. static inline unsigned int mv_hc_from_port(unsigned int port)
  534. {
  535. return port >> MV_PORT_HC_SHIFT;
  536. }
  537. static inline unsigned int mv_hardport_from_port(unsigned int port)
  538. {
  539. return port & MV_PORT_MASK;
  540. }
  541. static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
  542. unsigned int port)
  543. {
  544. return mv_hc_base(base, mv_hc_from_port(port));
  545. }
  546. static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
  547. {
  548. return mv_hc_base_from_port(base, port) +
  549. MV_SATAHC_ARBTR_REG_SZ +
  550. (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
  551. }
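/* Example address computation, derived from the register-size constants
 * above: for port 5, mv_hc_base() yields base + 0x20000 + 1 * 0x10000 =
 * base + 0x30000; adding the 0x2000 arbiter window plus hard port 1 *
 * 0x2000 gives a port base of base + 0x34000.
 */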
  552. static inline void __iomem *mv_ap_base(struct ata_port *ap)
  553. {
  554. return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
  555. }
  556. static inline int mv_get_hc_count(unsigned long port_flags)
  557. {
  558. return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
  559. }
  560. static void mv_irq_clear(struct ata_port *ap)
  561. {
  562. }
  563. /**
  564. * mv_start_dma - Enable eDMA engine
  565. * @base: port base address
  566. * @pp: port private data
  567. *
  568. * Verify the local cache of the eDMA state is accurate with a
  569. * WARN_ON.
  570. *
  571. * LOCKING:
  572. * Inherited from caller.
  573. */
  574. static void mv_start_dma(void __iomem *base, struct mv_port_priv *pp)
  575. {
  576. if (!(MV_PP_FLAG_EDMA_EN & pp->pp_flags)) {
  577. writelfl(EDMA_EN, base + EDMA_CMD_OFS);
  578. pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
  579. }
  580. WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
  581. }
  582. /**
  583. * mv_stop_dma - Disable eDMA engine
  584. * @ap: ATA channel to manipulate
  585. *
  586. * Verify the local cache of the eDMA state is accurate with a
  587. * WARN_ON.
  588. *
  589. * LOCKING:
  590. * Inherited from caller.
  591. */
  592. static void mv_stop_dma(struct ata_port *ap)
  593. {
  594. void __iomem *port_mmio = mv_ap_base(ap);
  595. struct mv_port_priv *pp = ap->private_data;
  596. u32 reg;
  597. int i;
  598. if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
  599. /* Disable EDMA if active. The disable bit auto clears.
  600. */
  601. writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
  602. pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
  603. } else {
  604. WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
  605. }
  606. /* now properly wait for the eDMA to stop */
  607. for (i = 1000; i > 0; i--) {
  608. reg = readl(port_mmio + EDMA_CMD_OFS);
  609. if (!(EDMA_EN & reg)) {
  610. break;
  611. }
  612. udelay(100);
  613. }
  614. if (EDMA_EN & reg) {
  615. ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
  616. /* FIXME: Consider doing a reset here to recover */
  617. }
  618. }
  619. #ifdef ATA_DEBUG
  620. static void mv_dump_mem(void __iomem *start, unsigned bytes)
  621. {
  622. int b, w;
  623. for (b = 0; b < bytes; ) {
  624. DPRINTK("%p: ", start + b);
  625. for (w = 0; b < bytes && w < 4; w++) {
  626. printk("%08x ",readl(start + b));
  627. b += sizeof(u32);
  628. }
  629. printk("\n");
  630. }
  631. }
  632. #endif
  633. static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
  634. {
  635. #ifdef ATA_DEBUG
  636. int b, w;
  637. u32 dw;
  638. for (b = 0; b < bytes; ) {
  639. DPRINTK("%02x: ", b);
  640. for (w = 0; b < bytes && w < 4; w++) {
  641. (void) pci_read_config_dword(pdev,b,&dw);
  642. printk("%08x ",dw);
  643. b += sizeof(u32);
  644. }
  645. printk("\n");
  646. }
  647. #endif
  648. }
  649. static void mv_dump_all_regs(void __iomem *mmio_base, int port,
  650. struct pci_dev *pdev)
  651. {
  652. #ifdef ATA_DEBUG
  653. void __iomem *hc_base = mv_hc_base(mmio_base,
  654. port >> MV_PORT_HC_SHIFT);
  655. void __iomem *port_base;
  656. int start_port, num_ports, p, start_hc, num_hcs, hc;
  657. if (0 > port) {
  658. start_hc = start_port = 0;
659. num_ports = 8; /* should be benign for 4-port devices */
  660. num_hcs = 2;
  661. } else {
  662. start_hc = port >> MV_PORT_HC_SHIFT;
  663. start_port = port;
  664. num_ports = num_hcs = 1;
  665. }
  666. DPRINTK("All registers for port(s) %u-%u:\n", start_port,
  667. num_ports > 1 ? num_ports - 1 : start_port);
  668. if (NULL != pdev) {
  669. DPRINTK("PCI config space regs:\n");
  670. mv_dump_pci_cfg(pdev, 0x68);
  671. }
  672. DPRINTK("PCI regs:\n");
  673. mv_dump_mem(mmio_base+0xc00, 0x3c);
  674. mv_dump_mem(mmio_base+0xd00, 0x34);
  675. mv_dump_mem(mmio_base+0xf00, 0x4);
  676. mv_dump_mem(mmio_base+0x1d00, 0x6c);
  677. for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
  678. hc_base = mv_hc_base(mmio_base, hc);
  679. DPRINTK("HC regs (HC %i):\n", hc);
  680. mv_dump_mem(hc_base, 0x1c);
  681. }
  682. for (p = start_port; p < start_port + num_ports; p++) {
  683. port_base = mv_port_base(mmio_base, p);
  684. DPRINTK("EDMA regs (port %i):\n",p);
  685. mv_dump_mem(port_base, 0x54);
  686. DPRINTK("SATA regs (port %i):\n",p);
  687. mv_dump_mem(port_base+0x300, 0x60);
  688. }
  689. #endif
  690. }
  691. static unsigned int mv_scr_offset(unsigned int sc_reg_in)
  692. {
  693. unsigned int ofs;
  694. switch (sc_reg_in) {
  695. case SCR_STATUS:
  696. case SCR_CONTROL:
  697. case SCR_ERROR:
  698. ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
  699. break;
  700. case SCR_ACTIVE:
  701. ofs = SATA_ACTIVE_OFS; /* active is not with the others */
  702. break;
  703. default:
  704. ofs = 0xffffffffU;
  705. break;
  706. }
  707. return ofs;
  708. }
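/* With libata's usual SCR register numbering (SCR_STATUS = 0, SCR_ERROR = 1,
 * SCR_CONTROL = 2), this maps to offsets 0x300, 0x304 and 0x308 respectively,
 * matching the "ctrl, err regs follow status" note on SATA_STATUS_OFS.
 */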
  709. static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
  710. {
  711. unsigned int ofs = mv_scr_offset(sc_reg_in);
  712. if (0xffffffffU != ofs)
  713. return readl(mv_ap_base(ap) + ofs);
  714. else
  715. return (u32) ofs;
  716. }
  717. static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
  718. {
  719. unsigned int ofs = mv_scr_offset(sc_reg_in);
  720. if (0xffffffffU != ofs)
  721. writelfl(val, mv_ap_base(ap) + ofs);
  722. }
  723. static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
  724. {
  725. u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
  726. /* set up non-NCQ EDMA configuration */
727. cfg &= ~(1 << 9); /* disable eQue (EDMA queuing) */
  728. if (IS_GEN_I(hpriv)) {
  729. cfg &= ~0x1f; /* clear queue depth */
  730. cfg |= (1 << 8); /* enab config burst size mask */
  731. }
  732. else if (IS_GEN_II(hpriv)) {
  733. cfg &= ~0x1f; /* clear queue depth */
  734. cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
  735. cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
  736. }
  737. else if (IS_GEN_IIE(hpriv)) {
  738. cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
  739. cfg |= (1 << 22); /* enab 4-entry host queue cache */
  740. cfg &= ~(1 << 19); /* dis 128-entry queue (for now?) */
  741. cfg |= (1 << 18); /* enab early completion */
  742. cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
  743. cfg &= ~(1 << 16); /* dis FIS-based switching (for now) */
  744. cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
  745. }
  746. writelfl(cfg, port_mmio + EDMA_CFG_OFS);
  747. }
  748. /**
  749. * mv_port_start - Port specific init/start routine.
  750. * @ap: ATA channel to manipulate
  751. *
  752. * Allocate and point to DMA memory, init port private memory,
  753. * zero indices.
  754. *
  755. * LOCKING:
  756. * Inherited from caller.
  757. */
  758. static int mv_port_start(struct ata_port *ap)
  759. {
  760. struct device *dev = ap->host->dev;
  761. struct mv_host_priv *hpriv = ap->host->private_data;
  762. struct mv_port_priv *pp;
  763. void __iomem *port_mmio = mv_ap_base(ap);
  764. void *mem;
  765. dma_addr_t mem_dma;
  766. int rc;
  767. pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
  768. if (!pp)
  769. return -ENOMEM;
  770. mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
  771. GFP_KERNEL);
  772. if (!mem)
  773. return -ENOMEM;
  774. memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
  775. rc = ata_pad_alloc(ap, dev);
  776. if (rc)
  777. return rc;
  778. /* First item in chunk of DMA memory:
  779. * 32-slot command request table (CRQB), 32 bytes each in size
  780. */
  781. pp->crqb = mem;
  782. pp->crqb_dma = mem_dma;
  783. mem += MV_CRQB_Q_SZ;
  784. mem_dma += MV_CRQB_Q_SZ;
  785. /* Second item:
  786. * 32-slot command response table (CRPB), 8 bytes each in size
  787. */
  788. pp->crpb = mem;
  789. pp->crpb_dma = mem_dma;
  790. mem += MV_CRPB_Q_SZ;
  791. mem_dma += MV_CRPB_Q_SZ;
  792. /* Third item:
  793. * Table of scatter-gather descriptors (ePRD), 16 bytes each
  794. */
  795. pp->sg_tbl = mem;
  796. pp->sg_tbl_dma = mem_dma;
  797. mv_edma_cfg(hpriv, port_mmio);
  798. writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
  799. writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
  800. port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
  801. if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
  802. writelfl(pp->crqb_dma & 0xffffffff,
  803. port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
  804. else
  805. writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
  806. writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
  807. if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
  808. writelfl(pp->crpb_dma & 0xffffffff,
  809. port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
  810. else
  811. writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
  812. writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
  813. port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
  814. /* Don't turn on EDMA here...do it before DMA commands only. Else
  815. * we'll be unable to send non-data, PIO, etc due to restricted access
  816. * to shadow regs.
  817. */
  818. ap->private_data = pp;
  819. return 0;
  820. }
  821. /**
  822. * mv_port_stop - Port specific cleanup/stop routine.
  823. * @ap: ATA channel to manipulate
  824. *
  825. * Stop DMA, cleanup port memory.
  826. *
  827. * LOCKING:
  828. * This routine uses the host lock to protect the DMA stop.
  829. */
  830. static void mv_port_stop(struct ata_port *ap)
  831. {
  832. unsigned long flags;
  833. spin_lock_irqsave(&ap->host->lock, flags);
  834. mv_stop_dma(ap);
  835. spin_unlock_irqrestore(&ap->host->lock, flags);
  836. }
  837. /**
  838. * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
  839. * @qc: queued command whose SG list to source from
  840. *
  841. * Populate the SG list and mark the last entry.
  842. *
  843. * LOCKING:
  844. * Inherited from caller.
  845. */
  846. static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
  847. {
  848. struct mv_port_priv *pp = qc->ap->private_data;
  849. unsigned int n_sg = 0;
  850. struct scatterlist *sg;
  851. struct mv_sg *mv_sg;
  852. mv_sg = pp->sg_tbl;
  853. ata_for_each_sg(sg, qc) {
  854. dma_addr_t addr = sg_dma_address(sg);
  855. u32 sg_len = sg_dma_len(sg);
  856. mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
  857. mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
  858. mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);
  859. if (ata_sg_is_last(sg, qc))
  860. mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
  861. mv_sg++;
  862. n_sg++;
  863. }
  864. return n_sg;
  865. }
  866. static inline unsigned mv_inc_q_index(unsigned index)
  867. {
  868. return (index + 1) & MV_MAX_Q_DEPTH_MASK;
  869. }
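/* The request/response queues are rings of MV_MAX_Q_DEPTH (32) slots, so the
 * increment wraps: e.g. mv_inc_q_index(31) == (31 + 1) & 0x1f == 0.
 */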
  870. static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
  871. {
  872. u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
  873. (last ? CRQB_CMD_LAST : 0);
  874. *cmdw = cpu_to_le16(tmp);
  875. }
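/* Resulting command word layout, as implied by the CRQB_CMD_* shifts above:
 * bits 7:0 carry the register data, the shadow register address starts at
 * bit 8 (CRQB_CMD_ADDR_SHIFT), bits 12:11 hold the 0x2 control-select code
 * (CRQB_CMD_CS), and bit 15 marks the last command word (CRQB_CMD_LAST).
 */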
  876. /**
  877. * mv_qc_prep - Host specific command preparation.
  878. * @qc: queued command to prepare
  879. *
  880. * This routine simply redirects to the general purpose routine
  881. * if command is not DMA. Else, it handles prep of the CRQB
  882. * (command request block), does some sanity checking, and calls
  883. * the SG load routine.
  884. *
  885. * LOCKING:
  886. * Inherited from caller.
  887. */
  888. static void mv_qc_prep(struct ata_queued_cmd *qc)
  889. {
  890. struct ata_port *ap = qc->ap;
  891. struct mv_port_priv *pp = ap->private_data;
  892. __le16 *cw;
  893. struct ata_taskfile *tf;
  894. u16 flags = 0;
  895. unsigned in_index;
  896. if (ATA_PROT_DMA != qc->tf.protocol)
  897. return;
  898. /* Fill in command request block
  899. */
  900. if (!(qc->tf.flags & ATA_TFLAG_WRITE))
  901. flags |= CRQB_FLAG_READ;
  902. WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
  903. flags |= qc->tag << CRQB_TAG_SHIFT;
  904. /* get current queue index from hardware */
  905. in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
  906. >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
  907. pp->crqb[in_index].sg_addr =
  908. cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
  909. pp->crqb[in_index].sg_addr_hi =
  910. cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
  911. pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
  912. cw = &pp->crqb[in_index].ata_cmd[0];
  913. tf = &qc->tf;
914. /* Sadly, the CRQB cannot accommodate all registers--there are
  915. * only 11 bytes...so we must pick and choose required
  916. * registers based on the command. So, we drop feature and
  917. * hob_feature for [RW] DMA commands, but they are needed for
  918. * NCQ. NCQ will drop hob_nsect.
  919. */
  920. switch (tf->command) {
  921. case ATA_CMD_READ:
  922. case ATA_CMD_READ_EXT:
  923. case ATA_CMD_WRITE:
  924. case ATA_CMD_WRITE_EXT:
  925. case ATA_CMD_WRITE_FUA_EXT:
  926. mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
  927. break;
  928. #ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
  929. case ATA_CMD_FPDMA_READ:
  930. case ATA_CMD_FPDMA_WRITE:
  931. mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
  932. mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
  933. break;
  934. #endif /* FIXME: remove this line when NCQ added */
  935. default:
  936. /* The only other commands EDMA supports in non-queued and
  937. * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
  938. * of which are defined/used by Linux. If we get here, this
  939. * driver needs work.
  940. *
  941. * FIXME: modify libata to give qc_prep a return value and
  942. * return error here.
  943. */
  944. BUG_ON(tf->command);
  945. break;
  946. }
  947. mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
  948. mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
  949. mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
  950. mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
  951. mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
  952. mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
  953. mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
  954. mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
  955. mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
  956. if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  957. return;
  958. mv_fill_sg(qc);
  959. }
  960. /**
  961. * mv_qc_prep_iie - Host specific command preparation.
  962. * @qc: queued command to prepare
  963. *
  964. * This routine simply redirects to the general purpose routine
  965. * if command is not DMA. Else, it handles prep of the CRQB
  966. * (command request block), does some sanity checking, and calls
  967. * the SG load routine.
  968. *
  969. * LOCKING:
  970. * Inherited from caller.
  971. */
  972. static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
  973. {
  974. struct ata_port *ap = qc->ap;
  975. struct mv_port_priv *pp = ap->private_data;
  976. struct mv_crqb_iie *crqb;
  977. struct ata_taskfile *tf;
  978. unsigned in_index;
  979. u32 flags = 0;
  980. if (ATA_PROT_DMA != qc->tf.protocol)
  981. return;
  982. /* Fill in Gen IIE command request block
  983. */
  984. if (!(qc->tf.flags & ATA_TFLAG_WRITE))
  985. flags |= CRQB_FLAG_READ;
  986. WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
  987. flags |= qc->tag << CRQB_TAG_SHIFT;
  988. /* get current queue index from hardware */
  989. in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
  990. >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
  991. crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
  992. crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
  993. crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
  994. crqb->flags = cpu_to_le32(flags);
  995. tf = &qc->tf;
  996. crqb->ata_cmd[0] = cpu_to_le32(
  997. (tf->command << 16) |
  998. (tf->feature << 24)
  999. );
  1000. crqb->ata_cmd[1] = cpu_to_le32(
  1001. (tf->lbal << 0) |
  1002. (tf->lbam << 8) |
  1003. (tf->lbah << 16) |
  1004. (tf->device << 24)
  1005. );
  1006. crqb->ata_cmd[2] = cpu_to_le32(
  1007. (tf->hob_lbal << 0) |
  1008. (tf->hob_lbam << 8) |
  1009. (tf->hob_lbah << 16) |
  1010. (tf->hob_feature << 24)
  1011. );
  1012. crqb->ata_cmd[3] = cpu_to_le32(
  1013. (tf->nsect << 0) |
  1014. (tf->hob_nsect << 8)
  1015. );
  1016. if (!(qc->flags & ATA_QCFLAG_DMAMAP))
  1017. return;
  1018. mv_fill_sg(qc);
  1019. }
  1020. /**
  1021. * mv_qc_issue - Initiate a command to the host
  1022. * @qc: queued command to start
  1023. *
  1024. * This routine simply redirects to the general purpose routine
  1025. * if command is not DMA. Else, it sanity checks our local
  1026. * caches of the request producer/consumer indices then enables
  1027. * DMA and bumps the request producer index.
  1028. *
  1029. * LOCKING:
  1030. * Inherited from caller.
  1031. */
  1032. static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
  1033. {
  1034. void __iomem *port_mmio = mv_ap_base(qc->ap);
  1035. struct mv_port_priv *pp = qc->ap->private_data;
  1036. unsigned in_index;
  1037. u32 in_ptr;
  1038. if (ATA_PROT_DMA != qc->tf.protocol) {
  1039. /* We're about to send a non-EDMA capable command to the
  1040. * port. Turn off EDMA so there won't be problems accessing
  1041. * shadow block, etc registers.
  1042. */
  1043. mv_stop_dma(qc->ap);
  1044. return ata_qc_issue_prot(qc);
  1045. }
  1046. in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
  1047. in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
  1048. /* until we do queuing, the queue should be empty at this point */
  1049. WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
  1050. >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
  1051. in_index = mv_inc_q_index(in_index); /* now incr producer index */
  1052. mv_start_dma(port_mmio, pp);
  1053. /* and write the request in pointer to kick the EDMA to life */
  1054. in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
  1055. in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
  1056. writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
  1057. return 0;
  1058. }
  1059. /**
  1060. * mv_get_crpb_status - get status from most recently completed cmd
  1061. * @ap: ATA channel to manipulate
  1062. *
  1063. * This routine is for use when the port is in DMA mode, when it
  1064. * will be using the CRPB (command response block) method of
  1065. * returning command completion information. We check indices
  1066. * are good, grab status, and bump the response consumer index to
  1067. * prove that we're up to date.
  1068. *
  1069. * LOCKING:
  1070. * Inherited from caller.
  1071. */
  1072. static u8 mv_get_crpb_status(struct ata_port *ap)
  1073. {
  1074. void __iomem *port_mmio = mv_ap_base(ap);
  1075. struct mv_port_priv *pp = ap->private_data;
  1076. unsigned out_index;
  1077. u32 out_ptr;
  1078. u8 ata_status;
  1079. out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
  1080. out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
  1081. ata_status = le16_to_cpu(pp->crpb[out_index].flags)
  1082. >> CRPB_FLAG_STATUS_SHIFT;
  1083. /* increment our consumer index... */
  1084. out_index = mv_inc_q_index(out_index);
  1085. /* and, until we do NCQ, there should only be 1 CRPB waiting */
  1086. WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
  1087. >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
  1088. /* write out our inc'd consumer index so EDMA knows we're caught up */
  1089. out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
  1090. out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
  1091. writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
  1092. /* Return ATA status register for completed CRPB */
  1093. return ata_status;
  1094. }
  1095. /**
  1096. * mv_err_intr - Handle error interrupts on the port
  1097. * @ap: ATA channel to manipulate
  1098. * @reset_allowed: bool: 0 == don't trigger from reset here
  1099. *
  1100. * In most cases, just clear the interrupt and move on. However,
  1101. * some cases require an eDMA reset, which is done right before
  1102. * the COMRESET in mv_phy_reset(). The SERR case requires a
  1103. * clear of pending errors in the SATA SERROR register. Finally,
  1104. * if the port disabled DMA, update our cached copy to match.
  1105. *
  1106. * LOCKING:
  1107. * Inherited from caller.
  1108. */
  1109. static void mv_err_intr(struct ata_port *ap, int reset_allowed)
  1110. {
  1111. void __iomem *port_mmio = mv_ap_base(ap);
  1112. u32 edma_err_cause, serr = 0;
  1113. edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
  1114. if (EDMA_ERR_SERR & edma_err_cause) {
  1115. sata_scr_read(ap, SCR_ERROR, &serr);
  1116. sata_scr_write_flush(ap, SCR_ERROR, serr);
  1117. }
  1118. if (EDMA_ERR_SELF_DIS & edma_err_cause) {
  1119. struct mv_port_priv *pp = ap->private_data;
  1120. pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
  1121. }
  1122. DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
  1123. "SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);
  1124. /* Clear EDMA now that SERR cleanup done */
  1125. writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
  1126. /* check for fatal here and recover if needed */
  1127. if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
  1128. mv_stop_and_reset(ap);
  1129. }
  1130. /**
  1131. * mv_host_intr - Handle all interrupts on the given host controller
  1132. * @host: host specific structure
  1133. * @relevant: port error bits relevant to this host controller
  1134. * @hc: which host controller we're to look at
  1135. *
  1136. * Read then write clear the HC interrupt status then walk each
  1137. * port connected to the HC and see if it needs servicing. Port
  1138. * success ints are reported in the HC interrupt status reg, the
  1139. * port error ints are reported in the higher level main
  1140. * interrupt status register and thus are passed in via the
  1141. * 'relevant' argument.
  1142. *
  1143. * LOCKING:
  1144. * Inherited from caller.
  1145. */
  1146. static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
  1147. {
  1148. void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
  1149. void __iomem *hc_mmio = mv_hc_base(mmio, hc);
  1150. struct ata_queued_cmd *qc;
  1151. u32 hc_irq_cause;
  1152. int shift, port, port0, hard_port, handled;
  1153. unsigned int err_mask;
  1154. if (hc == 0)
  1155. port0 = 0;
  1156. else
  1157. port0 = MV_PORTS_PER_HC;
  1158. /* we'll need the HC success int register in most cases */
  1159. hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
  1160. if (hc_irq_cause)
  1161. writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
  1162. VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
  1163. hc,relevant,hc_irq_cause);
  1164. for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
  1165. u8 ata_status = 0;
  1166. struct ata_port *ap = host->ports[port];
  1167. struct mv_port_priv *pp = ap->private_data;
  1168. hard_port = mv_hardport_from_port(port); /* range 0..3 */
  1169. handled = 0; /* ensure ata_status is set if handled++ */
  1170. /* Note that DEV_IRQ might happen spuriously during EDMA,
  1171. * and should be ignored in such cases.
  1172. * The cause of this is still under investigation.
  1173. */
  1174. if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
  1175. /* EDMA: check for response queue interrupt */
  1176. if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
  1177. ata_status = mv_get_crpb_status(ap);
  1178. handled = 1;
  1179. }
  1180. } else {
  1181. /* PIO: check for device (drive) interrupt */
  1182. if ((DEV_IRQ << hard_port) & hc_irq_cause) {
  1183. ata_status = readb(ap->ioaddr.status_addr);
  1184. handled = 1;
  1185. /* ignore spurious intr if drive still BUSY */
  1186. if (ata_status & ATA_BUSY) {
  1187. ata_status = 0;
  1188. handled = 0;
  1189. }
  1190. }
  1191. }
  1192. if (ap && (ap->flags & ATA_FLAG_DISABLED))
  1193. continue;
  1194. err_mask = ac_err_mask(ata_status);
  1195. shift = port << 1; /* (port * 2) */
  1196. if (port >= MV_PORTS_PER_HC) {
  1197. shift++; /* skip bit 8 in the HC Main IRQ reg */
  1198. }
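/* e.g. port 5: shift = 10, bumped to 11, so HC1's per-port error/done bits
 * land above bit 8 (the PORTS_0_3_COAL_DONE position) in the main cause
 * register, consistent with HC0_IRQ_PEND covering bits 0-8.
 */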
  1199. if ((PORT0_ERR << shift) & relevant) {
  1200. mv_err_intr(ap, 1);
  1201. err_mask |= AC_ERR_OTHER;
  1202. handled = 1;
  1203. }
  1204. if (handled) {
  1205. qc = ata_qc_from_tag(ap, ap->active_tag);
  1206. if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
  1207. VPRINTK("port %u IRQ found for qc, "
  1208. "ata_status 0x%x\n", port,ata_status);
  1209. /* mark qc status appropriately */
  1210. if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
  1211. qc->err_mask |= err_mask;
  1212. ata_qc_complete(qc);
  1213. }
  1214. }
  1215. }
  1216. }
  1217. VPRINTK("EXIT\n");
  1218. }
  1219. /**
  1220. * mv_interrupt -
  1221. * @irq: unused
  1222. * @dev_instance: private data; in this case the host structure
  1223. * @regs: unused
  1224. *
  1225. * Read the read only register to determine if any host
  1226. * controllers have pending interrupts. If so, call lower level
  1227. * routine to handle. Also check for PCI errors which are only
  1228. * reported here.
  1229. *
  1230. * LOCKING:
  1231. * This routine holds the host lock while processing pending
  1232. * interrupts.
  1233. */
  1234. static irqreturn_t mv_interrupt(int irq, void *dev_instance)
  1235. {
  1236. struct ata_host *host = dev_instance;
  1237. unsigned int hc, handled = 0, n_hcs;
  1238. void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
  1239. struct mv_host_priv *hpriv;
  1240. u32 irq_stat;
  1241. irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
  1242. /* check the cases where we either have nothing pending or have read
  1243. * a bogus register value which can indicate HW removal or PCI fault
  1244. */
  1245. if (!irq_stat || (0xffffffffU == irq_stat))
  1246. return IRQ_NONE;
  1247. n_hcs = mv_get_hc_count(host->ports[0]->flags);
  1248. spin_lock(&host->lock);
  1249. for (hc = 0; hc < n_hcs; hc++) {
  1250. u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
  1251. if (relevant) {
  1252. mv_host_intr(host, relevant, hc);
  1253. handled++;
  1254. }
  1255. }
  1256. hpriv = host->private_data;
  1257. if (IS_60XX(hpriv)) {
  1258. /* deal with the interrupt coalescing bits */
  1259. if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
  1260. writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
  1261. writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
  1262. writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
  1263. }
  1264. }
  1265. if (PCI_ERR & irq_stat) {
  1266. printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
  1267. readl(mmio + PCI_IRQ_CAUSE_OFS));
  1268. DPRINTK("All regs @ PCI error\n");
  1269. mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
  1270. writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
  1271. handled++;
  1272. }
  1273. spin_unlock(&host->lock);
  1274. return IRQ_RETVAL(handled);
  1275. }
  1276. static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
  1277. {
  1278. void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
  1279. unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
  1280. return hc_mmio + ofs;
  1281. }
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(addr + ofs);
	else
		return (u32) ofs;
}
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, addr + ofs);
}
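/*
 * PCI bus reset for 50xx parts.  Except on the earliest 5080 stepping
 * (rev 0), bit 0 of the expansion ROM BAR control register is set before
 * handing off to the common mv_reset_pci_bus().
 */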
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
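/*
 * Sample the PHY pre-emphasis and amplitude fields for one port so that
 * mv5_phy_errata() can restore them after a channel reset.
 */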
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
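/*
 * ZERO() is redefined before each group of reset helpers below so that the
 * same "write 0 to this register" shorthand targets whichever register
 * window (port, host controller, or PCI/host) the helper operates on.
 */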
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
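/*
 * PCI-side reset: clear bits 23:16 of the PCI mode register, zero the
 * discard timer, MSI trigger and error-latch registers, and mask/clear the
 * main and PCI interrupt cause/mask registers.
 */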
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@mmio: base address of the HBA
 *
 *	This routine only applies to 6xxx parts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
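/*
 * On 60xx parts the pre-emphasis/amplitude settings come either from fixed
 * defaults (when bit 0 of MV_RESET_CFG is clear) or from the port's
 * PHY_MODE2 register; either way they are cached in hpriv->signal[] for
 * mv6_phy_errata() to restore later.
 */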
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
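/*
 * 60xx PHY errata handling: apply the PHY_MODE2/PHY_MODE4 fixups called for
 * by the errata flags, tweak PHY_MODE3, then restore the pre-emphasis and
 * amplitude values saved by mv6_read_preamp().
 */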
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
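/*
 * Hard-reset one SATA channel: assert ATA_RST in the EDMA command register,
 * force gen2i mode on 60xx parts, release the reset, then reapply the
 * chip-specific PHY errata fixups.
 */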
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}
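/*
 * msleep() may only be used from process context; __mv_phy_reset() can also
 * run at interrupt level, so fall back to a busy-wait mdelay() there.
 */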
static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}
/**
 *	__mv_phy_reset - Perform eDMA reset followed by COMRESET
 *	@ap: ATA channel to manipulate
 *	@can_sleep: nonzero if this routine is allowed to sleep while waiting
 *
 *	Part of this is taken from __sata_phy_reset and modified to
 *	not sleep since this routine gets called from interrupt level.
 *
 *	LOCKING:
 *	Inherited from caller.  This is coded to be safe to call at
 *	interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long timeout;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	timeout = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, timeout));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
		if (retry-- <= 0)
			break;
	}

	tf.lbah = readb(ap->ioaddr.lbah_addr);
	tf.lbam = readb(ap->ioaddr.lbam_addr);
	tf.lbal = readb(ap->ioaddr.lbal_addr);
	tf.nsect = readb(ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
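/* Process-context wrapper: same as __mv_phy_reset() but allowed to sleep. */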
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}
/**
 *	mv_eng_timeout - Routine called by libata when SCSI times out I/O
 *	@ap: ATA channel to manipulate
 *
 *	Intent is to clear all pending error conditions, reset the
 *	chip/bus, fail the command, and move on.
 *
 *	LOCKING:
 *	This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	struct ata_queued_cmd *qc;
	unsigned long flags;

	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_err_intr(ap, 0);
	mv_stop_and_reset(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}
}
/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
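/*
 * Pick the chip-family operations vector and errata flags for this board,
 * keyed off the board index from the PCI ID table and the PCI revision ID.
 */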
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 rev_id, scc;
	const char *scc_s;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
		   "%u slots %u ports %s mode IRQ via %s\n",
		   (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *	mv_init_one - handle a positive probe of a Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 &mv_sht);
}
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);