/*
 * mvsas.c - Marvell 88SE6440 SAS/SATA support
 *
 * Copyright 2007 Red Hat, Inc.
 * Copyright 2008 Marvell. <kewei@marvell.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2,
 * or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; see the file COPYING.  If not,
 * write to the Free Software Foundation, 675 Mass Ave, Cambridge,
 * MA 02139, USA.
 *
 * ---------------------------------------------------------------
 * Random notes:
 * * hardware supports controlling the endian-ness of data
 *   structures.  this permits elimination of all the le32_to_cpu()
 *   and cpu_to_le32() conversions.
 */
  23. #include <linux/kernel.h>
  24. #include <linux/module.h>
  25. #include <linux/pci.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/spinlock.h>
  28. #include <linux/delay.h>
  29. #include <linux/dma-mapping.h>
  30. #include <linux/ctype.h>
  31. #include <scsi/libsas.h>
  32. #include <asm/io.h>
  33. #define DRV_NAME "mvsas"
  34. #define DRV_VERSION "0.5"
  35. #define _MV_DUMP 0
  36. #define MVS_DISABLE_NVRAM
  37. #define MVS_DISABLE_MSI
  38. #define mr32(reg) readl(regs + MVS_##reg)
  39. #define mw32(reg,val) writel((val), regs + MVS_##reg)
  40. #define mw32_f(reg,val) do { \
  41. writel((val), regs + MVS_##reg); \
  42. readl(regs + MVS_##reg); \
  43. } while (0)
  44. #define MVS_ID_NOT_MAPPED 0xff
  45. #define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
  46. /* offset for D2H FIS in the Received FIS List Structure */
  47. #define SATA_RECEIVED_D2H_FIS(reg_set) \
  48. ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
  49. #define SATA_RECEIVED_PIO_FIS(reg_set) \
  50. ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
  51. #define UNASSOC_D2H_FIS(id) \
  52. ((void *) mvi->rx_fis + 0x100 * id)
  53. #define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
  54. for ((__mc) = (__lseq_mask), (__lseq) = 0; \
  55. (__mc) != 0 && __rest; \
  56. (++__lseq), (__mc) >>= 1)
  57. /* driver compile-time configuration */
  58. enum driver_configuration {
  59. MVS_TX_RING_SZ = 1024, /* TX ring size (12-bit) */
  60. MVS_RX_RING_SZ = 1024, /* RX ring size (12-bit) */
  61. /* software requires power-of-2
  62. ring size */
  63. MVS_SLOTS = 512, /* command slots */
  64. MVS_SLOT_BUF_SZ = 8192, /* cmd tbl + IU + status + PRD */
  65. MVS_SSP_CMD_SZ = 64, /* SSP command table buffer size */
  66. MVS_ATA_CMD_SZ = 96, /* SATA command table buffer size */
  67. MVS_OAF_SZ = 64, /* Open address frame buffer size */
  68. MVS_RX_FIS_COUNT = 17, /* Optional rx'd FISs (max 17) */
  69. MVS_QUEUE_SIZE = 30, /* Support Queue depth */
  70. };
  71. /* unchangeable hardware details */
  72. enum hardware_details {
  73. MVS_MAX_PHYS = 8, /* max. possible phys */
  74. MVS_MAX_PORTS = 8, /* max. possible ports */
  75. MVS_RX_FISL_SZ = 0x400 + (MVS_RX_FIS_COUNT * 0x100),
  76. };
  77. /* peripheral registers (BAR2) */
  78. enum peripheral_registers {
  79. SPI_CTL = 0x10, /* EEPROM control */
  80. SPI_CMD = 0x14, /* EEPROM command */
  81. SPI_DATA = 0x18, /* EEPROM data */
  82. };
  83. enum peripheral_register_bits {
  84. TWSI_RDY = (1U << 7), /* EEPROM interface ready */
  85. TWSI_RD = (1U << 4), /* EEPROM read access */
  86. SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
  87. };
  88. /* enhanced mode registers (BAR4) */
  89. enum hw_registers {
  90. MVS_GBL_CTL = 0x04, /* global control */
  91. MVS_GBL_INT_STAT = 0x08, /* global irq status */
  92. MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
  93. MVS_GBL_PORT_TYPE = 0xa0, /* port type */
  94. MVS_CTL = 0x100, /* SAS/SATA port configuration */
  95. MVS_PCS = 0x104, /* SAS/SATA port control/status */
  96. MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
  97. MVS_CMD_LIST_HI = 0x10C,
  98. MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
  99. MVS_RX_FIS_HI = 0x114,
  100. MVS_TX_CFG = 0x120, /* TX configuration */
  101. MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
  102. MVS_TX_HI = 0x128,
  103. MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
  104. MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
  105. MVS_RX_CFG = 0x134, /* RX configuration */
  106. MVS_RX_LO = 0x138, /* RX (completion) ring addr */
  107. MVS_RX_HI = 0x13C,
  108. MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
  109. MVS_INT_COAL = 0x148, /* Int coalescing config */
  110. MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
  111. MVS_INT_STAT = 0x150, /* Central int status */
  112. MVS_INT_MASK = 0x154, /* Central int enable */
  113. MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
  114. MVS_INT_MASK_SRS = 0x15C,
  115. /* ports 1-3 follow after this */
  116. MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
  117. MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
  118. MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */
  119. MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */
  120. /* ports 1-3 follow after this */
  121. MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
  122. MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
  123. MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
  124. MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
  125. /* ports 1-3 follow after this */
  126. MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
  127. MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
  128. MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */
  129. MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */
  130. /* ports 1-3 follow after this */
  131. MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
  132. MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
  133. MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */
  134. MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */
  135. };
  136. enum hw_register_bits {
  137. /* MVS_GBL_CTL */
  138. INT_EN = (1U << 1), /* Global int enable */
  139. HBA_RST = (1U << 0), /* HBA reset */
  140. /* MVS_GBL_INT_STAT */
  141. INT_XOR = (1U << 4), /* XOR engine event */
  142. INT_SAS_SATA = (1U << 0), /* SAS/SATA event */
  143. /* MVS_GBL_PORT_TYPE */ /* shl for ports 1-3 */
  144. SATA_TARGET = (1U << 16), /* port0 SATA target enable */
  145. MODE_AUTO_DET_PORT7 = (1U << 15), /* port0 SAS/SATA autodetect */
  146. MODE_AUTO_DET_PORT6 = (1U << 14),
  147. MODE_AUTO_DET_PORT5 = (1U << 13),
  148. MODE_AUTO_DET_PORT4 = (1U << 12),
  149. MODE_AUTO_DET_PORT3 = (1U << 11),
  150. MODE_AUTO_DET_PORT2 = (1U << 10),
  151. MODE_AUTO_DET_PORT1 = (1U << 9),
  152. MODE_AUTO_DET_PORT0 = (1U << 8),
  153. MODE_AUTO_DET_EN = MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
  154. MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
  155. MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
  156. MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
  157. MODE_SAS_PORT7_MASK = (1U << 7), /* port0 SAS(1), SATA(0) mode */
  158. MODE_SAS_PORT6_MASK = (1U << 6),
  159. MODE_SAS_PORT5_MASK = (1U << 5),
  160. MODE_SAS_PORT4_MASK = (1U << 4),
  161. MODE_SAS_PORT3_MASK = (1U << 3),
  162. MODE_SAS_PORT2_MASK = (1U << 2),
  163. MODE_SAS_PORT1_MASK = (1U << 1),
  164. MODE_SAS_PORT0_MASK = (1U << 0),
  165. MODE_SAS_SATA = MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
  166. MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
  167. MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
  168. MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
  169. /* SAS_MODE value may be
  170. * dictated (in hw) by values
  171. * of SATA_TARGET & AUTO_DET
  172. */
  173. /* MVS_TX_CFG */
  174. TX_EN = (1U << 16), /* Enable TX */
  175. TX_RING_SZ_MASK = 0xfff, /* TX ring size, bits 11:0 */
  176. /* MVS_RX_CFG */
  177. RX_EN = (1U << 16), /* Enable RX */
  178. RX_RING_SZ_MASK = 0xfff, /* RX ring size, bits 11:0 */
  179. /* MVS_INT_COAL */
  180. COAL_EN = (1U << 16), /* Enable int coalescing */
  181. /* MVS_INT_STAT, MVS_INT_MASK */
  182. CINT_I2C = (1U << 31), /* I2C event */
  183. CINT_SW0 = (1U << 30), /* software event 0 */
  184. CINT_SW1 = (1U << 29), /* software event 1 */
  185. CINT_PRD_BC = (1U << 28), /* PRD BC err for read cmd */
  186. CINT_DMA_PCIE = (1U << 27), /* DMA to PCIE timeout */
  187. CINT_MEM = (1U << 26), /* int mem parity err */
  188. CINT_I2C_SLAVE = (1U << 25), /* slave I2C event */
  189. CINT_SRS = (1U << 3), /* SRS event */
  190. CINT_CI_STOP = (1U << 1), /* cmd issue stopped */
  191. CINT_DONE = (1U << 0), /* cmd completion */
  192. /* shl for ports 1-3 */
  193. CINT_PORT_STOPPED = (1U << 16), /* port0 stopped */
  194. CINT_PORT = (1U << 8), /* port0 event */
  195. CINT_PORT_MASK_OFFSET = 8,
  196. CINT_PORT_MASK = (0xFF << CINT_PORT_MASK_OFFSET),
  197. /* TX (delivery) ring bits */
  198. TXQ_CMD_SHIFT = 29,
  199. TXQ_CMD_SSP = 1, /* SSP protocol */
  200. TXQ_CMD_SMP = 2, /* SMP protocol */
  201. TXQ_CMD_STP = 3, /* STP/SATA protocol */
  202. TXQ_CMD_SSP_FREE_LIST = 4, /* add to SSP targ free list */
  203. TXQ_CMD_SLOT_RESET = 7, /* reset command slot */
  204. TXQ_MODE_I = (1U << 28), /* mode: 0=target,1=initiator */
  205. TXQ_PRIO_HI = (1U << 27), /* priority: 0=normal, 1=high */
  206. TXQ_SRS_SHIFT = 20, /* SATA register set */
  207. TXQ_SRS_MASK = 0x7f,
  208. TXQ_PHY_SHIFT = 12, /* PHY bitmap */
  209. TXQ_PHY_MASK = 0xff,
  210. TXQ_SLOT_MASK = 0xfff, /* slot number */
  211. /* RX (completion) ring bits */
  212. RXQ_GOOD = (1U << 23), /* Response good */
  213. RXQ_SLOT_RESET = (1U << 21), /* Slot reset complete */
  214. RXQ_CMD_RX = (1U << 20), /* target cmd received */
  215. RXQ_ATTN = (1U << 19), /* attention */
  216. RXQ_RSP = (1U << 18), /* response frame xfer'd */
  217. RXQ_ERR = (1U << 17), /* err info rec xfer'd */
  218. RXQ_DONE = (1U << 16), /* cmd complete */
  219. RXQ_SLOT_MASK = 0xfff, /* slot number */
  220. /* mvs_cmd_hdr bits */
  221. MCH_PRD_LEN_SHIFT = 16, /* 16-bit PRD table len */
  222. MCH_SSP_FR_TYPE_SHIFT = 13, /* SSP frame type */
  223. /* SSP initiator only */
  224. MCH_SSP_FR_CMD = 0x0, /* COMMAND frame */
  225. /* SSP initiator or target */
  226. MCH_SSP_FR_TASK = 0x1, /* TASK frame */
  227. /* SSP target only */
  228. MCH_SSP_FR_XFER_RDY = 0x4, /* XFER_RDY frame */
  229. MCH_SSP_FR_RESP = 0x5, /* RESPONSE frame */
  230. MCH_SSP_FR_READ = 0x6, /* Read DATA frame(s) */
  231. MCH_SSP_FR_READ_RESP = 0x7, /* ditto, plus RESPONSE */
  232. MCH_PASSTHRU = (1U << 12), /* pass-through (SSP) */
  233. MCH_FBURST = (1U << 11), /* first burst (SSP) */
  234. MCH_CHK_LEN = (1U << 10), /* chk xfer len (SSP) */
  235. MCH_RETRY = (1U << 9), /* tport layer retry (SSP) */
  236. MCH_PROTECTION = (1U << 8), /* protection info rec (SSP) */
  237. MCH_RESET = (1U << 7), /* Reset (STP/SATA) */
  238. MCH_FPDMA = (1U << 6), /* First party DMA (STP/SATA) */
  239. MCH_ATAPI = (1U << 5), /* ATAPI (STP/SATA) */
  240. MCH_BIST = (1U << 4), /* BIST activate (STP/SATA) */
  241. MCH_PMP_MASK = 0xf, /* PMP from cmd FIS (STP/SATA)*/
  242. CCTL_RST = (1U << 5), /* port logic reset */
  243. /* 0(LSB first), 1(MSB first) */
  244. CCTL_ENDIAN_DATA = (1U << 3), /* PRD data */
  245. CCTL_ENDIAN_RSP = (1U << 2), /* response frame */
  246. CCTL_ENDIAN_OPEN = (1U << 1), /* open address frame */
  247. CCTL_ENDIAN_CMD = (1U << 0), /* command table */
  248. /* MVS_Px_SER_CTLSTAT (per-phy control) */
  249. PHY_SSP_RST = (1U << 3), /* reset SSP link layer */
  250. PHY_BCAST_CHG = (1U << 2), /* broadcast(change) notif */
  251. PHY_RST_HARD = (1U << 1), /* hard reset + phy reset */
  252. PHY_RST = (1U << 0), /* phy reset */
  253. PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
  254. PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
  255. PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
  256. PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
  257. (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
  258. PHY_READY_MASK = (1U << 20),
  259. /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
  260. PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
  261. PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
  262. PHYEV_AN = (1U << 18), /* SATA async notification */
  263. PHYEV_BIST_ACT = (1U << 17), /* BIST activate FIS */
  264. PHYEV_SIG_FIS = (1U << 16), /* signature FIS */
  265. PHYEV_POOF = (1U << 12), /* phy ready from 1 -> 0 */
  266. PHYEV_IU_BIG = (1U << 11), /* IU too long err */
  267. PHYEV_IU_SMALL = (1U << 10), /* IU too short err */
  268. PHYEV_UNK_TAG = (1U << 9), /* unknown tag */
  269. PHYEV_BROAD_CH = (1U << 8), /* broadcast(CHANGE) */
  270. PHYEV_COMWAKE = (1U << 7), /* COMWAKE rx'd */
  271. PHYEV_PORT_SEL = (1U << 6), /* port selector present */
  272. PHYEV_HARD_RST = (1U << 5), /* hard reset rx'd */
  273. PHYEV_ID_TMOUT = (1U << 4), /* identify timeout */
  274. PHYEV_ID_FAIL = (1U << 3), /* identify failed */
  275. PHYEV_ID_DONE = (1U << 2), /* identify done */
  276. PHYEV_HARD_RST_DONE = (1U << 1), /* hard reset done */
  277. PHYEV_RDY_CH = (1U << 0), /* phy ready changed state */
  278. /* MVS_PCS */
  279. PCS_EN_SATA_REG_SHIFT = (16), /* Enable SATA Register Set */
  280. PCS_EN_PORT_XMT_SHIFT = (12), /* Enable Port Transmit */
  281. PCS_EN_PORT_XMT_SHIFT2 = (8), /* For 6480 */
  282. PCS_SATA_RETRY = (1U << 8), /* retry ctl FIS on R_ERR */
  283. PCS_RSP_RX_EN = (1U << 7), /* raw response rx */
  284. PCS_SELF_CLEAR = (1U << 5), /* self-clearing int mode */
  285. PCS_FIS_RX_EN = (1U << 4), /* FIS rx enable */
  286. PCS_CMD_STOP_ERR = (1U << 3), /* cmd stop-on-err enable */
  287. PCS_CMD_RST = (1U << 1), /* reset cmd issue */
  288. PCS_CMD_EN = (1U << 0), /* enable cmd issue */
  289. /* Port n Attached Device Info */
  290. PORT_DEV_SSP_TRGT = (1U << 19),
  291. PORT_DEV_SMP_TRGT = (1U << 18),
  292. PORT_DEV_STP_TRGT = (1U << 17),
  293. PORT_DEV_SSP_INIT = (1U << 11),
  294. PORT_DEV_SMP_INIT = (1U << 10),
  295. PORT_DEV_STP_INIT = (1U << 9),
  296. PORT_PHY_ID_MASK = (0xFFU << 24),
  297. PORT_DEV_TRGT_MASK = (0x7U << 17),
  298. PORT_DEV_INIT_MASK = (0x7U << 9),
  299. PORT_DEV_TYPE_MASK = (0x7U << 0),
  300. /* Port n PHY Status */
  301. PHY_RDY = (1U << 2),
  302. PHY_DW_SYNC = (1U << 1),
  303. PHY_OOB_DTCTD = (1U << 0),
  304. /* VSR */
  305. /* PHYMODE 6 (CDB) */
  306. PHY_MODE6_DTL_SPEED = (1U << 27),
  307. };
  308. enum mvs_info_flags {
  309. MVF_MSI = (1U << 0), /* MSI is enabled */
  310. MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
  311. };
  312. enum sas_cmd_port_registers {
  313. CMD_CMRST_OOB_DET = 0x100, /* COMRESET OOB detect register */
  314. CMD_CMWK_OOB_DET = 0x104, /* COMWAKE OOB detect register */
  315. CMD_CMSAS_OOB_DET = 0x108, /* COMSAS OOB detect register */
  316. CMD_BRST_OOB_DET = 0x10c, /* burst OOB detect register */
  317. CMD_OOB_SPACE = 0x110, /* OOB space control register */
  318. CMD_OOB_BURST = 0x114, /* OOB burst control register */
  319. CMD_PHY_TIMER = 0x118, /* PHY timer control register */
  320. CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
  321. CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
  322. CMD_SAS_CTL0 = 0x124, /* SAS control register 0 */
  323. CMD_SAS_CTL1 = 0x128, /* SAS control register 1 */
  324. CMD_SAS_CTL2 = 0x12c, /* SAS control register 2 */
  325. CMD_SAS_CTL3 = 0x130, /* SAS control register 3 */
  326. CMD_ID_TEST = 0x134, /* ID test register */
  327. CMD_PL_TIMER = 0x138, /* PL timer register */
  328. CMD_WD_TIMER = 0x13c, /* WD timer register */
  329. CMD_PORT_SEL_COUNT = 0x140, /* port selector count register */
  330. CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
  331. CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
  332. CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
  333. CMD_PORT_MEM_CTL0 = 0x150, /* Port Memory Control 0 */
  334. CMD_PORT_MEM_CTL1 = 0x154, /* Port Memory Control 1 */
  335. CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
  336. CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
  337. CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
  338. CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */
  339. CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
  340. CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
  341. CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
  342. CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
  343. CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
  344. CMD_STP_MEM_BIST_CTL = 0x17c, /* STP Memory BIST Control */
  345. CMD_STP_MEM_BIST_STAT0 = 0x180, /* STP Memory BIST Status 0 */
  346. CMD_STP_MEM_BIST_STAT1 = 0x184, /* STP Memory BIST Status 1 */
  347. CMD_RESET_COUNT = 0x188, /* Reset Count */
  348. CMD_MONTR_DATA_SEL = 0x18C, /* Monitor Data/Select */
  349. CMD_PLL_PHY_CONFIG = 0x190, /* PLL/PHY Configuration */
  350. CMD_PHY_CTL = 0x194, /* PHY Control and Status */
  351. CMD_PHY_TEST_COUNT0 = 0x198, /* Phy Test Count 0 */
  352. CMD_PHY_TEST_COUNT1 = 0x19C, /* Phy Test Count 1 */
  353. CMD_PHY_TEST_COUNT2 = 0x1A0, /* Phy Test Count 2 */
  354. CMD_APP_ERR_CONFIG = 0x1A4, /* Application Error Configuration */
  355. CMD_PND_FIFO_CTL0 = 0x1A8, /* Pending FIFO Control 0 */
  356. CMD_HOST_CTL = 0x1AC, /* Host Control Status */
  357. CMD_HOST_WR_DATA = 0x1B0, /* Host Write Data */
  358. CMD_HOST_RD_DATA = 0x1B4, /* Host Read Data */
  359. CMD_PHY_MODE_21 = 0x1B8, /* Phy Mode 21 */
  360. CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */
  361. CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */
  362. CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */
  363. };
  364. /* SAS/SATA configuration port registers, aka phy registers */
  365. enum sas_sata_config_port_regs {
  366. PHYR_IDENTIFY = 0x00, /* info for IDENTIFY frame */
  367. PHYR_ADDR_LO = 0x04, /* my SAS address (low) */
  368. PHYR_ADDR_HI = 0x08, /* my SAS address (high) */
  369. PHYR_ATT_DEV_INFO = 0x0C, /* attached device info */
  370. PHYR_ATT_ADDR_LO = 0x10, /* attached dev SAS addr (low) */
  371. PHYR_ATT_ADDR_HI = 0x14, /* attached dev SAS addr (high) */
  372. PHYR_SATA_CTL = 0x18, /* SATA control */
  373. PHYR_PHY_STAT = 0x1C, /* PHY status */
  374. PHYR_SATA_SIG0 = 0x20, /*port SATA signature FIS(Byte 0-3) */
  375. PHYR_SATA_SIG1 = 0x24, /*port SATA signature FIS(Byte 4-7) */
  376. PHYR_SATA_SIG2 = 0x28, /*port SATA signature FIS(Byte 8-11) */
  377. PHYR_SATA_SIG3 = 0x2c, /*port SATA signature FIS(Byte 12-15) */
  378. PHYR_R_ERR_COUNT = 0x30, /* port R_ERR count register */
  379. PHYR_CRC_ERR_COUNT = 0x34, /* port CRC error count register */
  380. PHYR_WIDE_PORT = 0x38, /* wide port participating */
  381. PHYR_CURRENT0 = 0x80, /* current connection info 0 */
  382. PHYR_CURRENT1 = 0x84, /* current connection info 1 */
  383. PHYR_CURRENT2 = 0x88, /* current connection info 2 */
  384. };
  385. /* SAS/SATA Vendor Specific Port Registers */
  386. enum sas_sata_vsp_regs {
  387. VSR_PHY_STAT = 0x00, /* Phy Status */
  388. VSR_PHY_MODE1 = 0x01, /* phy tx */
  389. VSR_PHY_MODE2 = 0x02, /* tx scc */
  390. VSR_PHY_MODE3 = 0x03, /* pll */
  391. VSR_PHY_MODE4 = 0x04, /* VCO */
  392. VSR_PHY_MODE5 = 0x05, /* Rx */
  393. VSR_PHY_MODE6 = 0x06, /* CDR */
  394. VSR_PHY_MODE7 = 0x07, /* Impedance */
  395. VSR_PHY_MODE8 = 0x08, /* Voltage */
  396. VSR_PHY_MODE9 = 0x09, /* Test */
  397. VSR_PHY_MODE10 = 0x0A, /* Power */
  398. VSR_PHY_MODE11 = 0x0B, /* Phy Mode */
  399. VSR_PHY_VS0 = 0x0C, /* Vednor Specific 0 */
  400. VSR_PHY_VS1 = 0x0D, /* Vednor Specific 1 */
  401. };
  402. enum pci_cfg_registers {
  403. PCR_PHY_CTL = 0x40,
  404. PCR_PHY_CTL2 = 0x90,
  405. PCR_DEV_CTRL = 0xE8,
  406. };
  407. enum pci_cfg_register_bits {
  408. PCTL_PWR_ON = (0xFU << 24),
  409. PCTL_OFF = (0xFU << 12),
  410. PRD_REQ_SIZE = (0x4000),
  411. PRD_REQ_MASK = (0x00007000),
  412. };
  413. enum nvram_layout_offsets {
  414. NVR_SIG = 0x00, /* 0xAA, 0x55 */
  415. NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */
  416. };
  417. enum chip_flavors {
  418. chip_6320,
  419. chip_6440,
  420. chip_6480,
  421. };
  422. enum port_type {
  423. PORT_TYPE_SAS = (1L << 1),
  424. PORT_TYPE_SATA = (1L << 0),
  425. };
  426. /* Command Table Format */
  427. enum ct_format {
  428. /* SSP */
  429. SSP_F_H = 0x00,
  430. SSP_F_IU = 0x18,
  431. SSP_F_MAX = 0x4D,
  432. /* STP */
  433. STP_CMD_FIS = 0x00,
  434. STP_ATAPI_CMD = 0x40,
  435. STP_F_MAX = 0x10,
  436. /* SMP */
  437. SMP_F_T = 0x00,
  438. SMP_F_DEP = 0x01,
  439. SMP_F_MAX = 0x101,
  440. };
  441. enum status_buffer {
  442. SB_EIR_OFF = 0x00, /* Error Information Record */
  443. SB_RFB_OFF = 0x08, /* Response Frame Buffer */
  444. SB_RFB_MAX = 0x400, /* RFB size*/
  445. };
  446. enum error_info_rec {
  447. CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
  448. };
/* static capabilities of one chip flavor (see mvs_chips[]) */
struct mvs_chip_info {
	u32 n_phy;	/* number of phys */
	u32 srs_sz;	/* number of SATA register sets (see mvs_assign_reg_set) */
	u32 slot_width;	/* NOTE(review): presumably log2 of command slots — confirm */
};
/* error information record written by the hardware (little-endian) */
struct mvs_err_info {
	__le32 flags;
	__le32 flags2;
};
/* physical region descriptor: one scatter/gather element */
struct mvs_prd {
	__le64 addr;		/* 64-bit buffer address */
	__le32 reserved;
	__le32 len;		/* 16-bit length */
};
/* command header slot, DMA'd to the hardware (all fields little-endian) */
struct mvs_cmd_hdr {
	__le32 flags;		/* PRD tbl len; SAS, SATA ctl */
	__le32 lens;		/* cmd, max resp frame len */
	__le32 tags;		/* targ port xfer tag; tag */
	__le32 data_len;	/* data xfer len */
	__le64 cmd_tbl;		/* command table address */
	__le64 open_frame;	/* open addr frame address */
	__le64 status_buf;	/* status buffer address */
	__le64 prd_tbl;		/* PRD tbl address */
	__le32 reserved[4];
};
/* per-slot bookkeeping for an in-flight command */
struct mvs_slot_info {
	struct sas_task *task;	/* owning libsas task */
	u32 n_elem;		/* number of mapped scatterlist entries */
	u32 tx;			/* TX (delivery) ring index used for this slot */
	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
	 * and PRD table
	 */
	void *buf;
	dma_addr_t buf_dma;
#if _MV_DUMP
	u32 cmd_size;		/* size of the command-table region in *buf */
#endif
	void *response;		/* points at the status buffer inside *buf */
};
/* driver-private port state layered over the libsas port */
struct mvs_port {
	struct asd_sas_port sas_port;
	u8 port_attached;	/* nonzero once a device is attached */
	u8 taskfileset;		/* SATA register set index, or MVS_ID_NOT_MAPPED */
	u8 wide_port_phymap;	/* NOTE(review): presumably member-phy bitmap — confirm */
};
/* driver-private phy state layered over the libsas phy */
struct mvs_phy {
	struct mvs_port *port;		/* port this phy belongs to */
	struct asd_sas_phy sas_phy;
	struct sas_identify identify;
	struct scsi_device *sdev;
	u64 dev_sas_addr;		/* our SAS address on this phy */
	u64 att_dev_sas_addr;		/* attached device's SAS address */
	u32 att_dev_info;
	u32 dev_info;
	u32 phy_type;			/* PORT_TYPE_SAS / PORT_TYPE_SATA */
	u32 phy_status;			/* last mvs_is_phy_ready() result */
	u32 irq_status;			/* last read port interrupt status */
	u32 frame_rcvd_size;
	u8 frame_rcvd[32];		/* identify frame / initial FIS */
	u8 phy_attached;		/* nonzero when OOB completed */
};
/* per-HBA driver state */
struct mvs_info {
	unsigned long flags;

	spinlock_t lock;		/* host-wide lock */
	struct pci_dev *pdev;		/* our device */
	void __iomem *regs;		/* enhanced mode registers */
	void __iomem *peri_regs;	/* peripheral registers */

	u8 sas_addr[SAS_ADDR_SIZE];
	struct sas_ha_struct sas;	/* SCSI/SAS glue */
	struct Scsi_Host *shost;

	__le32 *tx;			/* TX (delivery) DMA ring */
	dma_addr_t tx_dma;
	u32 tx_prod;			/* cached next-producer idx */

	__le32 *rx;			/* RX (completion) DMA ring */
	dma_addr_t rx_dma;
	u32 rx_cons;			/* RX consumer idx */

	__le32 *rx_fis;			/* RX'd FIS area */
	dma_addr_t rx_fis_dma;

	struct mvs_cmd_hdr *slot;	/* DMA command header slots */
	dma_addr_t slot_dma;

	const struct mvs_chip_info *chip;

	/* free-tag ring; see mvs_tag_alloc()/mvs_tag_clear() */
	unsigned long tags[MVS_SLOTS];
	struct mvs_slot_info slot_info[MVS_SLOTS];
	/* further per-slot information */
	struct mvs_phy phy[MVS_MAX_PHYS];
	struct mvs_port port[MVS_MAX_PHYS];

	u32 can_queue;			/* per adapter */
	u32 tag_out;			/*Get*/
	u32 tag_in;			/*Give*/
};
/* list node tying a queued task to its upper-layer driver cookie */
struct mvs_queue_task {
	struct list_head list;
	void *uldd_task;
};
/* forward declarations for helpers defined later in this file */
static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			   void *funcdata);
static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
static void mvs_detect_porttype(struct mvs_info *mvi, int i);
static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);

static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
static void mvs_scan_start(struct Scsi_Host *);
static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev);

/* SAS transport template, registered at module init */
static struct scsi_transport_template *mvs_stt;
/* per-flavor capability table: { n_phy, srs_sz, slot_width } */
static const struct mvs_chip_info mvs_chips[] = {
	[chip_6320] = { 2, 16, 9 },
	[chip_6440] = { 4, 16, 9 },
	[chip_6480] = { 8, 32, 10 },
};
/* SCSI host template: nearly everything is delegated to libsas */
static struct scsi_host_template mvs_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= sas_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.scan_finished		= mvs_scan_finished,
	.scan_start		= mvs_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.slave_alloc		= mvs_sas_slave_alloc,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
  587. static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
  588. {
  589. u32 i;
  590. u32 run;
  591. u32 offset;
  592. offset = 0;
  593. while (size) {
  594. printk("%08X : ", baseaddr + offset);
  595. if (size >= 16)
  596. run = 16;
  597. else
  598. run = size;
  599. size -= run;
  600. for (i = 0; i < 16; i++) {
  601. if (i < run)
  602. printk("%02X ", (u32)data[i]);
  603. else
  604. printk(" ");
  605. }
  606. printk(": ");
  607. for (i = 0; i < run; i++)
  608. printk("%c", isalnum(data[i]) ? data[i] : '.');
  609. printk("\n");
  610. data = &data[16];
  611. offset += run;
  612. }
  613. printk("\n");
  614. }
/* dump the 32-byte status buffer of slot @tag (debug builds only) */
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
			    enum sas_protocol proto)
{
#if _MV_DUMP
	u32 offset;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/* status buffer follows cmd table, OAF and PRD table in slot->buf */
	offset = slot->cmd_size + MVS_OAF_SZ +
	    sizeof(struct mvs_prd) * slot->n_elem;
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
		   tag);
	mvs_hexdump(32, (u8 *) slot->response,
		    (u32) slot->buf_dma + offset);
#endif
}
/* dump all hardware-visible memory for slot @tag: delivery queue entry,
 * command header, command table, open address frame, status buffer and
 * PRD table (debug builds only)
 */
static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
				enum sas_protocol proto)
{
#if _MV_DUMP
	u32 sz, w_ptr, r_ptr;
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/*Delivery Queue */
	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
	w_ptr = mr32(TX_PROD_IDX) & TX_RING_SZ_MASK;
	r_ptr = mr32(TX_CONS_IDX) & TX_RING_SZ_MASK;
	/* double shift avoids UB of a 32-bit value shifted by 32 */
	addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev,
		   "Delivery Queue Size=%04d , WRT_PTR=%04X , RD_PTR=%04X\n",
		   sz, w_ptr, r_ptr);
	dev_printk(KERN_DEBUG, &pdev->dev,
		   "Delivery Queue Base Address=0x%llX (PA)"
		   "(tx_dma=0x%llX), Entry=%04d\n",
		   addr, mvi->tx_dma, w_ptr);
	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
		    (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
	/*Command List */
	addr = mr32(CMD_LIST_HI) << 16 << 16 | mr32(CMD_LIST_LO);
	dev_printk(KERN_DEBUG, &pdev->dev,
		   "Command List Base Address=0x%llX (PA)"
		   "(slot_dma=0x%llX), Header=%03d\n",
		   addr, mvi->slot_dma, tag);
	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
	/*mvs_cmd_hdr */
	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
		    (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
	/*1.command table area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
	/*2.open address frame area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
		    (u32) slot->buf_dma + slot->cmd_size);
	/*3.status buffer */
	mvs_hba_sb_dump(mvi, tag, proto);
	/*4.PRD table */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
	mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
		    (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
		    (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
#endif
}
/* dump the next completion-queue entry (debug builds only) */
static void mvs_hba_cq_dump(struct mvs_info *mvi)
{
#if _MV_DUMP
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	/* rx[0] mirrors the hardware producer index, so the ring proper
	 * starts at rx[1]; NOTE(review): rx_cons + 1 is not masked here —
	 * presumably callers guarantee it is in range; confirm
	 */
	u32 entry = mvi->rx_cons + 1;
	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);

	/*Completion Queue */
	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%08X\n",
		   (u32) mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
	dev_printk(KERN_DEBUG, &pdev->dev,
		   "Completion List Base Address=0x%llX (PA), "
		   "CQ_Entry=%04d, CQ_WP=0x%08X\n",
		   addr, entry - 1, mvi->rx[0]);
	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
		    mvi->rx_dma + sizeof(u32) * entry);
#endif
}
  699. static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
  700. {
  701. void __iomem *regs = mvi->regs;
  702. u32 tmp;
  703. tmp = mr32(GBL_CTL);
  704. mw32(GBL_CTL, tmp | INT_EN);
  705. }
  706. static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
  707. {
  708. void __iomem *regs = mvi->regs;
  709. u32 tmp;
  710. tmp = mr32(GBL_CTL);
  711. mw32(GBL_CTL, tmp & ~INT_EN);
  712. }
  713. static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
  714. /* move to PCI layer or libata core? */
/* enable 64-bit DMA masks if the platform supports them, falling back to
 * 32-bit; returns 0 on success or the last failing rc
 */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			/* 64-bit streaming DMA is fine; retry only the
			 * coherent mask at 32 bits */
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
/* return @tag to the free-tag ring (tag_in is the give-back index) */
static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	mvi->tag_in = (mvi->tag_in + 1) & (MVS_SLOTS - 1);
	mvi->tags[mvi->tag_in] = tag;
}
/* undo the most recent mvs_tag_alloc().  NOTE(review): @tag itself is
 * ignored — only the consumer index is rewound, which assumes the caller
 * is freeing the tag it just allocated; confirm before reusing elsewhere.
 */
static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvi->tag_out = (mvi->tag_out - 1) & (MVS_SLOTS - 1);
}
/* take the next free tag from the ring into *tag_out; -EBUSY when the
 * ring is empty (consumer has caught up with the give-back index)
 */
static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
	if (mvi->tag_out != mvi->tag_in) {
		*tag_out = mvi->tags[mvi->tag_out];
		mvi->tag_out = (mvi->tag_out + 1) & (MVS_SLOTS - 1);
		return 0;
	}
	return -EBUSY;
}
  762. static void mvs_tag_init(struct mvs_info *mvi)
  763. {
  764. int i;
  765. for (i = 0; i < MVS_SLOTS; ++i)
  766. mvi->tags[i] = i;
  767. mvi->tag_out = 0;
  768. mvi->tag_in = MVS_SLOTS - 1;
  769. }
  770. #ifndef MVS_DISABLE_NVRAM
/* read one 32-bit word from the SPI EEPROM at @addr; polls the ready
 * bit for up to ~10ms, returning -EBUSY on timeout
 */
static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
{
	int timeout = 1000;

	if (addr & ~SPI_ADDR_MASK)
		return -EINVAL;

	writel(addr, regs + SPI_CMD);
	writel(TWSI_RD, regs + SPI_CTL);

	while (timeout-- > 0) {
		if (readl(regs + SPI_CTL) & TWSI_RDY) {
			*data = readl(regs + SPI_DATA);
			return 0;
		}

		udelay(10);
	}

	return -EBUSY;
}
/* read @buflen bytes from the EEPROM starting at byte offset @addr into
 * @buf, handling unaligned head and tail around 32-bit word reads
 */
static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
			    void *buf, u32 buflen)
{
	u32 addr_end, tmp_addr, i, j;
	u32 tmp = 0;
	int rc;
	u8 *tmp8, *buf8 = buf;

	addr_end = addr + buflen;
	tmp_addr = ALIGN(addr, 4);	/* rounds UP to the next word */
	if (addr > 0xff)
		return -EINVAL;

	j = addr & 0x3;
	if (j) {
		/* unaligned head: NOTE(review): this reads the word at the
		 * rounded-UP address and takes bytes j..3 from it — verify
		 * against the device's SPI word-addressing semantics
		 */
		rc = mvs_eep_read(regs, tmp_addr, &tmp);
		if (rc)
			return rc;

		tmp8 = (u8 *)&tmp;
		for (i = j; i < 4; i++)
			*buf8++ = tmp8[i];

		tmp_addr += 4;
	}

	/* whole aligned words in the middle */
	for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
		rc = mvs_eep_read(regs, tmp_addr, &tmp);
		if (rc)
			return rc;

		memcpy(buf8, &tmp, 4);
		buf8 += 4;
	}

	/* unaligned tail: copy only the bytes still needed */
	if (tmp_addr < addr_end) {
		rc = mvs_eep_read(regs, tmp_addr, &tmp);
		if (rc)
			return rc;

		tmp8 = (u8 *)&tmp;
		j = addr_end - tmp_addr;
		for (i = 0; i < j; i++)
			*buf8++ = tmp8[i];

		tmp_addr += 4;
	}

	return 0;
}
  827. #endif
  828. static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
  829. void *buf, u32 buflen)
  830. {
  831. #ifndef MVS_DISABLE_NVRAM
  832. void __iomem *regs = mvi->regs;
  833. int rc, i;
  834. u32 sum;
  835. u8 hdr[2], *tmp;
  836. const char *msg;
  837. rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
  838. if (rc) {
  839. msg = "nvram hdr read failed";
  840. goto err_out;
  841. }
  842. rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
  843. if (rc) {
  844. msg = "nvram read failed";
  845. goto err_out;
  846. }
  847. if (hdr[0] != 0x5A) {
  848. /* entry id */
  849. msg = "invalid nvram entry id";
  850. rc = -ENOENT;
  851. goto err_out;
  852. }
  853. tmp = buf;
  854. sum = ((u32)hdr[0]) + ((u32)hdr[1]);
  855. for (i = 0; i < buflen; i++)
  856. sum += ((u32)tmp[i]);
  857. if (sum) {
  858. msg = "nvram checksum failure";
  859. rc = -EILSEQ;
  860. goto err_out;
  861. }
  862. return 0;
  863. err_out:
  864. dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
  865. return rc;
  866. #else
  867. /* FIXME , For SAS target mode */
  868. memcpy(buf, "\x00\x00\xab\x11\x30\x04\x05\x50", 8);
  869. return 0;
  870. #endif
  871. }
/* hand the received identify frame (or FIS) for phy @i to libsas via a
 * PORTE_BYTES_DMAED port event; no-op when the phy is not attached
 */
static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];

	if (!phy->phy_attached)
		return;

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		/* fill the locally-sourced fields of the identify frame */
		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* TODO */
	}

	mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
	mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
				   PORTE_BYTES_DMAED);
}
  890. static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
  891. {
  892. /* give the phy enabling interrupt event time to come in (1s
  893. * is empirically about all it takes) */
  894. if (time < HZ)
  895. return 0;
  896. /* Wait for discovery to finish */
  897. scsi_flush_work(shost);
  898. return 1;
  899. }
  900. static void mvs_scan_start(struct Scsi_Host *shost)
  901. {
  902. int i;
  903. struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
  904. for (i = 0; i < mvi->chip->n_phy; ++i) {
  905. mvs_bytes_dmaed(mvi, i);
  906. }
  907. }
  908. static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev)
  909. {
  910. int rc;
  911. rc = sas_slave_alloc(scsi_dev);
  912. return rc;
  913. }
/* service the per-port phy-event interrupt status for @port_no:
 * loss-of-signal, COMWAKE, signature-FIS/identify completion and
 * broadcast-change events are translated into libsas notifications;
 * the handled bits are written back to the status register at the end
 */
static void mvs_int_port(struct mvs_info *mvi, int port_no, u32 events)
{
	struct pci_dev *pdev = mvi->pdev;
	struct sas_ha_struct *sas_ha = &mvi->sas;
	struct mvs_phy *phy = &mvi->phy[port_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->irq_status = mvs_read_port_irq_stat(mvi, port_no);
	/*
	* events is port event now ,
	* we need check the interrupt status which belongs to per port.
	*/
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Port %d Event = %X\n",
		port_no, phy->irq_status);

	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
		/* phy dropped or decode error: either report loss of
		 * signal or force a link reset to recover */
		if (!mvs_is_phy_ready(mvi, port_no)) {
			sas_phy_disconnected(sas_phy);
			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		} else
			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
	}
	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
		if (phy->irq_status & PHYEV_COMWAKE) {
			/* SATA device woke up: start watching for its
			 * signature FIS */
			u32 tmp = mvs_read_port_irq_mask(mvi, port_no);
			mvs_write_port_irq_mask(mvi, port_no,
						tmp | PHYEV_SIG_FIS);
		}
		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
			phy->phy_status = mvs_is_phy_ready(mvi, port_no);
			if (phy->phy_status) {
				mvs_detect_porttype(mvi, port_no);

				if (phy->phy_type & PORT_TYPE_SATA) {
					/* signature FIS seen; stop
					 * listening for it */
					u32 tmp = mvs_read_port_irq_mask(mvi,
								port_no);
					tmp &= ~PHYEV_SIG_FIS;
					mvs_write_port_irq_mask(mvi,
								port_no, tmp);
				}

				mvs_update_phyinfo(mvi, port_no, 0);
				sas_ha->notify_phy_event(sas_phy,
							PHYE_OOB_DONE);
				mvs_bytes_dmaed(mvi, port_no);
			} else {
				dev_printk(KERN_DEBUG, &pdev->dev,
					"plugin interrupt but phy is gone\n");
				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
						NULL);
			}
		} else if (phy->irq_status & PHYEV_BROAD_CH)
			sas_ha->notify_port_event(sas_phy,
						PORTE_BROADCAST_RCVD);
	}
	mvs_write_port_irq_stat(mvi, port_no, phy->irq_status);
}
/* SATA register-set (SRS) interrupt handler — not implemented yet */
static void mvs_int_sata(struct mvs_info *mvi)
{
	/* FIXME */
}
/* tear down slot @slot_idx after completion: unmap its DMA scatterlists
 * (data for non-ATA, request/response for SMP), detach the task and
 * return the tag to the free ring
 */
static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			pci_unmap_sg(mvi->pdev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
			     PCI_DMA_FROMDEVICE);
		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
			     PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	slot->task = NULL;
	mvs_tag_clear(mvi, slot_idx);
}
  996. static void mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
  997. u32 slot_idx)
  998. {
  999. struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
  1000. u64 err_dw0 = *(u32 *) slot->response;
  1001. void __iomem *regs = mvi->regs;
  1002. u32 tmp;
  1003. if (err_dw0 & CMD_ISS_STPD)
  1004. if (sas_protocol_ata(task->task_proto)) {
  1005. tmp = mr32(INT_STAT_SRS);
  1006. mw32(INT_STAT_SRS, tmp & 0xFFFF);
  1007. }
  1008. mvs_hba_sb_dump(mvi, slot_idx, task->task_proto);
  1009. }
  1010. static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
  1011. {
  1012. u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
  1013. struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
  1014. struct sas_task *task = slot->task;
  1015. struct task_status_struct *tstat = &task->task_status;
  1016. struct mvs_port *port = &mvi->port[task->dev->port->id];
  1017. bool aborted;
  1018. void *to;
  1019. spin_lock(&task->task_state_lock);
  1020. aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
  1021. if (!aborted) {
  1022. task->task_state_flags &=
  1023. ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
  1024. task->task_state_flags |= SAS_TASK_STATE_DONE;
  1025. }
  1026. spin_unlock(&task->task_state_lock);
  1027. if (aborted)
  1028. return -1;
  1029. memset(tstat, 0, sizeof(*tstat));
  1030. tstat->resp = SAS_TASK_COMPLETE;
  1031. if (unlikely(!port->port_attached)) {
  1032. tstat->stat = SAS_PHY_DOWN;
  1033. goto out;
  1034. }
  1035. /* error info record present */
  1036. if ((rx_desc & RXQ_ERR) && (*(u64 *) slot->response)) {
  1037. tstat->stat = SAM_CHECK_COND;
  1038. mvs_slot_err(mvi, task, slot_idx);
  1039. goto out;
  1040. }
  1041. switch (task->task_proto) {
  1042. case SAS_PROTOCOL_SSP:
  1043. /* hw says status == 0, datapres == 0 */
  1044. if (rx_desc & RXQ_GOOD) {
  1045. tstat->stat = SAM_GOOD;
  1046. tstat->resp = SAS_TASK_COMPLETE;
  1047. }
  1048. /* response frame present */
  1049. else if (rx_desc & RXQ_RSP) {
  1050. struct ssp_response_iu *iu =
  1051. slot->response + sizeof(struct mvs_err_info);
  1052. sas_ssp_task_response(&mvi->pdev->dev, task, iu);
  1053. }
  1054. /* should never happen? */
  1055. else
  1056. tstat->stat = SAM_CHECK_COND;
  1057. break;
  1058. case SAS_PROTOCOL_SMP: {
  1059. struct scatterlist *sg_resp = &task->smp_task.smp_resp;
  1060. tstat->stat = SAM_GOOD;
  1061. to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
  1062. memcpy(to + sg_resp->offset,
  1063. slot->response + sizeof(struct mvs_err_info),
  1064. sg_dma_len(sg_resp));
  1065. kunmap_atomic(to, KM_IRQ0);
  1066. break;
  1067. }
  1068. case SAS_PROTOCOL_SATA:
  1069. case SAS_PROTOCOL_STP:
  1070. case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
  1071. struct ata_task_resp *resp =
  1072. (struct ata_task_resp *)tstat->buf;
  1073. if ((rx_desc & (RXQ_DONE | RXQ_ERR | RXQ_ATTN)) ==
  1074. RXQ_DONE)
  1075. tstat->stat = SAM_GOOD;
  1076. else
  1077. tstat->stat = SAM_CHECK_COND;
  1078. resp->frame_len = sizeof(struct dev_to_host_fis);
  1079. memcpy(&resp->ending_fis[0],
  1080. SATA_RECEIVED_D2H_FIS(port->taskfileset),
  1081. sizeof(struct dev_to_host_fis));
  1082. if (resp->ending_fis[2] & ATA_ERR)
  1083. mvs_hexdump(16, resp->ending_fis, 0);
  1084. break;
  1085. }
  1086. default:
  1087. tstat->stat = SAM_CHECK_COND;
  1088. break;
  1089. }
  1090. out:
  1091. mvs_slot_free(mvi, task, slot, slot_idx);
  1092. task->task_done(task);
  1093. return tstat->stat;
  1094. }
/* full interrupt service: drain the RX completion ring, dispatch
 * per-port phy events and SATA SRS events, then acknowledge the
 * handled status bits
 */
static void mvs_int_full(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, stat;
	int i;

	stat = mr32(INT_STAT);

	mvs_int_rx(mvi, false);

	for (i = 0; i < MVS_MAX_PORTS; i++) {
		tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
		if (tmp)
			mvs_int_port(mvi, i, tmp);
	}

	if (stat & CINT_SRS)
		mvs_int_sata(mvi);

	mw32(INT_STAT, stat);
}
/* drain the RX (completion) ring up to the hardware producer index,
 * completing DONE descriptors and logging ATTN/ERR ones; when @self_clear
 * is set and an attention was seen, re-run the full interrupt handler
 */
static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	void __iomem *regs = mvi->regs;
	u32 rx_prod_idx, rx_desc;
	bool attn = false;
	struct pci_dev *pdev = mvi->pdev;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
	if (rx_prod_idx == 0xfff) {	/* h/w hasn't touched RX ring yet */
		mvi->rx_cons = 0xfff;
		return 0;
	}

	/* The CMPL_Q may come late, read from register and try again
	 * note: if coalescing is enabled,
	 * it will need to read from register every time for sure
	 */
	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	/* first completion ever: pretend we are one behind index 0 */
	if (mvi->rx_cons == 0xfff)
		mvi->rx_cons = MVS_RX_RING_SZ - 1;

	while (mvi->rx_cons != rx_prod_idx) {
		/* increment our internal RX consumer pointer */
		mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1);

		/* +1 skips the producer-index mirror at rx[0] */
		rx_desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]);

		mvs_hba_cq_dump(mvi);

		if (unlikely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
			dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
				   rx_desc);
		} else if (rx_desc & RXQ_ERR) {
			dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
				   rx_desc);
		}
	}

	if (attn && self_clear)
		mvs_int_full(mvi);

	return 0;
}
/* legacy (shared-line) interrupt handler: verify the interrupt is ours
 * via the global status register, then service everything under the
 * host lock
 */
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
	struct mvs_info *mvi = opaque;
	void __iomem *regs = mvi->regs;
	u32 stat;

	stat = mr32(GBL_INT_STAT);

	/* clear CMD_CMPLT ASAP */
	mw32_f(INT_STAT, CINT_DONE);

	/* 0: not ours; ~0: device gone/surprise-removed */
	if (stat == 0 || stat == 0xffffffff)
		return IRQ_NONE;

	spin_lock(&mvi->lock);

	mvs_int_full(mvi);

	spin_unlock(&mvi->lock);

	return IRQ_HANDLED;
}
  1170. #ifndef MVS_DISABLE_MSI
  1171. static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
  1172. {
  1173. struct mvs_info *mvi = opaque;
  1174. spin_lock(&mvi->lock);
  1175. mvs_int_rx(mvi, true);
  1176. spin_unlock(&mvi->lock);
  1177. return IRQ_HANDLED;
  1178. }
  1179. #endif
/* parameter bundle passed to the per-protocol task-prep helpers */
struct mvs_task_exec_info {
	struct sas_task *task;
	struct mvs_cmd_hdr *hdr;	/* command header slot to fill */
	struct mvs_port *port;
	u32 tag;			/* allocated slot tag */
	int n_elem;			/* mapped data scatterlist entries */
};
/* prepare an SMP task: DMA-map the request/response scatterlists, carve
 * the slot's DMA buffer into OAF / PRD / status regions, fill the command
 * header and the TX ring entry.  Returns 0 or a negative errno (mappings
 * are undone on failure).
 */
static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	struct mvs_prd *buf_prd;
	struct scatterlist *sg;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	struct asd_sas_port *sas_port = task->dev->port;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#if _MV_DUMP
	u8 *buf_cmd;
	void *from;
#endif
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = sg_dma_len(sg_resp);

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

#if _MV_DUMP
	/* debug builds copy the request into the slot buffer so it can be
	 * hexdumped later; otherwise the mapped scatterlist is used directly */
	buf_cmd = buf_tmp;
	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
	buf_tmp += req_len;
	buf_tmp_dma += req_len;
	slot->cmd_size = req_len;
#else
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

#if _MV_DUMP
	/* copy cmd table */
	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
	memcpy(buf_cmd, from + sg_req->offset, req_len);
	kunmap_atomic(from, KM_IRQ0);
#endif
	return 0;

err_out_2:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}
/* release the port's SATA register set: clear its enable bit (sets 0-15
 * live in PCS, 16+ in CTL), ack any pending SRS interrupt for it, and
 * mark the port unmapped
 */
static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, offs;
	u8 *tfs = &port->taskfileset;

	if (*tfs == MVS_ID_NOT_MAPPED)
		return;

	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
	if (*tfs < 16) {
		tmp = mr32(PCS);
		mw32(PCS, tmp & ~offs);
	} else {
		tmp = mr32(CTL);
		mw32(CTL, tmp & ~offs);
	}

	/* write-1-to-clear the pending SRS interrupt, if any */
	tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
	if (tmp)
		mw32(INT_STAT_SRS, tmp);

	*tfs = MVS_ID_NOT_MAPPED;
}
/* find a free SATA register set for @port and claim it (sets 0-15 are
 * enabled via PCS, 16+ via CTL); returns 0 on success (or if already
 * mapped), MVS_ID_NOT_MAPPED when all sets are in use
 */
static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
{
	int i;
	u32 tmp, offs;
	void __iomem *regs = mvi->regs;

	if (port->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;

	tmp = mr32(PCS);

	for (i = 0; i < mvi->chip->srs_sz; i++) {
		if (i == 16)
			tmp = mr32(CTL);	/* switch to the high bank */
		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
		if (!(tmp & offs)) {
			port->taskfileset = i;

			if (i < 16)
				mw32(PCS, tmp | offs);
			else
				mw32(CTL, tmp | offs);
			/* clear any stale SRS interrupt for this set */
			tmp = mr32(INT_STAT_SRS) & (1U << i);
			if (tmp)
				mw32(INT_STAT_SRS, tmp);
			return 0;
		}
	}
	return MVS_ID_NOT_MAPPED;
}
  1342. static u32 mvs_get_ncq_tag(struct sas_task *task)
  1343. {
  1344. u32 tag = 0;
  1345. struct ata_queued_cmd *qc = task->uldd_task;
  1346. if (qc)
  1347. tag = qc->tag;
  1348. return tag;
  1349. }
/*
 * Build a SATA/STP command in slot tei->tag: reserve a SATA register
 * set for the port, write the TX (delivery-queue) entry and command
 * header, then carve the slot's DMA buffer into command FIS/ATAPI CDB,
 * open address frame, PRD table and status buffer regions.
 *
 * Returns 0 on success, -EBUSY when no free SATA register set exists.
 */
static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct mvs_port *port = tei->port;
	u32 tag = tei->tag;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
		return -EBUSY;

	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	/* delivery-queue entry: STP command for this tag / phy mask /
	 * SATA register set */
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
					(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
					(sas_port->phy_mask << TXQ_PHY_SHIFT) |
					(port->taskfileset << TXQ_SRS_SHIFT));

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		/* IDENTIFY PACKET DEVICE is a regular ATA command even on
		 * ATAPI devices */
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	/* FIXME: fill in port multiplier number */

	hdr->flags = cpu_to_le32(flags);

	/* FIXME: the low order order 5 bits for the TAG if enable NCQ */
	if (task->ata_task.use_ncq) {
		hdr->tags = cpu_to_le32(mvs_get_ncq_tag(task));
		/*Fill in task file */
		/* NOTE(review): hdr->tags is already little-endian here, and
		 * sector_count is a u8 target; this shift looks endian-unsafe
		 * on big-endian hosts — confirm. */
		task->ata_task.fis.sector_count = hdr->tags << 3;
	} else
		hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_ATA_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP.  unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	/* FIXME: probably unused, for SATA.  kept here just in case
	 * we get a STP/SATA error information record
	 */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	req_len = sizeof(struct host_to_dev_fis);
	/* NOTE(review): unlike the SSP variant, MVS_OAF_SZ is not subtracted
	 * here, so resp_len slightly overstates the remaining space before
	 * being clamped to max_resp_len — confirm intended. */
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
	    sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1;	/* initiator, STP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}
/*
 * Build an SSP COMMAND frame in slot tei->tag: write the TX
 * (delivery-queue) entry and command header, carve the slot's DMA
 * buffer into command table, open address frame, PRD table and status
 * buffer, then fill the SSP frame header and command IU.
 *
 * Always returns 0.
 */
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;

	slot = &mvi->slot_info[tag];

	slot->tx = mvi->tx_prod;
	/* delivery-queue entry: SSP command routed to the wide-port phys */
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(port->wide_port_phymap << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);	/* first-burst bit in the command IU */
	}
	hdr->flags = cpu_to_le32(flags |
				 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
				 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));

	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_SSP_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;	/* hdr + command IU */

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1;	/* initiator, SSP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
	ssp_hdr->frame_type = SSP_COMMAND;
	memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in command frame IU */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	buf_cmd[9] = fburst | task->ssp_task.task_attr |
			(task->ssp_task.task_prio << 3);
	memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}
/*
 * Queue one or more sas_tasks (chained via t->list when num > 1) to the
 * hardware delivery queue.  For each task: map the scatter list (non-ATA
 * only; ATA buffers arrive pre-mapped with n_elem = num_scatter), allocate
 * a slot tag, build the protocol-specific command, and advance tx_prod.
 * The hardware producer index is written either inline (single task) or
 * once on the error path.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
{
	struct domain_device *dev = task->dev;
	struct mvs_info *mvi = dev->port->ha->lldd_ha;
	struct pci_dev *pdev = mvi->pdev;
	void __iomem *regs = mvi->regs;
	struct mvs_task_exec_info tei;
	struct sas_task *t = task;
	u32 tag = 0xdeadbeef, rc, n_elem = 0;
	unsigned long flags;
	u32 n = num, pass = 0;

	spin_lock_irqsave(&mvi->lock, flags);

	do {
		tei.port = &mvi->port[dev->port->id];

		if (!tei.port->port_attached) {
			struct task_status_struct *ts = &t->task_status;

			ts->stat = SAS_PHY_DOWN;
			/* NOTE(review): task_done is called with mvi->lock
			 * held — confirm the completion path cannot re-enter
			 * this driver and deadlock. */
			t->task_done(t);
			rc = 0;
			goto exec_exit;
		}

		if (!sas_protocol_ata(t->task_proto)) {
			if (t->num_scatter) {
				n_elem = pci_map_sg(mvi->pdev, t->scatter,
						    t->num_scatter,
						    t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out;
				}
			}
		} else {
			/* ATA buffers are already DMA-mapped by libata */
			n_elem = t->num_scatter;
		}

		rc = mvs_tag_alloc(mvi, &tag);
		if (rc)
			goto err_out;

		mvi->slot_info[tag].task = t;
		mvi->slot_info[tag].n_elem = n_elem;
		memset(mvi->slot_info[tag].buf, 0, MVS_SLOT_BUF_SZ);
		tei.task = t;
		tei.hdr = &mvi->slot[tag];
		tei.tag = tag;
		tei.n_elem = n_elem;

		switch (t->task_proto) {
		case SAS_PROTOCOL_SMP:
			rc = mvs_task_prep_smp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SSP:
			rc = mvs_task_prep_ssp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
			rc = mvs_task_prep_ata(mvi, &tei);
			break;
		default:
			dev_printk(KERN_ERR, &pdev->dev,
				   "unknown sas_task proto: 0x%x\n",
				   t->task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc)
			goto err_out_tag;

		/* TODO: select normal or high priority */

		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);

		if (n == 1) {
			spin_unlock_irqrestore(&mvi->lock, flags);
			/* kick hardware: tx_prod still points at the slot
			 * just filled; it is advanced below */
			mw32(TX_PROD_IDX, mvi->tx_prod);
		}

		mvs_hba_memory_dump(mvi, tag, t->task_proto);

		++pass;
		/* NOTE(review): on the n == 1 path this tx_prod update runs
		 * after mvi->lock has been dropped — confirm no concurrent
		 * submitter can race here. */
		mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);

		if (n == 1)
			break;

		t = list_entry(t->list.next, struct sas_task, list);
	} while (--n);

	return 0;

err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:
	dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
	if (!sas_protocol_ata(t->task_proto))
		if (n_elem)
			pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
				     t->data_dir);
exec_exit:
	/* tell the hardware about the slots queued before the failure */
	if (pass)
		mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);
	return rc;
}
  1638. static int mvs_task_abort(struct sas_task *task)
  1639. {
  1640. int rc = 1;
  1641. unsigned long flags;
  1642. struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
  1643. struct pci_dev *pdev = mvi->pdev;
  1644. spin_lock_irqsave(&task->task_state_lock, flags);
  1645. if (task->task_state_flags & SAS_TASK_STATE_DONE) {
  1646. rc = TMF_RESP_FUNC_COMPLETE;
  1647. goto out_done;
  1648. }
  1649. spin_unlock_irqrestore(&task->task_state_lock, flags);
  1650. /*FIXME*/
  1651. rc = TMF_RESP_FUNC_COMPLETE;
  1652. switch (task->task_proto) {
  1653. case SAS_PROTOCOL_SMP:
  1654. dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! ");
  1655. break;
  1656. case SAS_PROTOCOL_SSP:
  1657. dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! ");
  1658. break;
  1659. case SAS_PROTOCOL_SATA:
  1660. case SAS_PROTOCOL_STP:
  1661. case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
  1662. dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! "
  1663. "Dump D2H FIS: \n");
  1664. mvs_hexdump(sizeof(struct host_to_dev_fis),
  1665. (void *)&task->ata_task.fis, 0);
  1666. dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
  1667. mvs_hexdump(16, task->ata_task.atapi_packet, 0);
  1668. break;
  1669. }
  1670. default:
  1671. break;
  1672. }
  1673. out_done:
  1674. return rc;
  1675. }
  1676. static void mvs_free(struct mvs_info *mvi)
  1677. {
  1678. int i;
  1679. if (!mvi)
  1680. return;
  1681. for (i = 0; i < MVS_SLOTS; i++) {
  1682. struct mvs_slot_info *slot = &mvi->slot_info[i];
  1683. if (slot->buf)
  1684. dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
  1685. slot->buf, slot->buf_dma);
  1686. }
  1687. if (mvi->tx)
  1688. dma_free_coherent(&mvi->pdev->dev,
  1689. sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
  1690. mvi->tx, mvi->tx_dma);
  1691. if (mvi->rx_fis)
  1692. dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
  1693. mvi->rx_fis, mvi->rx_fis_dma);
  1694. if (mvi->rx)
  1695. dma_free_coherent(&mvi->pdev->dev,
  1696. sizeof(*mvi->rx) * MVS_RX_RING_SZ,
  1697. mvi->rx, mvi->rx_dma);
  1698. if (mvi->slot)
  1699. dma_free_coherent(&mvi->pdev->dev,
  1700. sizeof(*mvi->slot) * MVS_SLOTS,
  1701. mvi->slot, mvi->slot_dma);
  1702. #ifdef MVS_ENABLE_PERI
  1703. if (mvi->peri_regs)
  1704. iounmap(mvi->peri_regs);
  1705. #endif
  1706. if (mvi->regs)
  1707. iounmap(mvi->regs);
  1708. if (mvi->shost)
  1709. scsi_host_put(mvi->shost);
  1710. kfree(mvi->sas.sas_port);
  1711. kfree(mvi->sas.sas_phy);
  1712. kfree(mvi);
  1713. }
  1714. /* FIXME: locking? */
  1715. static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
  1716. void *funcdata)
  1717. {
  1718. struct mvs_info *mvi = sas_phy->ha->lldd_ha;
  1719. int rc = 0, phy_id = sas_phy->id;
  1720. u32 tmp;
  1721. tmp = mvs_read_phy_ctl(mvi, phy_id);
  1722. switch (func) {
  1723. case PHY_FUNC_SET_LINK_RATE:{
  1724. struct sas_phy_linkrates *rates = funcdata;
  1725. u32 lrmin = 0, lrmax = 0;
  1726. lrmin = (rates->minimum_linkrate << 8);
  1727. lrmax = (rates->maximum_linkrate << 12);
  1728. if (lrmin) {
  1729. tmp &= ~(0xf << 8);
  1730. tmp |= lrmin;
  1731. }
  1732. if (lrmax) {
  1733. tmp &= ~(0xf << 12);
  1734. tmp |= lrmax;
  1735. }
  1736. mvs_write_phy_ctl(mvi, phy_id, tmp);
  1737. break;
  1738. }
  1739. case PHY_FUNC_HARD_RESET:
  1740. if (tmp & PHY_RST_HARD)
  1741. break;
  1742. mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
  1743. break;
  1744. case PHY_FUNC_LINK_RESET:
  1745. mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
  1746. break;
  1747. case PHY_FUNC_DISABLE:
  1748. case PHY_FUNC_RELEASE_SPINUP_HOLD:
  1749. default:
  1750. rc = -EOPNOTSUPP;
  1751. }
  1752. return rc;
  1753. }
  1754. static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
  1755. {
  1756. struct mvs_phy *phy = &mvi->phy[phy_id];
  1757. struct asd_sas_phy *sas_phy = &phy->sas_phy;
  1758. sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
  1759. sas_phy->class = SAS;
  1760. sas_phy->iproto = SAS_PROTOCOL_ALL;
  1761. sas_phy->tproto = 0;
  1762. sas_phy->type = PHY_TYPE_PHYSICAL;
  1763. sas_phy->role = PHY_ROLE_INITIATOR;
  1764. sas_phy->oob_mode = OOB_NOT_CONNECTED;
  1765. sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
  1766. sas_phy->id = phy_id;
  1767. sas_phy->sas_addr = &mvi->sas_addr[0];
  1768. sas_phy->frame_rcvd = &phy->frame_rcvd[0];
  1769. sas_phy->ha = &mvi->sas;
  1770. sas_phy->lldd_phy = phy;
  1771. }
  1772. static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
  1773. const struct pci_device_id *ent)
  1774. {
  1775. struct mvs_info *mvi;
  1776. unsigned long res_start, res_len, res_flag;
  1777. struct asd_sas_phy **arr_phy;
  1778. struct asd_sas_port **arr_port;
  1779. const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
  1780. int i;
  1781. /*
  1782. * alloc and init our per-HBA mvs_info struct
  1783. */
  1784. mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
  1785. if (!mvi)
  1786. return NULL;
  1787. spin_lock_init(&mvi->lock);
  1788. mvi->pdev = pdev;
  1789. mvi->chip = chip;
  1790. if (pdev->device == 0x6440 && pdev->revision == 0)
  1791. mvi->flags |= MVF_PHY_PWR_FIX;
  1792. /*
  1793. * alloc and init SCSI, SAS glue
  1794. */
  1795. mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
  1796. if (!mvi->shost)
  1797. goto err_out;
  1798. arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
  1799. arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
  1800. if (!arr_phy || !arr_port)
  1801. goto err_out;
  1802. for (i = 0; i < MVS_MAX_PHYS; i++) {
  1803. mvs_phy_init(mvi, i);
  1804. arr_phy[i] = &mvi->phy[i].sas_phy;
  1805. arr_port[i] = &mvi->port[i].sas_port;
  1806. }
  1807. SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
  1808. mvi->shost->transportt = mvs_stt;
  1809. mvi->shost->max_id = 21;
  1810. mvi->shost->max_lun = ~0;
  1811. mvi->shost->max_channel = 0;
  1812. mvi->shost->max_cmd_len = 16;
  1813. mvi->sas.sas_ha_name = DRV_NAME;
  1814. mvi->sas.dev = &pdev->dev;
  1815. mvi->sas.lldd_module = THIS_MODULE;
  1816. mvi->sas.sas_addr = &mvi->sas_addr[0];
  1817. mvi->sas.sas_phy = arr_phy;
  1818. mvi->sas.sas_port = arr_port;
  1819. mvi->sas.num_phys = chip->n_phy;
  1820. mvi->sas.lldd_max_execute_num = MVS_CHIP_SLOT_SZ - 1;
  1821. mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
  1822. mvi->can_queue = (MVS_CHIP_SLOT_SZ >> 1) - 1;
  1823. mvi->sas.lldd_ha = mvi;
  1824. mvi->sas.core.shost = mvi->shost;
  1825. mvs_tag_init(mvi);
  1826. /*
  1827. * ioremap main and peripheral registers
  1828. */
  1829. #ifdef MVS_ENABLE_PERI
  1830. res_start = pci_resource_start(pdev, 2);
  1831. res_len = pci_resource_len(pdev, 2);
  1832. if (!res_start || !res_len)
  1833. goto err_out;
  1834. mvi->peri_regs = ioremap_nocache(res_start, res_len);
  1835. if (!mvi->peri_regs)
  1836. goto err_out;
  1837. #endif
  1838. res_start = pci_resource_start(pdev, 4);
  1839. res_len = pci_resource_len(pdev, 4);
  1840. if (!res_start || !res_len)
  1841. goto err_out;
  1842. res_flag = pci_resource_flags(pdev, 4);
  1843. if (res_flag & IORESOURCE_CACHEABLE)
  1844. mvi->regs = ioremap(res_start, res_len);
  1845. else
  1846. mvi->regs = ioremap_nocache(res_start, res_len);
  1847. if (!mvi->regs)
  1848. goto err_out;
  1849. /*
  1850. * alloc and init our DMA areas
  1851. */
  1852. mvi->tx = dma_alloc_coherent(&pdev->dev,
  1853. sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
  1854. &mvi->tx_dma, GFP_KERNEL);
  1855. if (!mvi->tx)
  1856. goto err_out;
  1857. memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
  1858. mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
  1859. &mvi->rx_fis_dma, GFP_KERNEL);
  1860. if (!mvi->rx_fis)
  1861. goto err_out;
  1862. memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
  1863. mvi->rx = dma_alloc_coherent(&pdev->dev,
  1864. sizeof(*mvi->rx) * MVS_RX_RING_SZ,
  1865. &mvi->rx_dma, GFP_KERNEL);
  1866. if (!mvi->rx)
  1867. goto err_out;
  1868. memset(mvi->rx, 0, sizeof(*mvi->rx) * MVS_RX_RING_SZ);
  1869. mvi->rx[0] = cpu_to_le32(0xfff);
  1870. mvi->rx_cons = 0xfff;
  1871. mvi->slot = dma_alloc_coherent(&pdev->dev,
  1872. sizeof(*mvi->slot) * MVS_SLOTS,
  1873. &mvi->slot_dma, GFP_KERNEL);
  1874. if (!mvi->slot)
  1875. goto err_out;
  1876. memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
  1877. for (i = 0; i < MVS_SLOTS; i++) {
  1878. struct mvs_slot_info *slot = &mvi->slot_info[i];
  1879. slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
  1880. &slot->buf_dma, GFP_KERNEL);
  1881. if (!slot->buf)
  1882. goto err_out;
  1883. memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
  1884. }
  1885. /* finally, read NVRAM to get our SAS address */
  1886. if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
  1887. goto err_out;
  1888. return mvi;
  1889. err_out:
  1890. mvs_free(mvi);
  1891. return NULL;
  1892. }
  1893. static u32 mvs_cr32(void __iomem *regs, u32 addr)
  1894. {
  1895. mw32(CMD_ADDR, addr);
  1896. return mr32(CMD_DATA);
  1897. }
/* Indirect register write: latch @addr into CMD_ADDR, then write @val
 * through CMD_DATA. */
static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
{
	mw32(CMD_ADDR, addr);
	mw32(CMD_DATA, val);
}
  1903. static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
  1904. {
  1905. void __iomem *regs = mvi->regs;
  1906. return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4):
  1907. mr32(P4_SER_CTLSTAT + (port - 4) * 4);
  1908. }
  1909. static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
  1910. {
  1911. void __iomem *regs = mvi->regs;
  1912. if (port < 4)
  1913. mw32(P0_SER_CTLSTAT + port * 4, val);
  1914. else
  1915. mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
  1916. }
  1917. static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
  1918. {
  1919. void __iomem *regs = mvi->regs + off;
  1920. void __iomem *regs2 = mvi->regs + off2;
  1921. return (port < 4)?readl(regs + port * 8):
  1922. readl(regs2 + (port - 4) * 8);
  1923. }
  1924. static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
  1925. u32 port, u32 val)
  1926. {
  1927. void __iomem *regs = mvi->regs + off;
  1928. void __iomem *regs2 = mvi->regs + off2;
  1929. if (port < 4)
  1930. writel(val, regs + port * 8);
  1931. else
  1932. writel(val, regs2 + (port - 4) * 8);
  1933. }
/* Read the PHYR_* register previously selected via mvs_write_port_cfg_addr(). */
static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
}
/* Write the PHYR_* register previously selected via mvs_write_port_cfg_addr(). */
static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
}
/* Select which PHYR_* config register subsequent cfg_data accesses hit. */
static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
}
/* Read the vendor-specific register selected via mvs_write_port_vsr_addr(). */
static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
}
/* Write the vendor-specific register selected via mvs_write_port_vsr_addr(). */
static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
}
/* Select which vendor-specific register subsequent vsr_data accesses hit. */
static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
}
/* Read the per-phy interrupt status register. */
static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
}
/* Write (acknowledge) the per-phy interrupt status register. */
static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
}
/* Read the per-phy interrupt mask register. */
static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
}
/* Write the per-phy interrupt mask register. */
static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
}
/*
 * Chip-specific PHY workarounds applied once at init time.  All values
 * are vendor-supplied magic; each write is annotated with the erratum
 * it addresses.
 */
static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	/* workaround for SATA R-ERR, to ignore phy glitch */
	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
	tmp &= ~(1 << 9);
	tmp |= (1 << 10);
	mvs_cw32(regs, CMD_PHY_TIMER, tmp);

	/* enable retry 127 times */
	mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);

	/* extend open frame timeout to max */
	tmp = mvs_cr32(regs, CMD_SAS_CTL0);
	tmp &= ~0xffff;
	tmp |= 0x3fff;
	mvs_cw32(regs, CMD_SAS_CTL0, tmp);

	/* workaround for WDTIMEOUT , set to 550 ms */
	mvs_cw32(regs, CMD_WD_TIMER, 0xffffff);

	/* not to halt for different port op during wideport link change */
	mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);

	/* workaround for Seagate disk not-found OOB sequence, recv
	 * COMINIT before sending out COMWAKE */
	tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
	tmp &= 0x0000ffff;
	tmp |= 0x00fa0000;
	mvs_cw32(regs, CMD_PHY_MODE_21, tmp);

	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
	tmp &= 0x1fffffff;
	tmp |= (2U << 29);	/* 8 ms retry */
	mvs_cw32(regs, CMD_PHY_TIMER, tmp);

	/* TEST - for phy decoding error, adjust voltage levels */
	/* one VSR addr/data pair per phy pair; banks are 8 bytes apart */
	mw32(P0_VSR_ADDR + 0, 0x8);
	mw32(P0_VSR_DATA + 0, 0x2F0);

	mw32(P0_VSR_ADDR + 8, 0x8);
	mw32(P0_VSR_DATA + 8, 0x2F0);

	mw32(P0_VSR_ADDR + 16, 0x8);
	mw32(P0_VSR_DATA + 16, 0x2F0);

	mw32(P0_VSR_ADDR + 24, 0x8);
	mw32(P0_VSR_DATA + 24, 0x2F0);
}
  2014. static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
  2015. {
  2016. void __iomem *regs = mvi->regs;
  2017. u32 tmp;
  2018. tmp = mr32(PCS);
  2019. if (mvi->chip->n_phy <= 4)
  2020. tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
  2021. else
  2022. tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
  2023. mw32(PCS, tmp);
  2024. }
  2025. static void mvs_detect_porttype(struct mvs_info *mvi, int i)
  2026. {
  2027. void __iomem *regs = mvi->regs;
  2028. u32 reg;
  2029. struct mvs_phy *phy = &mvi->phy[i];
  2030. /* TODO check & save device type */
  2031. reg = mr32(GBL_PORT_TYPE);
  2032. if (reg & MODE_SAS_SATA & (1 << i))
  2033. phy->phy_type |= PORT_TYPE_SAS;
  2034. else
  2035. phy->phy_type |= PORT_TYPE_SATA;
  2036. }
  2037. static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
  2038. {
  2039. u32 *s = (u32 *) buf;
  2040. if (!s)
  2041. return NULL;
  2042. mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
  2043. s[3] = mvs_read_port_cfg_data(mvi, i);
  2044. mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
  2045. s[2] = mvs_read_port_cfg_data(mvi, i);
  2046. mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
  2047. s[1] = mvs_read_port_cfg_data(mvi, i);
  2048. mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
  2049. s[0] = mvs_read_port_cfg_data(mvi, i);
  2050. return (void *)s;
  2051. }
/* Nonzero when the phy interrupt status indicates a SATA signature FIS
 * has been received. */
static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}
/*
 * Push the wide-port phy map of the port that phy @i belongs to into
 * each phy's PHYR_WIDE_PORT config register: member phys receive the
 * full map, non-members receive 0.
 *
 * NOTE(review): the register accessors are passed 'no' (the shifted
 * mask variable from for_each_phy) rather than the loop index 'j';
 * whether 'no' is the intended phy number depends on the for_each_phy()
 * macro definition, which is not visible here — confirm.
 */
static void mvs_update_wideport(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
		if (no & 1) {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no, 0);
		}
}
/*
 * Return the raw serial control/status word when phy @i is ready (link
 * up and no power-off event latched in irq_status), else 0.
 *
 * When the phy is not ready but still bound to a port, detach it:
 * shrink the wide-port map (SAS) or mark the port detached (SATA),
 * release the port's SATA register set, and clear the phy's attach
 * state and type bits.
 */
static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port;

	tmp = mvs_read_phy_ctl(mvi, i);
	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!phy->port)
			phy->phy_attached = 1;
		return tmp;
	}

	port = phy->port;
	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		mvs_free_reg_set(mvi, phy->port);
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}
	return 0;
}
/*
 * Refresh cached phy information from the hardware config registers:
 * device info, our SAS address, and — when the phy is ready — link
 * rate, attached SAS address, protocol identification and the received
 * frame (identify frame or D2H signature FIS).  When @get_st is set,
 * the phy interrupt status is also read, evaluated via
 * mvs_is_phy_ready(), and acknowledged at the end.
 */
static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
					int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct pci_dev *pdev = mvi->pdev;
	u32 tmp, j;
	u64 tmp64;

	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
	phy->dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
	phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
	phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);

	if (get_st) {
		phy->irq_status = mvs_read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		u32 phy_st;
		struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];

		mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
		phy_st = mvs_read_port_cfg_data(mvi, i);

		/* negotiated link rate, from the control/status word */
		sas_phy->linkrate =
			(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
				PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;

		/* Updated attached_sas_addr */
		mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
		phy->att_dev_sas_addr =
				(u64) mvs_read_port_cfg_data(mvi, i) << 32;
		mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
		phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);

		dev_printk(KERN_DEBUG, &pdev->dev,
			   "phy[%d] Get Attached Address 0x%llX ,"
			   " SAS Address 0x%llX\n",
			   i, phy->att_dev_sas_addr, phy->dev_sas_addr);
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "Rate = %x , type = %d\n",
			   sas_phy->linkrate, phy->phy_type);

#if 1
		/*
		 * If the device is capable of supporting a wide port
		 * on its phys, it may configure the phys as a wide port.
		 */
		/* treat an attached address one below a sibling phy's as
		 * the same wide-port expander and unify the two */
		if (phy->phy_type & PORT_TYPE_SAS)
			for (j = 0; j < mvi->chip->n_phy && j != i; ++j) {
				if ((mvi->phy[j].phy_attached) &&
				    (mvi->phy[j].phy_type & PORT_TYPE_SAS))
					if (phy->att_dev_sas_addr ==
					    mvi->phy[j].att_dev_sas_addr - 1) {
						phy->att_dev_sas_addr =
						mvi->phy[j].att_dev_sas_addr;
						break;
					}
			}
#endif

		tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
		memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);

		if (phy->phy_type & PORT_TYPE_SAS) {
			/* identify attached device and its protocols */
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
			phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
			phy->identify.device_type =
			    phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (phy_st & PHY_OOB_DTCTD)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		} else if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				if (phy_st & PHY_OOB_DTCTD)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
				    sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i,
						(void *)sas_phy->frame_rcvd);
			} else {
				dev_printk(KERN_DEBUG, &pdev->dev,
					"No sig fis\n");
			}
		}
		/* workaround for HW phy decoding error on 1.5g disk drive */
		mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
		tmp = mvs_read_port_vsr_data(mvi, i);
		if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
		     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
			SAS_LINK_RATE_1_5_GBPS)
			tmp &= ~PHY_MODE6_DTL_SPEED;
		else
			tmp |= PHY_MODE6_DTL_SPEED;
		mvs_write_port_vsr_data(mvi, i, tmp);

	}
	if (get_st)
		/* acknowledge the interrupt bits we consumed above */
		mvs_write_port_irq_stat(mvi, i, phy->irq_status);
}
/*
 * libsas port-formed callback: bind the phy to its mvs_port, mark the
 * port attached, invalidate the SATA register-set mapping, and — for
 * SAS phys — program the wide-port phy map into the hardware.  All
 * state changes happen under mvi->lock.
 */
static void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = sas_ha->lldd_ha;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct mvs_port *port = &mvi->port[sas_port->id];
	unsigned long flags;

	spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	/* no SATA register set assigned yet; allocated at first ATA cmd */
	port->taskfileset = MVS_ID_NOT_MAPPED;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mvs_update_wideport(mvi, sas_phy->id);
	}
	spin_unlock_irqrestore(&mvi->lock, flags);
}
/*
 * mvs_hw_init - one-time controller bring-up.
 *
 * Resets the HBA (optionally power-cycling the phys first), restores phy
 * power, programs the command list / RX FIS / TX / RX ring DMA addresses,
 * resets and configures each phy, then starts the TX/RX engines and
 * re-enables interrupts.
 *
 * The order of register accesses below follows the chip's documented
 * init sequence — do not reorder.
 *
 * Returns 0 on success, -EBUSY if the chip never comes out of reset.
 */
static int __devinit mvs_hw_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(GBL_CTL, 0);
	tmp = mr32(GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		if (mvi->flags & MVF_PHY_PWR_FIX) {
			/* quirk: power the phys off before the global reset */
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
			tmp &= ~PCTL_PWR_ON;
			tmp |= PCTL_OFF;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
			tmp &= ~PCTL_PWR_ON;
			tmp |= PCTL_OFF;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
		}

		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
		mw32_f(GBL_CTL, HBA_RST);
	}

	/* wait for reset to finish; timeout is just a guess (up to ~10s) */
	i = 1000;
	while (i-- > 0) {
		msleep(10);

		if (!(mr32(GBL_CTL) & HBA_RST))
			break;
	}
	if (mr32(GBL_CTL) & HBA_RST) {
		dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
		return -EBUSY;
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(CTL);
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(CTL, cctl | CCTL_RST);

	/* write to device control _AND_ device status register? - A.C. */
	pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
	tmp &= ~PRD_REQ_MASK;
	tmp |= PRD_REQ_SIZE;
	pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

	/* restore phy power (undoes the MVF_PHY_PWR_FIX sequence above) */
	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
	tmp |= PCTL_PWR_ON;
	tmp &= ~PCTL_OFF;
	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
	tmp |= PCTL_PWR_ON;
	tmp &= ~PCTL_OFF;
	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);

	/* take the chip out of reset (cctl has CCTL_RST cleared) */
	mw32_f(CTL, cctl);

	/* reset control */
	mw32(PCS, 0);		/*MVS_PCS */

	mvs_phy_hacks(mvi);

	/* program the DMA ring base addresses; the 64-bit addresses are
	 * split into lo/hi halves ((x >> 16) >> 16 avoids a 32-bit-shift
	 * warning when dma_addr_t is 32 bits wide) */
	mw32(CMD_LIST_LO, mvi->slot_dma);
	mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(RX_FIS_LO, mvi->rx_fis_dma);
	mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(TX_LO, mvi->tx_dma);
	mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(RX_CFG, MVS_RX_RING_SZ);
	mw32(RX_LO, mvi->rx_dma);
	mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);

	/* enable auto port detection */
	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
	msleep(100);
	/* init and reset phys */
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* FIXME: is this the correct dword order? */
		/* NOTE(review): aliasing a byte array through u32* — works on
		 * this driver's targets but technically strict-aliasing UB */
		u32 lo = *((u32 *)&mvi->sas_addr[0]);
		u32 hi = *((u32 *)&mvi->sas_addr[4]);

		mvs_detect_porttype(mvi, i);

		/* set phy local SAS address */
		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
		mvs_write_port_cfg_data(mvi, i, lo);
		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
		mvs_write_port_cfg_data(mvi, i, hi);

		/* reset phy */
		tmp = mvs_read_phy_ctl(mvi, i);
		tmp |= PHY_RST;
		mvs_write_phy_ctl(mvi, i, tmp);
	}

	/* let the phy resets settle before touching irq state */
	msleep(100);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
		mvs_enable_xmt(mvi, i);
	}

	/* FIXME: update wide port bitmaps */

	/* little endian for open address and command table, etc. */
	/* A.C.
	 * it seems that ( from the spec ) turning on big-endian won't
	 * do us any good on big-endian machines, need further confirmation
	 */
	cctl = mr32(CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(PCS);
	tmp |= PCS_CMD_RST;
	mw32(PCS, tmp);

	/* interrupt coalescing may cause missing HW interrput in some case,
	 * and the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	tmp = 0;
	mw32(INT_COAL, tmp);

	tmp = 0x100;
	mw32(INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(TX_CFG, 0);
	mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);

	/* re-enable interrupts globally */
	mvs_hba_interrupt_enable(mvi);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM);
	mw32(INT_MASK, tmp);

	return 0;
}
  2354. static void __devinit mvs_print_info(struct mvs_info *mvi)
  2355. {
  2356. struct pci_dev *pdev = mvi->pdev;
  2357. static int printed_version;
  2358. if (!printed_version++)
  2359. dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
  2360. dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
  2361. mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
  2362. }
  2363. static int __devinit mvs_pci_init(struct pci_dev *pdev,
  2364. const struct pci_device_id *ent)
  2365. {
  2366. int rc;
  2367. struct mvs_info *mvi;
  2368. irq_handler_t irq_handler = mvs_interrupt;
  2369. rc = pci_enable_device(pdev);
  2370. if (rc)
  2371. return rc;
  2372. pci_set_master(pdev);
  2373. rc = pci_request_regions(pdev, DRV_NAME);
  2374. if (rc)
  2375. goto err_out_disable;
  2376. rc = pci_go_64(pdev);
  2377. if (rc)
  2378. goto err_out_regions;
  2379. mvi = mvs_alloc(pdev, ent);
  2380. if (!mvi) {
  2381. rc = -ENOMEM;
  2382. goto err_out_regions;
  2383. }
  2384. rc = mvs_hw_init(mvi);
  2385. if (rc)
  2386. goto err_out_mvi;
  2387. #ifndef MVS_DISABLE_MSI
  2388. if (!pci_enable_msi(pdev)) {
  2389. u32 tmp;
  2390. void __iomem *regs = mvi->regs;
  2391. mvi->flags |= MVF_MSI;
  2392. irq_handler = mvs_msi_interrupt;
  2393. tmp = mr32(PCS);
  2394. mw32(PCS, tmp | PCS_SELF_CLEAR);
  2395. }
  2396. #endif
  2397. rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
  2398. if (rc)
  2399. goto err_out_msi;
  2400. rc = scsi_add_host(mvi->shost, &pdev->dev);
  2401. if (rc)
  2402. goto err_out_irq;
  2403. rc = sas_register_ha(&mvi->sas);
  2404. if (rc)
  2405. goto err_out_shost;
  2406. pci_set_drvdata(pdev, mvi);
  2407. mvs_print_info(mvi);
  2408. scsi_scan_host(mvi->shost);
  2409. return 0;
  2410. err_out_shost:
  2411. scsi_remove_host(mvi->shost);
  2412. err_out_irq:
  2413. free_irq(pdev->irq, mvi);
  2414. err_out_msi:
  2415. if (mvi->flags |= MVF_MSI)
  2416. pci_disable_msi(pdev);
  2417. err_out_mvi:
  2418. mvs_free(mvi);
  2419. err_out_regions:
  2420. pci_release_regions(pdev);
  2421. err_out_disable:
  2422. pci_disable_device(pdev);
  2423. return rc;
  2424. }
  2425. static void __devexit mvs_pci_remove(struct pci_dev *pdev)
  2426. {
  2427. struct mvs_info *mvi = pci_get_drvdata(pdev);
  2428. pci_set_drvdata(pdev, NULL);
  2429. if (mvi) {
  2430. sas_unregister_ha(&mvi->sas);
  2431. mvs_hba_interrupt_disable(mvi);
  2432. sas_remove_host(mvi->shost);
  2433. scsi_remove_host(mvi->shost);
  2434. free_irq(pdev->irq, mvi);
  2435. if (mvi->flags & MVF_MSI)
  2436. pci_disable_msi(pdev);
  2437. mvs_free(mvi);
  2438. pci_release_regions(pdev);
  2439. }
  2440. pci_disable_device(pdev);
  2441. }
/* Hooks handed to libsas via sas_domain_attach_transport(). */
static struct sas_domain_function_template mvs_transport_ops = {
	.lldd_execute_task	= mvs_task_exec,	/* queue a task to the HBA */
	.lldd_control_phy	= mvs_phy_control,	/* phy reset / control */
	.lldd_abort_task	= mvs_task_abort,	/* abort an in-flight task */
	.lldd_port_formed	= mvs_port_formed	/* port formation notice */
};
/* PCI IDs handled by this driver.
 * Note: the 0x6340 part is intentionally driven with chip_6440 data. */
static struct pci_device_id __devinitdata mvs_pci_table[] = {
	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },

	{ }	/* terminate list */
};
/* PCI driver glue: dispatches probe/remove for the IDs above. */
static struct pci_driver mvs_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mvs_pci_table,
	.probe		= mvs_pci_init,
	.remove		= __devexit_p(mvs_pci_remove),
};
  2461. static int __init mvs_init(void)
  2462. {
  2463. int rc;
  2464. mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
  2465. if (!mvs_stt)
  2466. return -ENOMEM;
  2467. rc = pci_register_driver(&mvs_pci_driver);
  2468. if (rc)
  2469. goto err_out;
  2470. return 0;
  2471. err_out:
  2472. sas_release_transport(mvs_stt);
  2473. return rc;
  2474. }
/* Module unload: unhook from the PCI core first, then drop the libsas
 * transport template allocated in mvs_init(). */
static void __exit mvs_exit(void)
{
	pci_unregister_driver(&mvs_pci_driver);
	sas_release_transport(mvs_stt);
}
/* Module entry/exit points and metadata for modinfo / hotplug. */
module_init(mvs_init);
module_exit(mvs_exit);

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
/* exposes mvs_pci_table so udev/modprobe can auto-load on device match */
MODULE_DEVICE_TABLE(pci, mvs_pci_table);