/*
	mvsas.c - Marvell 88SE6440 SAS/SATA support

	Copyright 2007 Red Hat, Inc.
	Copyright 2008 Marvell. <kewei@marvell.com>

	This program is free software; you can redistribute it and/or
	modify it under the terms of the GNU General Public License as
	published by the Free Software Foundation; either version 2,
	or (at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty
	of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
	See the GNU General Public License for more details.

	You should have received a copy of the GNU General Public
	License along with this program; see the file COPYING.  If not,
	write to the Free Software Foundation, 675 Mass Ave, Cambridge,
	MA 02139, USA.

	---------------------------------------------------------------

	Random notes:
	* hardware supports controlling the endian-ness of data
	  structures.  this permits elimination of all the le32_to_cpu()
	  and cpu_to_le32() conversions.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <scsi/libsas.h>
#include <scsi/scsi_tcq.h>
#include <scsi/sas_ata.h>
#include <asm/io.h>
#define DRV_NAME	"mvsas"
#define DRV_VERSION	"0.5.2"
#define _MV_DUMP	0
#define MVS_DISABLE_NVRAM
#define MVS_DISABLE_MSI

#define mr32(reg)	readl(regs + MVS_##reg)
#define mw32(reg,val)	writel((val), regs + MVS_##reg)
#define mw32_f(reg,val)	do {			\
		writel((val), regs + MVS_##reg);	\
		readl(regs + MVS_##reg);		\
	} while (0)
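
/*
 * Usage sketch (illustrative only): all three macros assume a local
 * "void __iomem *regs = mvi->regs;" is in scope, as in
 * mvs_hba_interrupt_enable() later in this file.  mw32_f() writes the
 * register and then reads it back, flushing posted PCI writes:
 *
 *	void __iomem *regs = mvi->regs;
 *	u32 tmp = mr32(GBL_CTL);
 *	mw32_f(GBL_CTL, tmp | INT_EN);
 */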
#define MVS_ID_NOT_MAPPED	0x7f
#define MVS_CHIP_SLOT_SZ	(1U << mvi->chip->slot_width)

/* offset for D2H FIS in the Received FIS List Structure */
#define SATA_RECEIVED_D2H_FIS(reg_set)	\
	((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
#define SATA_RECEIVED_PIO_FIS(reg_set)	\
	((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
#define UNASSOC_D2H_FIS(id)		\
	((void *) mvi->rx_fis + 0x100 * id)

#define for_each_phy(__lseq_mask, __mc, __lseq, __rest)	\
	for ((__mc) = (__lseq_mask), (__lseq) = 0;	\
	     (__mc) != 0 && __rest;			\
	     (++__lseq), (__mc) >>= 1)
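
/*
 * Illustrative use of for_each_phy(): __mc is shifted right once per
 * iteration while __lseq counts up, so the low bit of __mc says whether
 * phy number __lseq is in the mask.  For example, walking a wide port's
 * phy map (do_something_with_phy() is a hypothetical helper, not part
 * of this driver):
 *
 *	u32 mc, lseq;
 *	for_each_phy(port->wide_port_phymap, mc, lseq, 1) {
 *		if (mc & 1)
 *			do_something_with_phy(lseq);
 *	}
 */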
/* driver compile-time configuration */
enum driver_configuration {
	MVS_TX_RING_SZ		= 1024,	/* TX ring size (12-bit) */
	MVS_RX_RING_SZ		= 1024,	/* RX ring size (12-bit) */
					/* software requires power-of-2
					   ring size */

	MVS_SLOTS		= 512,	/* command slots */
	MVS_SLOT_BUF_SZ		= 8192,	/* cmd tbl + IU + status + PRD */
	MVS_SSP_CMD_SZ		= 64,	/* SSP command table buffer size */
	MVS_ATA_CMD_SZ		= 96,	/* SATA command table buffer size */
	MVS_OAF_SZ		= 64,	/* Open address frame buffer size */

	MVS_RX_FIS_COUNT	= 17,	/* Optional rx'd FISs (max 17) */

	MVS_QUEUE_SIZE		= 30,	/* Support Queue depth */
	MVS_CAN_QUEUE		= MVS_SLOTS - 1, /* SCSI Queue depth */
};

/* unchangeable hardware details */
enum hardware_details {
	MVS_MAX_PHYS		= 8,	/* max. possible phys */
	MVS_MAX_PORTS		= 8,	/* max. possible ports */
	MVS_RX_FISL_SZ		= 0x400 + (MVS_RX_FIS_COUNT * 0x100),
};

/* peripheral registers (BAR2) */
enum peripheral_registers {
	SPI_CTL			= 0x10,	/* EEPROM control */
	SPI_CMD			= 0x14,	/* EEPROM command */
	SPI_DATA		= 0x18,	/* EEPROM data */
};

enum peripheral_register_bits {
	TWSI_RDY		= (1U << 7),	/* EEPROM interface ready */
	TWSI_RD			= (1U << 4),	/* EEPROM read access */

	SPI_ADDR_MASK		= 0x3ffff,	/* bits 17:0 */
};
  91. /* enhanced mode registers (BAR4) */
  92. enum hw_registers {
  93. MVS_GBL_CTL = 0x04, /* global control */
  94. MVS_GBL_INT_STAT = 0x08, /* global irq status */
  95. MVS_GBL_PI = 0x0C, /* ports implemented bitmask */
  96. MVS_GBL_PORT_TYPE = 0xa0, /* port type */
  97. MVS_CTL = 0x100, /* SAS/SATA port configuration */
  98. MVS_PCS = 0x104, /* SAS/SATA port control/status */
  99. MVS_CMD_LIST_LO = 0x108, /* cmd list addr */
  100. MVS_CMD_LIST_HI = 0x10C,
  101. MVS_RX_FIS_LO = 0x110, /* RX FIS list addr */
  102. MVS_RX_FIS_HI = 0x114,
  103. MVS_TX_CFG = 0x120, /* TX configuration */
  104. MVS_TX_LO = 0x124, /* TX (delivery) ring addr */
  105. MVS_TX_HI = 0x128,
  106. MVS_TX_PROD_IDX = 0x12C, /* TX producer pointer */
  107. MVS_TX_CONS_IDX = 0x130, /* TX consumer pointer (RO) */
  108. MVS_RX_CFG = 0x134, /* RX configuration */
  109. MVS_RX_LO = 0x138, /* RX (completion) ring addr */
  110. MVS_RX_HI = 0x13C,
  111. MVS_RX_CONS_IDX = 0x140, /* RX consumer pointer (RO) */
  112. MVS_INT_COAL = 0x148, /* Int coalescing config */
  113. MVS_INT_COAL_TMOUT = 0x14C, /* Int coalescing timeout */
  114. MVS_INT_STAT = 0x150, /* Central int status */
  115. MVS_INT_MASK = 0x154, /* Central int enable */
  116. MVS_INT_STAT_SRS = 0x158, /* SATA register set status */
  117. MVS_INT_MASK_SRS = 0x15C,
  118. /* ports 1-3 follow after this */
  119. MVS_P0_INT_STAT = 0x160, /* port0 interrupt status */
  120. MVS_P0_INT_MASK = 0x164, /* port0 interrupt mask */
  121. MVS_P4_INT_STAT = 0x200, /* Port 4 interrupt status */
  122. MVS_P4_INT_MASK = 0x204, /* Port 4 interrupt enable mask */
  123. /* ports 1-3 follow after this */
  124. MVS_P0_SER_CTLSTAT = 0x180, /* port0 serial control/status */
  125. MVS_P4_SER_CTLSTAT = 0x220, /* port4 serial control/status */
  126. MVS_CMD_ADDR = 0x1B8, /* Command register port (addr) */
  127. MVS_CMD_DATA = 0x1BC, /* Command register port (data) */
  128. /* ports 1-3 follow after this */
  129. MVS_P0_CFG_ADDR = 0x1C0, /* port0 phy register address */
  130. MVS_P0_CFG_DATA = 0x1C4, /* port0 phy register data */
  131. MVS_P4_CFG_ADDR = 0x230, /* Port 4 config address */
  132. MVS_P4_CFG_DATA = 0x234, /* Port 4 config data */
  133. /* ports 1-3 follow after this */
  134. MVS_P0_VSR_ADDR = 0x1E0, /* port0 VSR address */
  135. MVS_P0_VSR_DATA = 0x1E4, /* port0 VSR data */
  136. MVS_P4_VSR_ADDR = 0x250, /* port 4 VSR addr */
  137. MVS_P4_VSR_DATA = 0x254, /* port 4 VSR data */
  138. };
enum hw_register_bits {
	/* MVS_GBL_CTL */
	INT_EN			= (1U << 1),	/* Global int enable */
	HBA_RST			= (1U << 0),	/* HBA reset */

	/* MVS_GBL_INT_STAT */
	INT_XOR			= (1U << 4),	/* XOR engine event */
	INT_SAS_SATA		= (1U << 0),	/* SAS/SATA event */

	/* MVS_GBL_PORT_TYPE */			/* shl for ports 1-3 */
	SATA_TARGET		= (1U << 16),	/* port0 SATA target enable */
	MODE_AUTO_DET_PORT7	= (1U << 15),	/* port0 SAS/SATA autodetect */
	MODE_AUTO_DET_PORT6	= (1U << 14),
	MODE_AUTO_DET_PORT5	= (1U << 13),
	MODE_AUTO_DET_PORT4	= (1U << 12),
	MODE_AUTO_DET_PORT3	= (1U << 11),
	MODE_AUTO_DET_PORT2	= (1U << 10),
	MODE_AUTO_DET_PORT1	= (1U << 9),
	MODE_AUTO_DET_PORT0	= (1U << 8),
	MODE_AUTO_DET_EN	= MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
				  MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
				  MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
				  MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
	MODE_SAS_PORT7_MASK	= (1U << 7),	/* port0 SAS(1), SATA(0) mode */
	MODE_SAS_PORT6_MASK	= (1U << 6),
	MODE_SAS_PORT5_MASK	= (1U << 5),
	MODE_SAS_PORT4_MASK	= (1U << 4),
	MODE_SAS_PORT3_MASK	= (1U << 3),
	MODE_SAS_PORT2_MASK	= (1U << 2),
	MODE_SAS_PORT1_MASK	= (1U << 1),
	MODE_SAS_PORT0_MASK	= (1U << 0),
	MODE_SAS_SATA		= MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
				  MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
				  MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
				  MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,

				/* SAS_MODE value may be
				 * dictated (in hw) by values
				 * of SATA_TARGET & AUTO_DET
				 */

	/* MVS_TX_CFG */
	TX_EN			= (1U << 16),	/* Enable TX */
	TX_RING_SZ_MASK		= 0xfff,	/* TX ring size, bits 11:0 */

	/* MVS_RX_CFG */
	RX_EN			= (1U << 16),	/* Enable RX */
	RX_RING_SZ_MASK		= 0xfff,	/* RX ring size, bits 11:0 */

	/* MVS_INT_COAL */
	COAL_EN			= (1U << 16),	/* Enable int coalescing */

	/* MVS_INT_STAT, MVS_INT_MASK */
	CINT_I2C		= (1U << 31),	/* I2C event */
	CINT_SW0		= (1U << 30),	/* software event 0 */
	CINT_SW1		= (1U << 29),	/* software event 1 */
	CINT_PRD_BC		= (1U << 28),	/* PRD BC err for read cmd */
	CINT_DMA_PCIE		= (1U << 27),	/* DMA to PCIE timeout */
	CINT_MEM		= (1U << 26),	/* int mem parity err */
	CINT_I2C_SLAVE		= (1U << 25),	/* slave I2C event */
	CINT_SRS		= (1U << 3),	/* SRS event */
	CINT_CI_STOP		= (1U << 1),	/* cmd issue stopped */
	CINT_DONE		= (1U << 0),	/* cmd completion */

						/* shl for ports 1-3 */
	CINT_PORT_STOPPED	= (1U << 16),	/* port0 stopped */
	CINT_PORT		= (1U << 8),	/* port0 event */
	CINT_PORT_MASK_OFFSET	= 8,
	CINT_PORT_MASK		= (0xFF << CINT_PORT_MASK_OFFSET),

	/* TX (delivery) ring bits */
	TXQ_CMD_SHIFT		= 29,
	TXQ_CMD_SSP		= 1,		/* SSP protocol */
	TXQ_CMD_SMP		= 2,		/* SMP protocol */
	TXQ_CMD_STP		= 3,		/* STP/SATA protocol */
	TXQ_CMD_SSP_FREE_LIST	= 4,		/* add to SSP targ free list */
	TXQ_CMD_SLOT_RESET	= 7,		/* reset command slot */
	TXQ_MODE_I		= (1U << 28),	/* mode: 0=target,1=initiator */
	TXQ_PRIO_HI		= (1U << 27),	/* priority: 0=normal, 1=high */
	TXQ_SRS_SHIFT		= 20,		/* SATA register set */
	TXQ_SRS_MASK		= 0x7f,
	TXQ_PHY_SHIFT		= 12,		/* PHY bitmap */
	TXQ_PHY_MASK		= 0xff,
	TXQ_SLOT_MASK		= 0xfff,	/* slot number */

	/* RX (completion) ring bits */
	RXQ_GOOD		= (1U << 23),	/* Response good */
	RXQ_SLOT_RESET		= (1U << 21),	/* Slot reset complete */
	RXQ_CMD_RX		= (1U << 20),	/* target cmd received */
	RXQ_ATTN		= (1U << 19),	/* attention */
	RXQ_RSP			= (1U << 18),	/* response frame xfer'd */
	RXQ_ERR			= (1U << 17),	/* err info rec xfer'd */
	RXQ_DONE		= (1U << 16),	/* cmd complete */
	RXQ_SLOT_MASK		= 0xfff,	/* slot number */

	/* mvs_cmd_hdr bits */
	MCH_PRD_LEN_SHIFT	= 16,		/* 16-bit PRD table len */
	MCH_SSP_FR_TYPE_SHIFT	= 13,		/* SSP frame type */

						/* SSP initiator only */
	MCH_SSP_FR_CMD		= 0x0,		/* COMMAND frame */

						/* SSP initiator or target */
	MCH_SSP_FR_TASK		= 0x1,		/* TASK frame */

						/* SSP target only */
	MCH_SSP_FR_XFER_RDY	= 0x4,		/* XFER_RDY frame */
	MCH_SSP_FR_RESP		= 0x5,		/* RESPONSE frame */
	MCH_SSP_FR_READ		= 0x6,		/* Read DATA frame(s) */
	MCH_SSP_FR_READ_RESP	= 0x7,		/* ditto, plus RESPONSE */

	MCH_PASSTHRU		= (1U << 12),	/* pass-through (SSP) */
	MCH_FBURST		= (1U << 11),	/* first burst (SSP) */
	MCH_CHK_LEN		= (1U << 10),	/* chk xfer len (SSP) */
	MCH_RETRY		= (1U << 9),	/* tport layer retry (SSP) */
	MCH_PROTECTION		= (1U << 8),	/* protection info rec (SSP) */
	MCH_RESET		= (1U << 7),	/* Reset (STP/SATA) */
	MCH_FPDMA		= (1U << 6),	/* First party DMA (STP/SATA) */
	MCH_ATAPI		= (1U << 5),	/* ATAPI (STP/SATA) */
	MCH_BIST		= (1U << 4),	/* BIST activate (STP/SATA) */
	MCH_PMP_MASK		= 0xf,		/* PMP from cmd FIS (STP/SATA) */

	CCTL_RST		= (1U << 5),	/* port logic reset */

						/* 0(LSB first), 1(MSB first) */
	CCTL_ENDIAN_DATA	= (1U << 3),	/* PRD data */
	CCTL_ENDIAN_RSP		= (1U << 2),	/* response frame */
	CCTL_ENDIAN_OPEN	= (1U << 1),	/* open address frame */
	CCTL_ENDIAN_CMD		= (1U << 0),	/* command table */

	/* MVS_Px_SER_CTLSTAT (per-phy control) */
	PHY_SSP_RST		= (1U << 3),	/* reset SSP link layer */
	PHY_BCAST_CHG		= (1U << 2),	/* broadcast(change) notif */
	PHY_RST_HARD		= (1U << 1),	/* hard reset + phy reset */
	PHY_RST			= (1U << 0),	/* phy reset */
	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
			(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
	PHY_READY_MASK		= (1U << 20),

	/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
	PHYEV_DEC_ERR		= (1U << 24),	/* Phy Decoding Error */
	PHYEV_UNASSOC_FIS	= (1U << 19),	/* unassociated FIS rx'd */
	PHYEV_AN		= (1U << 18),	/* SATA async notification */
	PHYEV_BIST_ACT		= (1U << 17),	/* BIST activate FIS */
	PHYEV_SIG_FIS		= (1U << 16),	/* signature FIS */
	PHYEV_POOF		= (1U << 12),	/* phy ready from 1 -> 0 */
	PHYEV_IU_BIG		= (1U << 11),	/* IU too long err */
	PHYEV_IU_SMALL		= (1U << 10),	/* IU too short err */
	PHYEV_UNK_TAG		= (1U << 9),	/* unknown tag */
	PHYEV_BROAD_CH		= (1U << 8),	/* broadcast(CHANGE) */
	PHYEV_COMWAKE		= (1U << 7),	/* COMWAKE rx'd */
	PHYEV_PORT_SEL		= (1U << 6),	/* port selector present */
	PHYEV_HARD_RST		= (1U << 5),	/* hard reset rx'd */
	PHYEV_ID_TMOUT		= (1U << 4),	/* identify timeout */
	PHYEV_ID_FAIL		= (1U << 3),	/* identify failed */
	PHYEV_ID_DONE		= (1U << 2),	/* identify done */
	PHYEV_HARD_RST_DONE	= (1U << 1),	/* hard reset done */
	PHYEV_RDY_CH		= (1U << 0),	/* phy ready changed state */

	/* MVS_PCS */
	PCS_EN_SATA_REG_SHIFT	= (16),		/* Enable SATA Register Set */
	PCS_EN_PORT_XMT_SHIFT	= (12),		/* Enable Port Transmit */
	PCS_EN_PORT_XMT_SHIFT2	= (8),		/* For 6480 */
	PCS_SATA_RETRY		= (1U << 8),	/* retry ctl FIS on R_ERR */
	PCS_RSP_RX_EN		= (1U << 7),	/* raw response rx */
	PCS_SELF_CLEAR		= (1U << 5),	/* self-clearing int mode */
	PCS_FIS_RX_EN		= (1U << 4),	/* FIS rx enable */
	PCS_CMD_STOP_ERR	= (1U << 3),	/* cmd stop-on-err enable */
	PCS_CMD_RST		= (1U << 1),	/* reset cmd issue */
	PCS_CMD_EN		= (1U << 0),	/* enable cmd issue */

	/* Port n Attached Device Info */
	PORT_DEV_SSP_TRGT	= (1U << 19),
	PORT_DEV_SMP_TRGT	= (1U << 18),
	PORT_DEV_STP_TRGT	= (1U << 17),
	PORT_DEV_SSP_INIT	= (1U << 11),
	PORT_DEV_SMP_INIT	= (1U << 10),
	PORT_DEV_STP_INIT	= (1U << 9),
	PORT_PHY_ID_MASK	= (0xFFU << 24),
	PORT_DEV_TRGT_MASK	= (0x7U << 17),
	PORT_DEV_INIT_MASK	= (0x7U << 9),
	PORT_DEV_TYPE_MASK	= (0x7U << 0),

	/* Port n PHY Status */
	PHY_RDY			= (1U << 2),
	PHY_DW_SYNC		= (1U << 1),
	PHY_OOB_DTCTD		= (1U << 0),

	/* VSR */
	/* PHYMODE 6 (CDB) */
	PHY_MODE6_LATECLK	= (1U << 29),	/* Lock Clock */
	PHY_MODE6_DTL_SPEED	= (1U << 27),	/* Digital Loop Speed */
	PHY_MODE6_FC_ORDER	= (1U << 26),	/* Fibre Channel Mode Order */
	PHY_MODE6_MUCNT_EN	= (1U << 24),	/* u Count Enable */
	PHY_MODE6_SEL_MUCNT_LEN	= (1U << 22),	/* Training Length Select */
	PHY_MODE6_SELMUPI	= (1U << 20),	/* Phase Multi Select (init) */
	PHY_MODE6_SELMUPF	= (1U << 18),	/* Phase Multi Select (final) */
	PHY_MODE6_SELMUFF	= (1U << 16),	/* Freq Loop Multi Sel(final) */
	PHY_MODE6_SELMUFI	= (1U << 14),	/* Freq Loop Multi Sel(init) */
	PHY_MODE6_FREEZE_LOOP	= (1U << 12),	/* Freeze Rx CDR Loop */
	PHY_MODE6_INT_RXFOFFS	= (1U << 3),	/* Rx CDR Freq Loop Enable */
	PHY_MODE6_FRC_RXFOFFS	= (1U << 2),	/* Initial Rx CDR Offset */
	PHY_MODE6_STAU_0D8	= (1U << 1),	/* Rx CDR Freq Loop Saturate */
	PHY_MODE6_RXSAT_DIS	= (1U << 0),	/* Saturate Ctl */
};
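
/*
 * Illustrative only: the bits marked "shl for ports 1-3" above are
 * replicated per port by shifting, e.g. a sketch of testing whether
 * port n raised an event in the central interrupt status:
 *
 *	if (int_stat & (CINT_PORT << n))
 *		... handle an event on port n ...
 */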
enum mvs_info_flags {
	MVF_MSI			= (1U << 0),	/* MSI is enabled */
	MVF_PHY_PWR_FIX		= (1U << 1),	/* bug workaround */
};
enum sas_cmd_port_registers {
	CMD_CMRST_OOB_DET	= 0x100, /* COMRESET OOB detect register */
	CMD_CMWK_OOB_DET	= 0x104, /* COMWAKE OOB detect register */
	CMD_CMSAS_OOB_DET	= 0x108, /* COMSAS OOB detect register */
	CMD_BRST_OOB_DET	= 0x10c, /* burst OOB detect register */
	CMD_OOB_SPACE		= 0x110, /* OOB space control register */
	CMD_OOB_BURST		= 0x114, /* OOB burst control register */
	CMD_PHY_TIMER		= 0x118, /* PHY timer control register */
	CMD_PHY_CONFIG0		= 0x11c, /* PHY config register 0 */
	CMD_PHY_CONFIG1		= 0x120, /* PHY config register 1 */
	CMD_SAS_CTL0		= 0x124, /* SAS control register 0 */
	CMD_SAS_CTL1		= 0x128, /* SAS control register 1 */
	CMD_SAS_CTL2		= 0x12c, /* SAS control register 2 */
	CMD_SAS_CTL3		= 0x130, /* SAS control register 3 */
	CMD_ID_TEST		= 0x134, /* ID test register */
	CMD_PL_TIMER		= 0x138, /* PL timer register */
	CMD_WD_TIMER		= 0x13c, /* WD timer register */
	CMD_PORT_SEL_COUNT	= 0x140, /* port selector count register */
	CMD_APP_MEM_CTL		= 0x144, /* Application Memory Control */
	CMD_XOR_MEM_CTL		= 0x148, /* XOR Block Memory Control */
	CMD_DMA_MEM_CTL		= 0x14c, /* DMA Block Memory Control */
	CMD_PORT_MEM_CTL0	= 0x150, /* Port Memory Control 0 */
	CMD_PORT_MEM_CTL1	= 0x154, /* Port Memory Control 1 */
	CMD_SATA_PORT_MEM_CTL0	= 0x158, /* SATA Port Memory Control 0 */
	CMD_SATA_PORT_MEM_CTL1	= 0x15c, /* SATA Port Memory Control 1 */
	CMD_XOR_MEM_BIST_CTL	= 0x160, /* XOR Memory BIST Control */
	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
	CMD_DMA_MEM_BIST_CTL	= 0x168, /* DMA Memory BIST Control */
	CMD_DMA_MEM_BIST_STAT	= 0x16c, /* DMA Memory BIST Status */
	CMD_PORT_MEM_BIST_CTL	= 0x170, /* Port Memory BIST Control */
	CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
	CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
	CMD_STP_MEM_BIST_CTL	= 0x17c, /* STP Memory BIST Control */
	CMD_STP_MEM_BIST_STAT0	= 0x180, /* STP Memory BIST Status 0 */
	CMD_STP_MEM_BIST_STAT1	= 0x184, /* STP Memory BIST Status 1 */
	CMD_RESET_COUNT		= 0x188, /* Reset Count */
	CMD_MONTR_DATA_SEL	= 0x18C, /* Monitor Data/Select */
	CMD_PLL_PHY_CONFIG	= 0x190, /* PLL/PHY Configuration */
	CMD_PHY_CTL		= 0x194, /* PHY Control and Status */
	CMD_PHY_TEST_COUNT0	= 0x198, /* Phy Test Count 0 */
	CMD_PHY_TEST_COUNT1	= 0x19C, /* Phy Test Count 1 */
	CMD_PHY_TEST_COUNT2	= 0x1A0, /* Phy Test Count 2 */
	CMD_APP_ERR_CONFIG	= 0x1A4, /* Application Error Configuration */
	CMD_PND_FIFO_CTL0	= 0x1A8, /* Pending FIFO Control 0 */
	CMD_HOST_CTL		= 0x1AC, /* Host Control Status */
	CMD_HOST_WR_DATA	= 0x1B0, /* Host Write Data */
	CMD_HOST_RD_DATA	= 0x1B4, /* Host Read Data */
	CMD_PHY_MODE_21		= 0x1B8, /* Phy Mode 21 */
	CMD_SL_MODE0		= 0x1BC, /* SL Mode 0 */
	CMD_SL_MODE1		= 0x1C0, /* SL Mode 1 */
	CMD_PND_FIFO_CTL1	= 0x1C4, /* Pending FIFO Control 1 */
};
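
/*
 * These command-port registers are reached indirectly through the
 * MVS_CMD_ADDR/MVS_CMD_DATA window defined above.  A minimal read
 * sketch (illustrative; mvs_cmd_reg_read() is a hypothetical helper,
 * not part of this driver, and assumes the usual "regs" local):
 *
 *	static u32 mvs_cmd_reg_read(struct mvs_info *mvi, u32 addr)
 *	{
 *		void __iomem *regs = mvi->regs;
 *
 *		mw32(CMD_ADDR, addr);	select, e.g. CMD_PHY_TIMER
 *		return mr32(CMD_DATA);	read through the data window
 *	}
 */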
/* SAS/SATA configuration port registers, aka phy registers */
enum sas_sata_config_port_regs {
	PHYR_IDENTIFY		= 0x00,	/* info for IDENTIFY frame */
	PHYR_ADDR_LO		= 0x04,	/* my SAS address (low) */
	PHYR_ADDR_HI		= 0x08,	/* my SAS address (high) */
	PHYR_ATT_DEV_INFO	= 0x0C,	/* attached device info */
	PHYR_ATT_ADDR_LO	= 0x10,	/* attached dev SAS addr (low) */
	PHYR_ATT_ADDR_HI	= 0x14,	/* attached dev SAS addr (high) */
	PHYR_SATA_CTL		= 0x18,	/* SATA control */
	PHYR_PHY_STAT		= 0x1C,	/* PHY status */
	PHYR_SATA_SIG0		= 0x20,	/* port SATA signature FIS (Byte 0-3) */
	PHYR_SATA_SIG1		= 0x24,	/* port SATA signature FIS (Byte 4-7) */
	PHYR_SATA_SIG2		= 0x28,	/* port SATA signature FIS (Byte 8-11) */
	PHYR_SATA_SIG3		= 0x2c,	/* port SATA signature FIS (Byte 12-15) */
	PHYR_R_ERR_COUNT	= 0x30,	/* port R_ERR count register */
	PHYR_CRC_ERR_COUNT	= 0x34,	/* port CRC error count register */
	PHYR_WIDE_PORT		= 0x38,	/* wide port participating */
	PHYR_CURRENT0		= 0x80,	/* current connection info 0 */
	PHYR_CURRENT1		= 0x84,	/* current connection info 1 */
	PHYR_CURRENT2		= 0x88,	/* current connection info 2 */
};
/* SAS/SATA Vendor Specific Port Registers */
enum sas_sata_vsp_regs {
	VSR_PHY_STAT		= 0x00,	/* Phy Status */
	VSR_PHY_MODE1		= 0x01,	/* phy tx */
	VSR_PHY_MODE2		= 0x02,	/* tx scc */
	VSR_PHY_MODE3		= 0x03,	/* pll */
	VSR_PHY_MODE4		= 0x04,	/* VCO */
	VSR_PHY_MODE5		= 0x05,	/* Rx */
	VSR_PHY_MODE6		= 0x06,	/* CDR */
	VSR_PHY_MODE7		= 0x07,	/* Impedance */
	VSR_PHY_MODE8		= 0x08,	/* Voltage */
	VSR_PHY_MODE9		= 0x09,	/* Test */
	VSR_PHY_MODE10		= 0x0A,	/* Power */
	VSR_PHY_MODE11		= 0x0B,	/* Phy Mode */
	VSR_PHY_VS0		= 0x0C,	/* Vendor Specific 0 */
	VSR_PHY_VS1		= 0x0D,	/* Vendor Specific 1 */
};
enum pci_cfg_registers {
	PCR_PHY_CTL		= 0x40,
	PCR_PHY_CTL2		= 0x90,
	PCR_DEV_CTRL		= 0xE8,
};

enum pci_cfg_register_bits {
	PCTL_PWR_ON		= (0xFU << 24),
	PCTL_OFF		= (0xFU << 12),
	PRD_REQ_SIZE		= (0x4000),
	PRD_REQ_MASK		= (0x00007000),
};

enum nvram_layout_offsets {
	NVR_SIG			= 0x00,	/* 0xAA, 0x55 */
	NVR_SAS_ADDR		= 0x02,	/* 8-byte SAS address */
};

enum chip_flavors {
	chip_6320,
	chip_6440,
	chip_6480,
};

enum port_type {
	PORT_TYPE_SAS	= (1L << 1),
	PORT_TYPE_SATA	= (1L << 0),
};
/* Command Table Format */
enum ct_format {
	/* SSP */
	SSP_F_H		= 0x00,
	SSP_F_IU	= 0x18,
	SSP_F_MAX	= 0x4D,
	/* STP */
	STP_CMD_FIS	= 0x00,
	STP_ATAPI_CMD	= 0x40,
	STP_F_MAX	= 0x10,
	/* SMP */
	SMP_F_T		= 0x00,
	SMP_F_DEP	= 0x01,
	SMP_F_MAX	= 0x101,
};

enum status_buffer {
	SB_EIR_OFF	= 0x00,		/* Error Information Record */
	SB_RFB_OFF	= 0x08,		/* Response Frame Buffer */
	SB_RFB_MAX	= 0x400,	/* RFB size */
};

enum error_info_rec {
	CMD_ISS_STPD	= (1U << 31),	/* Cmd Issue Stopped */
	CMD_PI_ERR	= (1U << 30),	/* Protection info error.  see flags2 */
	RSP_OVER	= (1U << 29),	/* rsp buffer overflow */
	RETRY_LIM	= (1U << 28),	/* FIS/frame retry limit exceeded */
	UNK_FIS		= (1U << 27),	/* unknown FIS */
	DMA_TERM	= (1U << 26),	/* DMA terminate primitive rx'd */
	SYNC_ERR	= (1U << 25),	/* SYNC rx'd during frame xmit */
	TFILE_ERR	= (1U << 24),	/* SATA taskfile Error bit set */
	R_ERR		= (1U << 23),	/* SATA returned R_ERR prim */
	RD_OFS		= (1U << 20),	/* Read DATA frame invalid offset */
	XFER_RDY_OFS	= (1U << 19),	/* XFER_RDY offset error */
	UNEXP_XFER_RDY	= (1U << 18),	/* unexpected XFER_RDY error */
	DATA_OVER_UNDER	= (1U << 16),	/* data overflow/underflow */
	INTERLOCK	= (1U << 15),	/* interlock error */
	NAK		= (1U << 14),	/* NAK rx'd */
	ACK_NAK_TO	= (1U << 13),	/* ACK/NAK timeout */
	CXN_CLOSED	= (1U << 12),	/* cxn closed w/out ack/nak */
	OPEN_TO		= (1U << 11),	/* I_T nexus lost, open cxn timeout */
	PATH_BLOCKED	= (1U << 10),	/* I_T nexus lost, pathway blocked */
	NO_DEST		= (1U << 9),	/* I_T nexus lost, no destination */
	STP_RES_BSY	= (1U << 8),	/* STP resources busy */
	BREAK		= (1U << 7),	/* break received */
	BAD_DEST	= (1U << 6),	/* bad destination */
	BAD_PROTO	= (1U << 5),	/* protocol not supported */
	BAD_RATE	= (1U << 4),	/* cxn rate not supported */
	WRONG_DEST	= (1U << 3),	/* wrong destination error */
	CREDIT_TO	= (1U << 2),	/* credit timeout */
	WDOG_TO		= (1U << 1),	/* watchdog timeout */
	BUF_PAR		= (1U << 0),	/* buffer parity error */
};

enum error_info_rec_2 {
	SLOT_BSY_ERR	= (1U << 31),	/* Slot Busy Error */
	GRD_CHK_ERR	= (1U << 14),	/* Guard Check Error */
	APP_CHK_ERR	= (1U << 13),	/* Application Check error */
	REF_CHK_ERR	= (1U << 12),	/* Reference Check Error */
	USR_BLK_NM	= (1U << 0),	/* User Block Number */
};
struct mvs_chip_info {
	u32		n_phy;
	u32		srs_sz;
	u32		slot_width;
};

struct mvs_err_info {
	__le32		flags;
	__le32		flags2;
};

struct mvs_prd {
	__le64		addr;		/* 64-bit buffer address */
	__le32		reserved;
	__le32		len;		/* 16-bit length */
};

struct mvs_cmd_hdr {
	__le32		flags;		/* PRD tbl len; SAS, SATA ctl */
	__le32		lens;		/* cmd, max resp frame len */
	__le32		tags;		/* targ port xfer tag; tag */
	__le32		data_len;	/* data xfer len */
	__le64		cmd_tbl;	/* command table address */
	__le64		open_frame;	/* open addr frame address */
	__le64		status_buf;	/* status buffer address */
	__le64		prd_tbl;	/* PRD tbl address */
	__le32		reserved[4];
};

struct mvs_port {
	struct asd_sas_port	sas_port;
	u8			port_attached;
	u8			taskfileset;
	u8			wide_port_phymap;
	struct list_head	list;
};

struct mvs_phy {
	struct mvs_port		*port;
	struct asd_sas_phy	sas_phy;
	struct sas_identify	identify;
	struct scsi_device	*sdev;
	u64			dev_sas_addr;
	u64			att_dev_sas_addr;
	u32			att_dev_info;
	u32			dev_info;
	u32			phy_type;
	u32			phy_status;
	u32			irq_status;
	u32			frame_rcvd_size;
	u8			frame_rcvd[32];
	u8			phy_attached;
	enum sas_linkrate	minimum_linkrate;
	enum sas_linkrate	maximum_linkrate;
};

struct mvs_slot_info {
	struct list_head	list;
	struct sas_task		*task;
	u32			n_elem;
	u32			tx;

	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
	 * and PRD table
	 */
	void			*buf;
	dma_addr_t		buf_dma;
#if _MV_DUMP
	u32			cmd_size;
#endif

	void			*response;
	struct mvs_port		*port;
};
struct mvs_info {
	unsigned long		flags;

	spinlock_t		lock;		/* host-wide lock */
	struct pci_dev		*pdev;		/* our device */
	void __iomem		*regs;		/* enhanced mode registers */
	void __iomem		*peri_regs;	/* peripheral registers */

	u8			sas_addr[SAS_ADDR_SIZE];
	struct sas_ha_struct	sas;		/* SCSI/SAS glue */
	struct Scsi_Host	*shost;

	__le32			*tx;		/* TX (delivery) DMA ring */
	dma_addr_t		tx_dma;
	u32			tx_prod;	/* cached next-producer idx */

	__le32			*rx;		/* RX (completion) DMA ring */
	dma_addr_t		rx_dma;
	u32			rx_cons;	/* RX consumer idx */

	__le32			*rx_fis;	/* RX'd FIS area */
	dma_addr_t		rx_fis_dma;

	struct mvs_cmd_hdr	*slot;		/* DMA command header slots */
	dma_addr_t		slot_dma;

	const struct mvs_chip_info *chip;

	u8			tags[MVS_SLOTS];
	struct mvs_slot_info	slot_info[MVS_SLOTS];
				/* further per-slot information */
	struct mvs_phy		phy[MVS_MAX_PHYS];
	struct mvs_port		port[MVS_MAX_PHYS];
#ifdef MVS_USE_TASKLET
	struct tasklet_struct	tasklet;
#endif
};
static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			   void *funcdata);
static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
static void mvs_detect_porttype(struct mvs_info *mvi, int i);
static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
static void mvs_release_task(struct mvs_info *mvi, int phy_no);
static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
static void mvs_scan_start(struct Scsi_Host *);
static int mvs_slave_configure(struct scsi_device *sdev);

static struct scsi_transport_template *mvs_stt;

static const struct mvs_chip_info mvs_chips[] = {
	[chip_6320] = { 2, 16, 9 },
	[chip_6440] = { 4, 16, 9 },
	[chip_6480] = { 8, 32, 10 },
};
static struct scsi_host_template mvs_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= mvs_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.scan_finished		= mvs_scan_finished,
	.scan_start		= mvs_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler = sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.slave_alloc		= sas_slave_alloc,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
{
	u32 i;
	u32 run;
	u32 offset;

	offset = 0;
	while (size) {
		printk("%08X : ", baseaddr + offset);
		if (size >= 16)
			run = 16;
		else
			run = size;
		size -= run;
		for (i = 0; i < 16; i++) {
			if (i < run)
				printk("%02X ", (u32)data[i]);
			else
				printk("   ");	/* pad to keep columns aligned */
		}
		printk(": ");
		for (i = 0; i < run; i++)
			printk("%c", isalnum(data[i]) ? data[i] : '.');
		printk("\n");
		data = &data[16];
		offset += run;
	}
	printk("\n");
}
#if _MV_DUMP
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
			    enum sas_protocol proto)
{
	u32 offset;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	offset = slot->cmd_size + MVS_OAF_SZ +
	    sizeof(struct mvs_prd) * slot->n_elem;
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
			tag);
	mvs_hexdump(32, (u8 *) slot->response,
		    (u32) slot->buf_dma + offset);
}
#endif
static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
				enum sas_protocol proto)
{
#if _MV_DUMP
	u32 sz, w_ptr;
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/* Delivery Queue */
	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
	w_ptr = slot->tx;
	addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Size=%04d, WRT_PTR=%04X\n", sz, w_ptr);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Base Address=0x%llX (PA)"
		"(tx_dma=0x%llX), Entry=%04d\n",
		addr, mvi->tx_dma, w_ptr);
	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
	/* Command List */
	addr = mvi->slot_dma;
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Command List Base Address=0x%llX (PA)"
		"(slot_dma=0x%llX), Header=%03d\n",
		addr, slot->buf_dma, tag);
	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
	/* mvs_cmd_hdr */
	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
	/* 1. command table area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
	/* 2. open address frame area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
				(u32) slot->buf_dma + slot->cmd_size);
	/* 3. status buffer */
	mvs_hba_sb_dump(mvi, tag, proto);
	/* 4. PRD table */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
	mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
#endif
}
static void mvs_hba_cq_dump(struct mvs_info *mvi)
{
#if (_MV_DUMP > 2)
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	u32 entry = mvi->rx_cons + 1;
	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);

	/* Completion Queue */
	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Completion List Base Address=0x%llX (PA), "
		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
		addr, entry - 1, mvi->rx[0]);
	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
		    mvi->rx_dma + sizeof(u32) * entry);
#endif
}
static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(GBL_CTL);

	mw32(GBL_CTL, tmp | INT_EN);
}

static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(GBL_CTL);

	mw32(GBL_CTL, tmp & ~INT_EN);
}

static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
{
	if (task->lldd_task) {
		struct mvs_slot_info *slot;
		slot = (struct mvs_slot_info *) task->lldd_task;
		*tag = slot - mvi->slot_info;
		return 1;
	}
	return 0;
}

static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
{
	void *bitmap = (void *) &mvi->tags;
	clear_bit(tag, bitmap);
}

static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvs_tag_clear(mvi, tag);
}

static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
{
	void *bitmap = (void *) &mvi->tags;
	set_bit(tag, bitmap);
}

static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
{
	unsigned int index, tag;
	void *bitmap = (void *) &mvi->tags;

	index = find_first_zero_bit(bitmap, MVS_SLOTS);
	tag = index;
	if (tag >= MVS_SLOTS)
		return -SAS_QUEUE_FULL;
	mvs_tag_set(mvi, tag);
	*tag_out = tag;
	return 0;
}

static void mvs_tag_init(struct mvs_info *mvi)
{
	int i;
	for (i = 0; i < MVS_SLOTS; ++i)
		mvs_tag_clear(mvi, i);
}
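
/*
 * Typical tag lifecycle (illustrative sketch, using only the helpers
 * above): a tag indexes mvi->slot_info[] and must be returned to the
 * bitmap when the command completes:
 *
 *	u32 tag;
 *	if (mvs_tag_alloc(mvi, &tag))	fails with -SAS_QUEUE_FULL
 *		return -SAS_QUEUE_FULL;
 *	... build the command in mvi->slot_info[tag] ...
 *	mvs_tag_free(mvi, tag);		slot is reusable again
 */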
#ifndef MVS_DISABLE_NVRAM
static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
{
	int timeout = 1000;

	if (addr & ~SPI_ADDR_MASK)
		return -EINVAL;

	writel(addr, regs + SPI_CMD);
	writel(TWSI_RD, regs + SPI_CTL);

	while (timeout-- > 0) {
		if (readl(regs + SPI_CTL) & TWSI_RDY) {
			*data = readl(regs + SPI_DATA);
			return 0;
		}

		udelay(10);
	}

	return -EBUSY;
}

static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
			    void *buf, u32 buflen)
{
	u32 addr_end, tmp_addr, i, j;
	u32 tmp = 0;
	int rc;
	u8 *tmp8, *buf8 = buf;

	addr_end = addr + buflen;
	tmp_addr = ALIGN(addr, 4);
	if (addr > 0xff)
		return -EINVAL;

	j = addr & 0x3;
	if (j) {
		rc = mvs_eep_read(regs, tmp_addr, &tmp);
		if (rc)
			return rc;

		tmp8 = (u8 *)&tmp;
		for (i = j; i < 4; i++)
			*buf8++ = tmp8[i];

		tmp_addr += 4;
	}

	for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
		rc = mvs_eep_read(regs, tmp_addr, &tmp);
		if (rc)
			return rc;

		memcpy(buf8, &tmp, 4);
		buf8 += 4;
	}

	if (tmp_addr < addr_end) {
		rc = mvs_eep_read(regs, tmp_addr, &tmp);
		if (rc)
			return rc;

		tmp8 = (u8 *)&tmp;
		j = addr_end - tmp_addr;
		for (i = 0; i < j; i++)
			*buf8++ = tmp8[i];

		tmp_addr += 4;
	}

	return 0;
}
#endif
static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
			  void *buf, u32 buflen)
{
#ifndef MVS_DISABLE_NVRAM
	void __iomem *regs = mvi->regs;
	int rc, i;
	u32 sum;
	u8 hdr[2], *tmp;
	const char *msg;

	rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
	if (rc) {
		msg = "nvram hdr read failed";
		goto err_out;
	}
	rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
	if (rc) {
		msg = "nvram read failed";
		goto err_out;
	}

	if (hdr[0] != 0x5A) {
		/* entry id */
		msg = "invalid nvram entry id";
		rc = -ENOENT;
		goto err_out;
	}

	tmp = buf;
	sum = ((u32)hdr[0]) + ((u32)hdr[1]);
	for (i = 0; i < buflen; i++)
		sum += ((u32)tmp[i]);

	if (sum) {
		msg = "nvram checksum failure";
		rc = -EILSEQ;
		goto err_out;
	}

	return 0;

err_out:
	dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
	return rc;
#else
	/* FIXME: for SAS target mode */
	memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
	return 0;
#endif
}
static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];

	if (!phy->phy_attached)
		return;

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* TODO */
	}
	mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
	mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
				   PORTE_BYTES_DMAED);
}
static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	/* give the phy enabling interrupt event time to come in (1s
	 * is empirically about all it takes) */
	if (time < HZ)
		return 0;
	/* Wait for discovery to finish */
	scsi_flush_work(shost);
	return 1;
}

static void mvs_scan_start(struct Scsi_Host *shost)
{
	int i;
	struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;

	for (i = 0; i < mvi->chip->n_phy; ++i) {
		mvs_bytes_dmaed(mvi, i);
	}
}

static int mvs_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;

	if (dev_is_sata(dev)) {
		/* struct ata_port *ap = dev->sata_dev.ap; */
		/* struct ata_device *adev = ap->link.device; */

		/* clamp at no NCQ for the time being */
		/* adev->flags |= ATA_DFLAG_NCQ_OFF; */
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
	}
	return 0;
}
static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	struct pci_dev *pdev = mvi->pdev;
	struct sas_ha_struct *sas_ha = &mvi->sas;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
	/*
	 * events signals a port-level event here; we still need to read
	 * the per-port interrupt status to see what actually happened.
	 */
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Port %d Event = %X\n",
		phy_no, phy->irq_status);

	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
		mvs_release_task(mvi, phy_no);
		if (!mvs_is_phy_ready(mvi, phy_no)) {
			sas_phy_disconnected(sas_phy);
			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
			dev_printk(KERN_INFO, &pdev->dev,
				"Port %d Unplug Notice\n", phy_no);
		} else
			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
	}

	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
		if (phy->irq_status & PHYEV_COMWAKE) {
			u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
			mvs_write_port_irq_mask(mvi, phy_no,
						tmp | PHYEV_SIG_FIS);
		}
		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
			phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
			if (phy->phy_status) {
				mvs_detect_porttype(mvi, phy_no);

				if (phy->phy_type & PORT_TYPE_SATA) {
					u32 tmp = mvs_read_port_irq_mask(mvi,
								phy_no);
					tmp &= ~PHYEV_SIG_FIS;
					mvs_write_port_irq_mask(mvi,
								phy_no, tmp);
				}

				mvs_update_phyinfo(mvi, phy_no, 0);
				sas_ha->notify_phy_event(sas_phy,
							PHYE_OOB_DONE);
				mvs_bytes_dmaed(mvi, phy_no);
			} else {
				dev_printk(KERN_DEBUG, &pdev->dev,
					"plugin interrupt but phy is gone\n");
				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
							NULL);
			}
		} else if (phy->irq_status & PHYEV_BROAD_CH) {
			mvs_release_task(mvi, phy_no);
			sas_ha->notify_port_event(sas_phy,
						PORTE_BROADCAST_RCVD);
		}
	}
	mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
}
static void mvs_int_sata(struct mvs_info *mvi)
{
	u32 tmp;
	void __iomem *regs = mvi->regs;
	tmp = mr32(INT_STAT_SRS);
	mw32(INT_STAT_SRS, tmp & 0xFFFF);
}

static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
			   u32 slot_idx)
{
	void __iomem *regs = mvi->regs;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_port *port = mvi->slot_info[slot_idx].port;
	u32 reg_set, phy_mask;

	if (!sas_protocol_ata(task->task_proto)) {
		reg_set = 0;
		phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
				sas_port->phy_mask;
	} else {
		reg_set = port->taskfileset;
		phy_mask = sas_port->phy_mask;
	}
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
					(TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
					(phy_mask << TXQ_PHY_SHIFT) |
					(reg_set << TXQ_SRS_SHIFT));
	mw32(TX_PROD_IDX, mvi->tx_prod);
	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
}
static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
			 u32 slot_idx, int err)
{
	struct mvs_port *port = mvi->slot_info[slot_idx].port;
	struct task_status_struct *tstat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
	int stat = SAM_GOOD;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0],
	       SATA_RECEIVED_D2H_FIS(port->taskfileset),
	       sizeof(struct dev_to_host_fis));
	tstat->buf_valid_size = sizeof(*resp);
	if (unlikely(err))
		stat = SAS_PROTO_RESPONSE;
	return stat;
}

static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	mvs_tag_clear(mvi, slot_idx);
}
  1109. static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
  1110. struct mvs_slot_info *slot, u32 slot_idx)
  1111. {
  1112. if (!sas_protocol_ata(task->task_proto))
  1113. if (slot->n_elem)
  1114. pci_unmap_sg(mvi->pdev, task->scatter,
  1115. slot->n_elem, task->data_dir);
  1116. switch (task->task_proto) {
  1117. case SAS_PROTOCOL_SMP:
  1118. pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
  1119. PCI_DMA_FROMDEVICE);
  1120. pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
  1121. PCI_DMA_TODEVICE);
  1122. break;
  1123. case SAS_PROTOCOL_SATA:
  1124. case SAS_PROTOCOL_STP:
  1125. case SAS_PROTOCOL_SSP:
  1126. default:
  1127. /* do nothing */
  1128. break;
  1129. }
  1130. list_del(&slot->list);
  1131. task->lldd_task = NULL;
  1132. slot->task = NULL;
  1133. slot->port = NULL;
  1134. }
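/* Decode the first two dwords of a failed slot's error information record.
 * A busy slot is retried via SLOT_RESET and reported as SAS_QUEUE_FULL; an
 * ATA taskfile error returns the D2H FIS via mvs_sata_done() instead.
 */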
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
	u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
	int stat = SAM_CHECK_COND;

	if (err_dw1 & SLOT_BSY_ERR) {
		stat = SAS_QUEUE_FULL;
		mvs_slot_reset(mvi, task, slot_idx);
	}
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		break;
	case SAS_PROTOCOL_SMP:
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		if (err_dw0 & TFILE_ERR)
			stat = mvs_sata_done(mvi, task, slot_idx, 1);
		break;
	default:
		break;
	}

	mvs_hexdump(16, (u8 *) slot->response, 0);
	return stat;
}

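/* Complete one command slot from an RX completion descriptor: update the
 * libsas task status according to the protocol, free the slot, and call
 * task_done with the HBA lock dropped.
 */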
static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct task_status_struct *tstat;
	struct mvs_port *port;
	bool aborted;
	void *to;

	if (unlikely(!task || !task->lldd_task))
		return -1;

	mvs_hba_cq_dump(mvi);

	spin_lock(&task->task_state_lock);
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	if (!aborted) {
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
	}
	spin_unlock(&task->task_state_lock);

	if (aborted) {
		mvs_slot_task_free(mvi, task, slot, slot_idx);
		mvs_slot_free(mvi, rx_desc);
		return -1;
	}

	port = slot->port;
	tstat = &task->task_status;
	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	if (unlikely(!port->port_attached || flags)) {
		mvs_slot_err(mvi, task, slot_idx);
		if (!sas_protocol_ata(task->task_proto))
			tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/* error info record present */
	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu =
			    slot->response + sizeof(struct mvs_err_info);
			sas_ssp_task_response(&mvi->pdev->dev, task, iu);
		}
		/* should never happen? */
		else
			tstat->stat = SAM_CHECK_COND;
		break;

	case SAS_PROTOCOL_SMP: {
		struct scatterlist *sg_resp = &task->smp_task.smp_resp;
		tstat->stat = SAM_GOOD;
		to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
		memcpy(to + sg_resp->offset,
		       slot->response + sizeof(struct mvs_err_info),
		       sg_dma_len(sg_resp));
		kunmap_atomic(to, KM_IRQ0);
		break;
	}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
		tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
		break;
	}

	default:
		tstat->stat = SAM_CHECK_COND;
		break;
	}

out:
	mvs_slot_task_free(mvi, task, slot, slot_idx);
	if (unlikely(tstat->stat != SAS_QUEUE_FULL))
		mvs_slot_free(mvi, rx_desc);

	spin_unlock(&mvi->lock);
	task->task_done(task);
	spin_lock(&mvi->lock);
	return tstat->stat;
}

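/* Force-complete every slot still queued on the port behind phy_no, e.g.
 * after the phy went away or a broadcast change was received.
 */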
static void mvs_release_task(struct mvs_info *mvi, int phy_no)
{
	struct list_head *pos, *n;
	struct mvs_slot_info *slot;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct mvs_port *port = phy->port;
	u32 rx_desc;

	if (!port)
		return;

	list_for_each_safe(pos, n, &port->list) {
		slot = container_of(pos, struct mvs_slot_info, list);
		rx_desc = (u32) (slot - mvi->slot_info);
		mvs_slot_complete(mvi, rx_desc, 1);
	}
}

static void mvs_int_full(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, stat;
	int i;

	stat = mr32(INT_STAT);

	mvs_int_rx(mvi, false);

	for (i = 0; i < MVS_MAX_PORTS; i++) {
		tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
		if (tmp)
			mvs_int_port(mvi, i, tmp);
	}

	if (stat & CINT_SRS)
		mvs_int_sata(mvi);

	mw32(INT_STAT, stat);
}

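/* Drain the RX (completion) ring.  With self_clear set, an ATTN descriptor
 * escalates to full interrupt processing via mvs_int_full().
 */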
static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	void __iomem *regs = mvi->regs;
	u32 rx_prod_idx, rx_desc;
	bool attn = false;
	struct pci_dev *pdev = mvi->pdev;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mvi->rx_cons;
	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
		return 0;

	/* The completion entry may arrive later than the mirror update:
	 * re-read the consumer index from the register and try again.
	 * Note: with interrupt coalescing enabled, the register must be
	 * read every time.
	 */
	if (mvi->rx_cons == rx_prod_idx)
		mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;

	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	while (mvi->rx_cons != rx_prod_idx) {
		/* increment our internal RX consumer pointer */
		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);

		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc, 0);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
			dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
				   rx_desc);
		} else if (rx_desc & RXQ_ERR) {
			if (!(rx_desc & RXQ_DONE))
				mvs_slot_complete(mvi, rx_desc, 0);
			dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
				   rx_desc);
		} else if (rx_desc & RXQ_SLOT_RESET) {
			dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
				   rx_desc);
			mvs_slot_free(mvi, rx_desc);
		}
	}

	if (attn && self_clear)
		mvs_int_full(mvi);

	return 0;
}

#ifdef MVS_USE_TASKLET
static void mvs_tasklet(unsigned long data)
{
	struct mvs_info *mvi = (struct mvs_info *) data;
	unsigned long flags;

	spin_lock_irqsave(&mvi->lock, flags);

#ifdef MVS_DISABLE_MSI
	mvs_int_full(mvi);
#else
	mvs_int_rx(mvi, true);
#endif
	spin_unlock_irqrestore(&mvi->lock, flags);
}
#endif

static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
	struct mvs_info *mvi = opaque;
	void __iomem *regs = mvi->regs;
	u32 stat;

	stat = mr32(GBL_INT_STAT);

	if (stat == 0 || stat == 0xffffffff)
		return IRQ_NONE;

	/* clear CMD_CMPLT ASAP */
	mw32_f(INT_STAT, CINT_DONE);

#ifndef MVS_USE_TASKLET
	spin_lock(&mvi->lock);

	mvs_int_full(mvi);

	spin_unlock(&mvi->lock);
#else
	tasklet_schedule(&mvi->tasklet);
#endif
	return IRQ_HANDLED;
}

#ifndef MVS_DISABLE_MSI
static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
{
	struct mvs_info *mvi = opaque;

#ifndef MVS_USE_TASKLET
	spin_lock(&mvi->lock);

	mvs_int_rx(mvi, true);

	spin_unlock(&mvi->lock);
#else
	tasklet_schedule(&mvi->tasklet);
#endif
	return IRQ_HANDLED;
}
#endif

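/* Per-command state handed from mvs_task_exec() to the protocol-specific
 * prep routines below.
 */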
struct mvs_task_exec_info {
	struct sas_task *task;
	struct mvs_cmd_hdr *hdr;
	struct mvs_port *port;
	u32 tag;
	int n_elem;
};

static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	struct mvs_prd *buf_prd;
	struct scatterlist *sg;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	struct asd_sas_port *sas_port = task->dev->port;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#if _MV_DUMP
	u8 *buf_cmd;
	void *from;
#endif
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = sg_dma_len(sg_resp);

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

#if _MV_DUMP
	buf_cmd = buf_tmp;
	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
	buf_tmp += req_len;
	buf_tmp_dma += req_len;
	slot->cmd_size = req_len;
#else
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD table, the smaller
	 * this buffer) ******
	 */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

#if _MV_DUMP
	/* copy cmd table */
	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
	memcpy(buf_cmd, from + sg_req->offset, req_len);
	kunmap_atomic(from, KM_IRQ0);
#endif
	return 0;

err_out_2:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}

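/* SATA register-set (taskfileset) management.  Per the code below, the
 * enable bits live in the PCS register for sets 0-15 and in CTL for higher
 * sets; freeing a set also acknowledges any interrupt still pending on it.
 */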
static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, offs;
	u8 *tfs = &port->taskfileset;

	if (*tfs == MVS_ID_NOT_MAPPED)
		return;

	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
	if (*tfs < 16) {
		tmp = mr32(PCS);
		mw32(PCS, tmp & ~offs);
	} else {
		tmp = mr32(CTL);
		mw32(CTL, tmp & ~offs);
	}

	tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
	if (tmp)
		mw32(INT_STAT_SRS, tmp);

	*tfs = MVS_ID_NOT_MAPPED;
}

static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
{
	int i;
	u32 tmp, offs;
	void __iomem *regs = mvi->regs;

	if (port->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;

	tmp = mr32(PCS);

	for (i = 0; i < mvi->chip->srs_sz; i++) {
		if (i == 16)
			tmp = mr32(CTL);
		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
		if (!(tmp & offs)) {
			port->taskfileset = i;

			if (i < 16)
				mw32(PCS, tmp | offs);
			else
				mw32(CTL, tmp | offs);
			tmp = mr32(INT_STAT_SRS) & (1U << i);
			if (tmp)
				mw32(INT_STAT_SRS, tmp);
			return 0;
		}
	}
	return MVS_ID_NOT_MAPPED;
}

static u32 mvs_get_ncq_tag(struct sas_task *task)
{
	u32 tag = 0;
	struct ata_queued_cmd *qc = task->uldd_task;

	if (qc)
		tag = qc->tag;

	return tag;
}

static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct mvs_port *port = tei->port;
	u32 tag = tei->tag;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
		return -EBUSY;

	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
					(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
					(sas_port->phy_mask << TXQ_PHY_SHIFT) |
					(port->taskfileset << TXQ_SRS_SHIFT));

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	/* FIXME: fill in port multiplier number */

	hdr->flags = cpu_to_le32(flags);

	/* FIXME: the low-order 5 bits hold the TAG when NCQ is enabled */
	if (task->ata_task.use_ncq) {
		hdr->tags = cpu_to_le32(mvs_get_ncq_tag(task));
		/* fill in task file */
		task->ata_task.fis.sector_count = hdr->tags << 3;
	} else
		hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_ATA_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP.  unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD table, the smaller
	 * this buffer) ******
	 */
	/* FIXME: probably unused, for SATA.  kept here just in case
	 * we get a STP/SATA error information record
	 */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
		   sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
		       task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1;	/* initiator, STP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}

static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;

	slot = &mvi->slot_info[tag];

	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(port->wide_port_phymap << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);
	}
	hdr->flags = cpu_to_le32(flags |
				 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
				 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));

	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_SSP_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (the larger the PRD table, the smaller
	 * this buffer) ******
	 */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
		   sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1;	/* initiator, SSP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
	ssp_hdr->frame_type = SSP_COMMAND;
	memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in command frame IU */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	buf_cmd[9] = fburst | task->ssp_task.task_attr |
			(task->ssp_task.task_prio << 3);

	memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}

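/* libsas execute-task entry point: map scatterlists, allocate a slot tag,
 * prepare the protocol-specific command, and ring the TX producer index.
 */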
static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
{
	struct domain_device *dev = task->dev;
	struct mvs_info *mvi = dev->port->ha->lldd_ha;
	struct pci_dev *pdev = mvi->pdev;
	void __iomem *regs = mvi->regs;
	struct mvs_task_exec_info tei;
	struct sas_task *t = task;
	u32 tag = 0xdeadbeef, rc, n_elem = 0;
	unsigned long flags;
	u32 n = num, pass = 0;

	spin_lock_irqsave(&mvi->lock, flags);

	do {
		tei.port = &mvi->port[dev->port->id];

		if (!tei.port->port_attached) {
			struct task_status_struct *ts = &t->task_status;
			ts->stat = SAS_PHY_DOWN;
			t->task_done(t);
			rc = 0;
			goto exec_exit;
		}

		if (!sas_protocol_ata(t->task_proto)) {
			if (t->num_scatter) {
				n_elem = pci_map_sg(mvi->pdev, t->scatter,
						    t->num_scatter,
						    t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out;
				}
			}
		} else {
			n_elem = t->num_scatter;
		}

		rc = mvs_tag_alloc(mvi, &tag);
		if (rc)
			goto err_out;

		mvi->slot_info[tag].task = t;
		mvi->slot_info[tag].n_elem = n_elem;
		memset(mvi->slot_info[tag].buf, 0, MVS_SLOT_BUF_SZ);
		tei.task = t;
		tei.hdr = &mvi->slot[tag];
		tei.tag = tag;
		tei.n_elem = n_elem;

		switch (t->task_proto) {
		case SAS_PROTOCOL_SMP:
			rc = mvs_task_prep_smp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SSP:
			rc = mvs_task_prep_ssp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
			rc = mvs_task_prep_ata(mvi, &tei);
			break;
		default:
			dev_printk(KERN_ERR, &pdev->dev,
				   "unknown sas_task proto: 0x%x\n",
				   t->task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc)
			goto err_out_tag;

		/* TODO: select normal or high priority */

		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);

		if (n == 1) {
			spin_unlock_irqrestore(&mvi->lock, flags);
			mw32(TX_PROD_IDX, mvi->tx_prod);
		}
		mvs_hba_memory_dump(mvi, tag, t->task_proto);

		++pass;
		mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);

		if (n == 1)
			break;

		t = list_entry(t->list.next, struct sas_task, list);
	} while (--n);

	return 0;

err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:
	dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
	if (!sas_protocol_ata(t->task_proto))
		if (n_elem)
			pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
				     t->data_dir);
exec_exit:
	if (pass)
		mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);
	return rc;
}

static int mvs_task_abort(struct sas_task *task)
{
	int rc = 1;
	unsigned long flags;
	struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
	struct pci_dev *pdev = mvi->pdev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		/* drop the lock before bailing out */
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* FIXME */
	rc = TMF_RESP_FUNC_COMPLETE;

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! ");
		break;
	case SAS_PROTOCOL_SSP:
		dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! ");
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
		/* the dump is of the command (H2D) FIS held in the task */
		dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! "
			   "Dump H2D FIS:\n");
		mvs_hexdump(sizeof(struct host_to_dev_fis),
			    (void *)&task->ata_task.fis, 0);
		dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd:\n");
		mvs_hexdump(16, task->ata_task.atapi_packet, 0);
		break;
	}
	default:
		break;
	}
out_done:
	return rc;
}

static void mvs_free(struct mvs_info *mvi)
{
	int i;

	if (!mvi)
		return;

	for (i = 0; i < MVS_SLOTS; i++) {
		struct mvs_slot_info *slot = &mvi->slot_info[i];

		if (slot->buf)
			dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
					  slot->buf, slot->buf_dma);
	}

	if (mvi->tx)
		dma_free_coherent(&mvi->pdev->dev,
				  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				  mvi->tx, mvi->tx_dma);
	if (mvi->rx_fis)
		dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
				  mvi->rx_fis, mvi->rx_fis_dma);
	if (mvi->rx)
		dma_free_coherent(&mvi->pdev->dev,
				  sizeof(*mvi->rx) * MVS_RX_RING_SZ,
				  mvi->rx, mvi->rx_dma);
	if (mvi->slot)
		dma_free_coherent(&mvi->pdev->dev,
				  sizeof(*mvi->slot) * MVS_SLOTS,
				  mvi->slot, mvi->slot_dma);
#ifdef MVS_ENABLE_PERI
	if (mvi->peri_regs)
		iounmap(mvi->peri_regs);
#endif
	if (mvi->regs)
		iounmap(mvi->regs);
	if (mvi->shost)
		scsi_host_put(mvi->shost);
	kfree(mvi->sas.sas_port);
	kfree(mvi->sas.sas_phy);
	kfree(mvi);
}

/* FIXME: locking? */
static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			   void *funcdata)
{
	struct mvs_info *mvi = sas_phy->ha->lldd_ha;
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp;

	tmp = mvs_read_phy_ctl(mvi, phy_id);

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE: {
		struct sas_phy_linkrates *rates = funcdata;
		u32 lrmin = 0, lrmax = 0;

		lrmin = (rates->minimum_linkrate << 8);
		lrmax = (rates->maximum_linkrate << 12);

		if (lrmin) {
			tmp &= ~(0xf << 8);
			tmp |= lrmin;
		}
		if (lrmax) {
			tmp &= ~(0xf << 12);
			tmp |= lrmax;
		}
		mvs_write_phy_ctl(mvi, phy_id, tmp);
		break;
	}

	case PHY_FUNC_HARD_RESET:
		if (tmp & PHY_RST_HARD)
			break;
		mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
		break;

	case PHY_FUNC_LINK_RESET:
		mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
		break;

	case PHY_FUNC_DISABLE:
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
	struct mvs_phy *phy = &mvi->phy[phy_id];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;

	sas_phy->id = phy_id;
	sas_phy->sas_addr = &mvi->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = &mvi->sas;
	sas_phy->lldd_phy = phy;
}

static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
					    const struct pci_device_id *ent)
{
	struct mvs_info *mvi;
	unsigned long res_start, res_len, res_flag;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
	int i;

	/*
	 * alloc and init our per-HBA mvs_info struct
	 */

	mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
	if (!mvi)
		return NULL;

	spin_lock_init(&mvi->lock);
	mvi->pdev = pdev;
	mvi->chip = chip;

	if (pdev->device == 0x6440 && pdev->revision == 0)
		mvi->flags |= MVF_PHY_PWR_FIX;

	/*
	 * alloc and init SCSI, SAS glue
	 */

	mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
	if (!mvi->shost)
		goto err_out;

	arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
	arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port)
		goto err_out;

	for (i = 0; i < MVS_MAX_PHYS; i++) {
		mvs_phy_init(mvi, i);
		arr_phy[i] = &mvi->phy[i].sas_phy;
		arr_port[i] = &mvi->port[i].sas_port;
	}

	SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
	mvi->shost->transportt = mvs_stt;
	mvi->shost->max_id = 21;
	mvi->shost->max_lun = ~0;
	mvi->shost->max_channel = 0;
	mvi->shost->max_cmd_len = 16;

	mvi->sas.sas_ha_name = DRV_NAME;
	mvi->sas.dev = &pdev->dev;
	mvi->sas.lldd_module = THIS_MODULE;
	mvi->sas.sas_addr = &mvi->sas_addr[0];
	mvi->sas.sas_phy = arr_phy;
	mvi->sas.sas_port = arr_port;
	mvi->sas.num_phys = chip->n_phy;
	mvi->sas.lldd_max_execute_num = MVS_CHIP_SLOT_SZ - 1;
	mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
	mvi->can_queue = (MVS_CHIP_SLOT_SZ >> 1) - 1;
	mvi->sas.lldd_ha = mvi;
	mvi->sas.core.shost = mvi->shost;

	mvs_tag_init(mvi);

	/*
	 * ioremap main and peripheral registers
	 */

#ifdef MVS_ENABLE_PERI
	res_start = pci_resource_start(pdev, 2);
	res_len = pci_resource_len(pdev, 2);
	if (!res_start || !res_len)
		goto err_out;

	mvi->peri_regs = ioremap_nocache(res_start, res_len);
	if (!mvi->peri_regs)
		goto err_out;
#endif

	res_start = pci_resource_start(pdev, 4);
	res_len = pci_resource_len(pdev, 4);
	if (!res_start || !res_len)
		goto err_out;

	res_flag = pci_resource_flags(pdev, 4);
	if (res_flag & IORESOURCE_CACHEABLE)
		mvi->regs = ioremap(res_start, res_len);
	else
		mvi->regs = ioremap_nocache(res_start, res_len);

	if (!mvi->regs)
		goto err_out;

	/*
	 * alloc and init our DMA areas
	 */

	mvi->tx = dma_alloc_coherent(&pdev->dev,
				     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				     &mvi->tx_dma, GFP_KERNEL);
	if (!mvi->tx)
		goto err_out;
	memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);

	mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
					 &mvi->rx_fis_dma, GFP_KERNEL);
	if (!mvi->rx_fis)
		goto err_out;
	memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);

	mvi->rx = dma_alloc_coherent(&pdev->dev,
				     sizeof(*mvi->rx) * MVS_RX_RING_SZ,
				     &mvi->rx_dma, GFP_KERNEL);
	if (!mvi->rx)
		goto err_out;
	memset(mvi->rx, 0, sizeof(*mvi->rx) * MVS_RX_RING_SZ);

	mvi->rx[0] = cpu_to_le32(0xfff);
	mvi->rx_cons = 0xfff;

	mvi->slot = dma_alloc_coherent(&pdev->dev,
				       sizeof(*mvi->slot) * MVS_SLOTS,
				       &mvi->slot_dma, GFP_KERNEL);
	if (!mvi->slot)
		goto err_out;
	memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);

	for (i = 0; i < MVS_SLOTS; i++) {
		struct mvs_slot_info *slot = &mvi->slot_info[i];

		slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
					       &slot->buf_dma, GFP_KERNEL);
		if (!slot->buf)
			goto err_out;
		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
	}

	/* finally, read NVRAM to get our SAS address */
	if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
		goto err_out;

	return mvi;

err_out:
	mvs_free(mvi);
	return NULL;
}

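/* Low-level register accessors.  mvs_cr32/mvs_cw32 go through the indirect
 * CMD_ADDR/CMD_DATA window; the per-port helpers pick between the two
 * register banks covering phys 0-3 and 4-7.
 */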
static u32 mvs_cr32(void __iomem *regs, u32 addr)
{
	mw32(CMD_ADDR, addr);
	return mr32(CMD_DATA);
}

static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
{
	mw32(CMD_ADDR, addr);
	mw32(CMD_DATA, val);
}

static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
{
	void __iomem *regs = mvi->regs;
	return (port < 4) ? mr32(P0_SER_CTLSTAT + port * 4) :
			    mr32(P4_SER_CTLSTAT + (port - 4) * 4);
}

static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
{
	void __iomem *regs = mvi->regs;
	if (port < 4)
		mw32(P0_SER_CTLSTAT + port * 4, val);
	else
		mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
}

static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
{
	void __iomem *regs = mvi->regs + off;
	void __iomem *regs2 = mvi->regs + off2;
	return (port < 4) ? readl(regs + port * 8) :
			    readl(regs2 + (port - 4) * 8);
}

static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
			   u32 port, u32 val)
{
	void __iomem *regs = mvi->regs + off;
	void __iomem *regs2 = mvi->regs + off2;
	if (port < 4)
		writel(val, regs + port * 8);
	else
		writel(val, regs2 + (port - 4) * 8);
}

static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
}

static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
}

static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
}

static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
}

static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
}

static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
}

static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
}

static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
}

static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
}

static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
}

static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	/* workaround for SATA R-ERR, to ignore phy glitch */
	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
	tmp &= ~(1 << 9);
	tmp |= (1 << 10);
	mvs_cw32(regs, CMD_PHY_TIMER, tmp);

	/* enable retry 127 times */
	mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);

	/* extend open frame timeout to max */
	tmp = mvs_cr32(regs, CMD_SAS_CTL0);
	tmp &= ~0xffff;
	tmp |= 0x3fff;
	mvs_cw32(regs, CMD_SAS_CTL0, tmp);

	/* workaround for WDTIMEOUT, set to 550 ms */
	mvs_cw32(regs, CMD_WD_TIMER, 0xffffff);

	/* not to halt for different port op during wideport link change */
	mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);

	/* workaround for Seagate disk not-found OOB sequence, recv
	 * COMINIT before sending out COMWAKE
	 */
	tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
	tmp &= 0x0000ffff;
	tmp |= 0x00fa0000;
	mvs_cw32(regs, CMD_PHY_MODE_21, tmp);

	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
	tmp &= 0x1fffffff;
	tmp |= (2U << 29);	/* 8 ms retry */
	mvs_cw32(regs, CMD_PHY_TIMER, tmp);

	/* TEST - for phy decoding error, adjust voltage levels */
	mw32(P0_VSR_ADDR + 0, 0x8);
	mw32(P0_VSR_DATA + 0, 0x2F0);

	mw32(P0_VSR_ADDR + 8, 0x8);
	mw32(P0_VSR_DATA + 8, 0x2F0);

	mw32(P0_VSR_ADDR + 16, 0x8);
	mw32(P0_VSR_DATA + 16, 0x2F0);

	mw32(P0_VSR_ADDR + 24, 0x8);
	mw32(P0_VSR_DATA + 24, 0x2F0);
}

static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(PCS);
	if (mvi->chip->n_phy <= 4)
		tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
	else
		tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
	mw32(PCS, tmp);
}

static void mvs_detect_porttype(struct mvs_info *mvi, int i)
{
	void __iomem *regs = mvi->regs;
	u32 reg;
	struct mvs_phy *phy = &mvi->phy[i];

	/* TODO check & save device type */
	reg = mr32(GBL_PORT_TYPE);

	if (reg & MODE_SAS_SATA & (1 << i))
		phy->phy_type |= PORT_TYPE_SAS;
	else
		phy->phy_type |= PORT_TYPE_SATA;
}

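/* Read the latched SATA signature FIS (four dwords, PHYR_SATA_SIG0-3) for
 * phy i into buf.
 */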
static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
{
	u32 *s = (u32 *) buf;

	if (!s)
		return NULL;

	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
	s[3] = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
	s[2] = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
	s[1] = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
	s[0] = mvs_read_port_cfg_data(mvi, i);

	return (void *)s;
}

static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}

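/* Propagate the port's wide-port phy map to every member phy; phys that
 * are not members get their PHYR_WIDE_PORT register cleared.
 */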
static void mvs_update_wideport(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
		if (no & 1) {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no, 0);
		}
}

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port;

	tmp = mvs_read_phy_ctl(mvi, i);

	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!phy->port)
			phy->phy_attached = 1;
		return tmp;
	}

	port = phy->port;
	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		mvs_free_reg_set(mvi, phy->port);
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}
	return 0;
}

static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
			       int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct pci_dev *pdev = mvi->pdev;
	u32 tmp, j;
	u64 tmp64;

	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
	phy->dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
	phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
	phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);

	if (get_st) {
		phy->irq_status = mvs_read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		u32 phy_st;
		struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];

		mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
		phy_st = mvs_read_port_cfg_data(mvi, i);

		sas_phy->linkrate =
			(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
				PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;

		/* Updated attached_sas_addr */
		mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
		phy->att_dev_sas_addr =
				(u64) mvs_read_port_cfg_data(mvi, i) << 32;

		mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
		phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);

		dev_printk(KERN_DEBUG, &pdev->dev,
			   "phy[%d] Get Attached Address 0x%llX,"
			   " SAS Address 0x%llX\n",
			   i, phy->att_dev_sas_addr, phy->dev_sas_addr);
		dev_printk(KERN_DEBUG, &pdev->dev,
			   "Rate = %x, type = %d\n",
			   sas_phy->linkrate, phy->phy_type);

#if 1
		/*
		 * If the device is capable of supporting a wide port
		 * on its phys, it may configure the phys as a wide port.
		 */
		if (phy->phy_type & PORT_TYPE_SAS)
			for (j = 0; j < mvi->chip->n_phy && j != i; ++j) {
				if ((mvi->phy[j].phy_attached) &&
				    (mvi->phy[j].phy_type & PORT_TYPE_SAS))
					if (phy->att_dev_sas_addr ==
					    mvi->phy[j].att_dev_sas_addr - 1) {
						phy->att_dev_sas_addr =
						    mvi->phy[j].att_dev_sas_addr;
						break;
					}
			}
#endif

		tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
		memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);

		if (phy->phy_type & PORT_TYPE_SAS) {
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
			phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
			phy->identify.device_type =
				phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
					SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
					SAS_PROTOCOL_SMP;
			if (phy_st & PHY_OOB_DTCTD)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
				sizeof(struct sas_identify_frame);
		} else if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				if (phy_st & PHY_OOB_DTCTD)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
					sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i,
						(void *)sas_phy->frame_rcvd);
			} else {
				dev_printk(KERN_DEBUG, &pdev->dev,
					   "No sig fis\n");
			}
		}

		/* workaround for HW phy decoding error on 1.5g disk drive */
		mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
		tmp = mvs_read_port_vsr_data(mvi, i);
		if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
		     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
			SAS_LINK_RATE_1_5_GBPS)
			tmp &= ~PHY_MODE6_DTL_SPEED;
		else
			tmp |= PHY_MODE6_DTL_SPEED;
		mvs_write_port_vsr_data(mvi, i, tmp);
	}
	if (get_st)
		mvs_write_port_irq_stat(mvi, i, phy->irq_status);
}

static void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = sas_ha->lldd_ha;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct mvs_port *port = &mvi->port[sas_port->id];
	unsigned long flags;

	spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	port->taskfileset = MVS_ID_NOT_MAPPED;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mvs_update_wideport(mvi, sas_phy->id);
	}
	spin_unlock_irqrestore(&mvi->lock, flags);
}

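/* One-time controller bring-up: reset the HBA, apply the phy workarounds,
 * program the ring and command-list base addresses, reset and unmask the
 * phys, and finally enable the TX/RX engines and interrupts.
 */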
static int __devinit mvs_hw_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(GBL_CTL, 0);
	tmp = mr32(GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		if (mvi->flags & MVF_PHY_PWR_FIX) {
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
			tmp &= ~PCTL_PWR_ON;
			tmp |= PCTL_OFF;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
			tmp &= ~PCTL_PWR_ON;
			tmp |= PCTL_OFF;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
		}

		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
		mw32_f(GBL_CTL, HBA_RST);
	}

	/* wait for reset to finish; timeout is just a guess */
	i = 1000;
	while (i-- > 0) {
		msleep(10);

		if (!(mr32(GBL_CTL) & HBA_RST))
			break;
	}
	if (mr32(GBL_CTL) & HBA_RST) {
		dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
		return -EBUSY;
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(CTL);
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(CTL, cctl | CCTL_RST);

	/* write to device control _AND_ device status register? - A.C. */
	pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
	tmp &= ~PRD_REQ_MASK;
	tmp |= PRD_REQ_SIZE;
	pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
	tmp |= PCTL_PWR_ON;
	tmp &= ~PCTL_OFF;
	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
	tmp |= PCTL_PWR_ON;
	tmp &= ~PCTL_OFF;
	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);

	mw32_f(CTL, cctl);

	/* reset control */
	mw32(PCS, 0);		/*MVS_PCS */

	mvs_phy_hacks(mvi);

	mw32(CMD_LIST_LO, mvi->slot_dma);
	mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(RX_FIS_LO, mvi->rx_fis_dma);
	mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(TX_LO, mvi->tx_dma);
	mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(RX_CFG, MVS_RX_RING_SZ);
	mw32(RX_LO, mvi->rx_dma);
	mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);

	/* enable auto port detection */
	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
	msleep(100);

	/* init and reset phys */
	for (i = 0; i < mvi->chip->n_phy; i++) {
		u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
		u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);

		mvs_detect_porttype(mvi, i);

		/* set phy local SAS address */
		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
		mvs_write_port_cfg_data(mvi, i, lo);
		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
		mvs_write_port_cfg_data(mvi, i, hi);

		/* reset phy */
		tmp = mvs_read_phy_ctl(mvi, i);
		tmp |= PHY_RST;
		mvs_write_phy_ctl(mvi, i, tmp);
	}

	msleep(100);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
		mvs_enable_xmt(mvi, i);
	}

	/* FIXME: update wide port bitmaps */

	/* little endian for open address and command table, etc. */
	/* A.C.
	 * it seems that (from the spec) turning on big-endian won't
	 * do us any good on big-endian machines, need further confirmation
	 */
	cctl = mr32(CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(PCS);
	tmp |= PCS_CMD_RST;
	mw32(PCS, tmp);

	/* interrupt coalescing may cause missing HW interrupts in some cases,
	 * and the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	tmp = 0;
	mw32(INT_COAL, tmp);

	tmp = 0x100;
	mw32(INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(TX_CFG, 0);
	mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);

	/* re-enable interrupts globally */
	mvs_hba_interrupt_enable(mvi);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM);
	mw32(INT_MASK, tmp);

	return 0;
}

static void __devinit mvs_print_info(struct mvs_info *mvi)
{
	struct pci_dev *pdev = mvi->pdev;
	static int printed_version;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
		   mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
}

static int __devinit mvs_pci_init(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	int rc;
	struct mvs_info *mvi;
	irq_handler_t irq_handler = mvs_interrupt;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	pci_set_master(pdev);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out_disable;

	rc = pci_go_64(pdev);
	if (rc)
		goto err_out_regions;

	mvi = mvs_alloc(pdev, ent);
	if (!mvi) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	rc = mvs_hw_init(mvi);
	if (rc)
		goto err_out_mvi;

#ifndef MVS_DISABLE_MSI
	if (!pci_enable_msi(pdev)) {
		u32 tmp;
		void __iomem *regs = mvi->regs;
		mvi->flags |= MVF_MSI;
		irq_handler = mvs_msi_interrupt;
		tmp = mr32(PCS);
		mw32(PCS, tmp | PCS_SELF_CLEAR);
	}
#endif

	rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
	if (rc)
		goto err_out_msi;

	rc = scsi_add_host(mvi->shost, &pdev->dev);
	if (rc)
		goto err_out_irq;

	rc = sas_register_ha(&mvi->sas);
	if (rc)
		goto err_out_shost;

	pci_set_drvdata(pdev, mvi);

	mvs_print_info(mvi);

	scsi_scan_host(mvi->shost);

	return 0;

err_out_shost:
	scsi_remove_host(mvi->shost);
err_out_irq:
	free_irq(pdev->irq, mvi);
err_out_msi:
	if (mvi->flags & MVF_MSI)	/* was "|=", which always enabled MSI teardown */
		pci_disable_msi(pdev);
err_out_mvi:
	mvs_free(mvi);
err_out_regions:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	return rc;
}

static void __devexit mvs_pci_remove(struct pci_dev *pdev)
{
	struct mvs_info *mvi = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	if (mvi) {
		sas_unregister_ha(&mvi->sas);
		mvs_hba_interrupt_disable(mvi);
		sas_remove_host(mvi->shost);
		scsi_remove_host(mvi->shost);

		free_irq(pdev->irq, mvi);
		if (mvi->flags & MVF_MSI)
			pci_disable_msi(pdev);
		mvs_free(mvi);
		pci_release_regions(pdev);
	}
	pci_disable_device(pdev);
}

static struct sas_domain_function_template mvs_transport_ops = {
	.lldd_execute_task	= mvs_task_exec,
	.lldd_control_phy	= mvs_phy_control,
	.lldd_abort_task	= mvs_task_abort,
	.lldd_port_formed	= mvs_port_formed
};

static struct pci_device_id __devinitdata mvs_pci_table[] = {
	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },

	{ }	/* terminate list */
};

static struct pci_driver mvs_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mvs_pci_table,
	.probe		= mvs_pci_init,
	.remove		= __devexit_p(mvs_pci_remove),
};

static int __init mvs_init(void)
{
	int rc;

	mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
	if (!mvs_stt)
		return -ENOMEM;

	rc = pci_register_driver(&mvs_pci_driver);
	if (rc)
		goto err_out;

	return 0;

err_out:
	sas_release_transport(mvs_stt);
	return rc;
}

static void __exit mvs_exit(void)
{
	pci_unregister_driver(&mvs_pci_driver);
	sas_release_transport(mvs_stt);
}

module_init(mvs_init);
module_exit(mvs_exit);

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mvs_pci_table);