mvsas.c 86 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222
  1. /*
  2. mvsas.c - Marvell 88SE6440 SAS/SATA support
  3. Copyright 2007 Red Hat, Inc.
  4. Copyright 2008 Marvell. <kewei@marvell.com>
  5. This program is free software; you can redistribute it and/or
  6. modify it under the terms of the GNU General Public License as
  7. published by the Free Software Foundation; either version 2,
  8. or (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty
  11. of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  12. See the GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public
  14. License along with this program; see the file COPYING. If not,
  15. write to the Free Software Foundation, 675 Mass Ave, Cambridge,
  16. MA 02139, USA.
  17. ---------------------------------------------------------------
  18. Random notes:
  19. * hardware supports controlling the endian-ness of data
  20. structures. this permits elimination of all the le32_to_cpu()
  21. and cpu_to_le32() conversions.
  22. */
  23. #include <linux/kernel.h>
  24. #include <linux/module.h>
  25. #include <linux/pci.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/spinlock.h>
  28. #include <linux/delay.h>
  29. #include <linux/dma-mapping.h>
  30. #include <linux/ctype.h>
  31. #include <scsi/libsas.h>
  32. #include <scsi/scsi_tcq.h>
  33. #include <scsi/sas_ata.h>
  34. #include <asm/io.h>
  35. #define DRV_NAME "mvsas"
  36. #define DRV_VERSION "0.5.2"
  37. #define _MV_DUMP 0
  38. #define MVS_DISABLE_NVRAM
  39. #define MVS_DISABLE_MSI
  40. #define mr32(reg) readl(regs + MVS_##reg)
  41. #define mw32(reg,val) writel((val), regs + MVS_##reg)
  42. #define mw32_f(reg,val) do { \
  43. writel((val), regs + MVS_##reg); \
  44. readl(regs + MVS_##reg); \
  45. } while (0)
  46. #define MVS_ID_NOT_MAPPED 0x7f
  47. #define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
  48. /* offset for D2H FIS in the Received FIS List Structure */
  49. #define SATA_RECEIVED_D2H_FIS(reg_set) \
  50. ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
  51. #define SATA_RECEIVED_PIO_FIS(reg_set) \
  52. ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
  53. #define UNASSOC_D2H_FIS(id) \
  54. ((void *) mvi->rx_fis + 0x100 * id)
  55. #define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
  56. for ((__mc) = (__lseq_mask), (__lseq) = 0; \
  57. (__mc) != 0 && __rest; \
  58. (++__lseq), (__mc) >>= 1)
/* driver compile-time configuration (tunables, not hardware limits) */
enum driver_configuration {
	MVS_TX_RING_SZ		= 1024,	/* TX ring size (12-bit) */
	MVS_RX_RING_SZ		= 1024,	/* RX ring size (12-bit) */
					/* software requires power-of-2
					   ring size */

	MVS_SLOTS		= 512,	/* command slots */
	MVS_SLOT_BUF_SZ		= 8192, /* cmd tbl + IU + status + PRD */
	MVS_SSP_CMD_SZ		= 64,	/* SSP command table buffer size */
	MVS_ATA_CMD_SZ		= 96,	/* SATA command table buffer size */
	MVS_OAF_SZ		= 64,	/* Open address frame buffer size */

	MVS_RX_FIS_COUNT	= 17,	/* Optional rx'd FISs (max 17) */

	MVS_QUEUE_SIZE		= 30,	/* Support Queue depth */
	/* one slot is reserved, hence SLOTS - 1 */
	MVS_CAN_QUEUE		= MVS_SLOTS - 1, /* SCSI Queue depth */
};
/* unchangeable hardware details */
enum hardware_details {
	MVS_MAX_PHYS		= 8,	/* max. possible phys */
	MVS_MAX_PORTS		= 8,	/* max. possible ports */
	/* 0x400 fixed area + 0x100 bytes per optional rx'd FIS */
	MVS_RX_FISL_SZ		= 0x400 + (MVS_RX_FIS_COUNT * 0x100),
};
/* peripheral registers (BAR2) */
enum peripheral_registers {
	SPI_CTL			= 0x10,	/* EEPROM control */
	SPI_CMD			= 0x14,	/* EEPROM command */
	SPI_DATA		= 0x18,	/* EEPROM data */
};
/* bit definitions for the BAR2 peripheral (EEPROM/SPI) registers */
enum peripheral_register_bits {
	TWSI_RDY		= (1U << 7),	/* EEPROM interface ready */
	TWSI_RD			= (1U << 4),	/* EEPROM read access */

	SPI_ADDR_MASK		= 0x3ffff,	/* bits 17:0 */
};
/* enhanced mode registers (BAR4) */
enum hw_registers {
	MVS_GBL_CTL		= 0x04,	/* global control */
	MVS_GBL_INT_STAT	= 0x08,	/* global irq status */
	MVS_GBL_PI		= 0x0C,	/* ports implemented bitmask */
	MVS_GBL_PORT_TYPE	= 0xa0,	/* port type */

	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
	MVS_CMD_LIST_HI		= 0x10C,
	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
	MVS_RX_FIS_HI		= 0x114,

	MVS_TX_CFG		= 0x120, /* TX configuration */
	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
	MVS_TX_HI		= 0x128,

	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
	MVS_RX_CFG		= 0x134, /* RX configuration */
	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
	MVS_RX_HI		= 0x13C,
	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */

	MVS_INT_COAL		= 0x148, /* Int coalescing config */
	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
	MVS_INT_STAT		= 0x150, /* Central int status */
	MVS_INT_MASK		= 0x154, /* Central int enable */
	MVS_INT_STAT_SRS	= 0x158, /* SATA register set status */
	MVS_INT_MASK_SRS	= 0x15C,

					 /* ports 1-3 follow after this */
	MVS_P0_INT_STAT		= 0x160, /* port0 interrupt status */
	MVS_P0_INT_MASK		= 0x164, /* port0 interrupt mask */
	MVS_P4_INT_STAT		= 0x200, /* Port 4 interrupt status */
	MVS_P4_INT_MASK		= 0x204, /* Port 4 interrupt enable mask */

					 /* ports 1-3 follow after this */
	MVS_P0_SER_CTLSTAT	= 0x180, /* port0 serial control/status */
	MVS_P4_SER_CTLSTAT	= 0x220, /* port4 serial control/status */

	MVS_CMD_ADDR		= 0x1B8, /* Command register port (addr) */
	MVS_CMD_DATA		= 0x1BC, /* Command register port (data) */

					 /* ports 1-3 follow after this */
	MVS_P0_CFG_ADDR		= 0x1C0, /* port0 phy register address */
	MVS_P0_CFG_DATA		= 0x1C4, /* port0 phy register data */
	MVS_P4_CFG_ADDR		= 0x230, /* Port 4 config address */
	MVS_P4_CFG_DATA		= 0x234, /* Port 4 config data */

					 /* ports 1-3 follow after this */
	MVS_P0_VSR_ADDR		= 0x1E0, /* port0 VSR address */
	MVS_P0_VSR_DATA		= 0x1E4, /* port0 VSR data */
	MVS_P4_VSR_ADDR		= 0x250, /* port 4 VSR addr */
	MVS_P4_VSR_DATA		= 0x254, /* port 4 VSR data */
};
/* bit definitions for the BAR4 (enhanced mode) registers above */
enum hw_register_bits {
	/* MVS_GBL_CTL */
	INT_EN			= (1U << 1),	/* Global int enable */
	HBA_RST			= (1U << 0),	/* HBA reset */

	/* MVS_GBL_INT_STAT */
	INT_XOR			= (1U << 4),	/* XOR engine event */
	INT_SAS_SATA		= (1U << 0),	/* SAS/SATA event */

	/* MVS_GBL_PORT_TYPE */			/* shl for ports 1-3 */
	SATA_TARGET		= (1U << 16),	/* port0 SATA target enable */
	MODE_AUTO_DET_PORT7	= (1U << 15),	/* port0 SAS/SATA autodetect */
	MODE_AUTO_DET_PORT6	= (1U << 14),
	MODE_AUTO_DET_PORT5	= (1U << 13),
	MODE_AUTO_DET_PORT4	= (1U << 12),
	MODE_AUTO_DET_PORT3	= (1U << 11),
	MODE_AUTO_DET_PORT2	= (1U << 10),
	MODE_AUTO_DET_PORT1	= (1U << 9),
	MODE_AUTO_DET_PORT0	= (1U << 8),
	MODE_AUTO_DET_EN	= MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
				  MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
				  MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
				  MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
	MODE_SAS_PORT7_MASK	= (1U << 7),	/* port0 SAS(1), SATA(0) mode */
	MODE_SAS_PORT6_MASK	= (1U << 6),
	MODE_SAS_PORT5_MASK	= (1U << 5),
	MODE_SAS_PORT4_MASK	= (1U << 4),
	MODE_SAS_PORT3_MASK	= (1U << 3),
	MODE_SAS_PORT2_MASK	= (1U << 2),
	MODE_SAS_PORT1_MASK	= (1U << 1),
	MODE_SAS_PORT0_MASK	= (1U << 0),
	MODE_SAS_SATA		= MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
				  MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
				  MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
				  MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,

				/* SAS_MODE value may be
				 * dictated (in hw) by values
				 * of SATA_TARGET & AUTO_DET
				 */

	/* MVS_TX_CFG */
	TX_EN			= (1U << 16),	/* Enable TX */
	TX_RING_SZ_MASK		= 0xfff,	/* TX ring size, bits 11:0 */

	/* MVS_RX_CFG */
	RX_EN			= (1U << 16),	/* Enable RX */
	RX_RING_SZ_MASK		= 0xfff,	/* RX ring size, bits 11:0 */

	/* MVS_INT_COAL */
	COAL_EN			= (1U << 16),	/* Enable int coalescing */

	/* MVS_INT_STAT, MVS_INT_MASK */
	CINT_I2C		= (1U << 31),	/* I2C event */
	CINT_SW0		= (1U << 30),	/* software event 0 */
	CINT_SW1		= (1U << 29),	/* software event 1 */
	CINT_PRD_BC		= (1U << 28),	/* PRD BC err for read cmd */
	CINT_DMA_PCIE		= (1U << 27),	/* DMA to PCIE timeout */
	CINT_MEM		= (1U << 26),	/* int mem parity err */
	CINT_I2C_SLAVE		= (1U << 25),	/* slave I2C event */
	CINT_SRS		= (1U << 3),	/* SRS event */
	CINT_CI_STOP		= (1U << 1),	/* cmd issue stopped */
	CINT_DONE		= (1U << 0),	/* cmd completion */

						/* shl for ports 1-3 */
	CINT_PORT_STOPPED	= (1U << 16),	/* port0 stopped */
	CINT_PORT		= (1U << 8),	/* port0 event */
	CINT_PORT_MASK_OFFSET	= 8,
	CINT_PORT_MASK		= (0xFF << CINT_PORT_MASK_OFFSET),

	/* TX (delivery) ring bits */
	TXQ_CMD_SHIFT		= 29,
	TXQ_CMD_SSP		= 1,		/* SSP protocol */
	TXQ_CMD_SMP		= 2,		/* SMP protocol */
	TXQ_CMD_STP		= 3,		/* STP/SATA protocol */
	TXQ_CMD_SSP_FREE_LIST	= 4,		/* add to SSP targ free list */
	TXQ_CMD_SLOT_RESET	= 7,		/* reset command slot */
	TXQ_MODE_I		= (1U << 28),	/* mode: 0=target,1=initiator */
	TXQ_PRIO_HI		= (1U << 27),	/* priority: 0=normal, 1=high */
	TXQ_SRS_SHIFT		= 20,		/* SATA register set */
	TXQ_SRS_MASK		= 0x7f,
	TXQ_PHY_SHIFT		= 12,		/* PHY bitmap */
	TXQ_PHY_MASK		= 0xff,
	TXQ_SLOT_MASK		= 0xfff,	/* slot number */

	/* RX (completion) ring bits */
	RXQ_GOOD		= (1U << 23),	/* Response good */
	RXQ_SLOT_RESET		= (1U << 21),	/* Slot reset complete */
	RXQ_CMD_RX		= (1U << 20),	/* target cmd received */
	RXQ_ATTN		= (1U << 19),	/* attention */
	RXQ_RSP			= (1U << 18),	/* response frame xfer'd */
	RXQ_ERR			= (1U << 17),	/* err info rec xfer'd */
	RXQ_DONE		= (1U << 16),	/* cmd complete */
	RXQ_SLOT_MASK		= 0xfff,	/* slot number */

	/* mvs_cmd_hdr bits */
	MCH_PRD_LEN_SHIFT	= 16,		/* 16-bit PRD table len */
	MCH_SSP_FR_TYPE_SHIFT	= 13,		/* SSP frame type */

						/* SSP initiator only */
	MCH_SSP_FR_CMD		= 0x0,		/* COMMAND frame */

						/* SSP initiator or target */
	MCH_SSP_FR_TASK		= 0x1,		/* TASK frame */

						/* SSP target only */
	MCH_SSP_FR_XFER_RDY	= 0x4,		/* XFER_RDY frame */
	MCH_SSP_FR_RESP		= 0x5,		/* RESPONSE frame */
	MCH_SSP_FR_READ		= 0x6,		/* Read DATA frame(s) */
	MCH_SSP_FR_READ_RESP	= 0x7,		/* ditto, plus RESPONSE */

	MCH_PASSTHRU		= (1U << 12),	/* pass-through (SSP) */
	MCH_FBURST		= (1U << 11),	/* first burst (SSP) */
	MCH_CHK_LEN		= (1U << 10),	/* chk xfer len (SSP) */
	MCH_RETRY		= (1U << 9),	/* tport layer retry (SSP) */
	MCH_PROTECTION		= (1U << 8),	/* protection info rec (SSP) */
	MCH_RESET		= (1U << 7),	/* Reset (STP/SATA) */
	MCH_FPDMA		= (1U << 6),	/* First party DMA (STP/SATA) */
	MCH_ATAPI		= (1U << 5),	/* ATAPI (STP/SATA) */
	MCH_BIST		= (1U << 4),	/* BIST activate (STP/SATA) */
	MCH_PMP_MASK		= 0xf,		/* PMP from cmd FIS (STP/SATA)*/

	CCTL_RST		= (1U << 5),	/* port logic reset */

						/* 0(LSB first), 1(MSB first) */
	CCTL_ENDIAN_DATA	= (1U << 3),	/* PRD data */
	CCTL_ENDIAN_RSP		= (1U << 2),	/* response frame */
	CCTL_ENDIAN_OPEN	= (1U << 1),	/* open address frame */
	CCTL_ENDIAN_CMD		= (1U << 0),	/* command table */

	/* MVS_Px_SER_CTLSTAT (per-phy control) */
	PHY_SSP_RST		= (1U << 3),	/* reset SSP link layer */
	PHY_BCAST_CHG		= (1U << 2),	/* broadcast(change) notif */
	PHY_RST_HARD		= (1U << 1),	/* hard reset + phy reset */
	PHY_RST			= (1U << 0),	/* phy reset */
	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
			(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
	PHY_READY_MASK		= (1U << 20),

	/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
	PHYEV_DEC_ERR		= (1U << 24),	/* Phy Decoding Error */
	PHYEV_UNASSOC_FIS	= (1U << 19),	/* unassociated FIS rx'd */
	PHYEV_AN		= (1U << 18),	/* SATA async notification */
	PHYEV_BIST_ACT		= (1U << 17),	/* BIST activate FIS */
	PHYEV_SIG_FIS		= (1U << 16),	/* signature FIS */
	PHYEV_POOF		= (1U << 12),	/* phy ready from 1 -> 0 */
	PHYEV_IU_BIG		= (1U << 11),	/* IU too long err */
	PHYEV_IU_SMALL		= (1U << 10),	/* IU too short err */
	PHYEV_UNK_TAG		= (1U << 9),	/* unknown tag */
	PHYEV_BROAD_CH		= (1U << 8),	/* broadcast(CHANGE) */
	PHYEV_COMWAKE		= (1U << 7),	/* COMWAKE rx'd */
	PHYEV_PORT_SEL		= (1U << 6),	/* port selector present */
	PHYEV_HARD_RST		= (1U << 5),	/* hard reset rx'd */
	PHYEV_ID_TMOUT		= (1U << 4),	/* identify timeout */
	PHYEV_ID_FAIL		= (1U << 3),	/* identify failed */
	PHYEV_ID_DONE		= (1U << 2),	/* identify done */
	PHYEV_HARD_RST_DONE	= (1U << 1),	/* hard reset done */
	PHYEV_RDY_CH		= (1U << 0),	/* phy ready changed state */

	/* MVS_PCS */
	PCS_EN_SATA_REG_SHIFT	= (16),		/* Enable SATA Register Set */
	PCS_EN_PORT_XMT_SHIFT	= (12),		/* Enable Port Transmit */
	PCS_EN_PORT_XMT_SHIFT2	= (8),		/* For 6480 */
	PCS_SATA_RETRY		= (1U << 8),	/* retry ctl FIS on R_ERR */
	PCS_RSP_RX_EN		= (1U << 7),	/* raw response rx */
	PCS_SELF_CLEAR		= (1U << 5),	/* self-clearing int mode */
	PCS_FIS_RX_EN		= (1U << 4),	/* FIS rx enable */
	PCS_CMD_STOP_ERR	= (1U << 3),	/* cmd stop-on-err enable */
	PCS_CMD_RST		= (1U << 1),	/* reset cmd issue */
	PCS_CMD_EN		= (1U << 0),	/* enable cmd issue */

	/* Port n Attached Device Info */
	PORT_DEV_SSP_TRGT	= (1U << 19),
	PORT_DEV_SMP_TRGT	= (1U << 18),
	PORT_DEV_STP_TRGT	= (1U << 17),
	PORT_DEV_SSP_INIT	= (1U << 11),
	PORT_DEV_SMP_INIT	= (1U << 10),
	PORT_DEV_STP_INIT	= (1U << 9),
	PORT_PHY_ID_MASK	= (0xFFU << 24),
	PORT_DEV_TRGT_MASK	= (0x7U << 17),
	PORT_DEV_INIT_MASK	= (0x7U << 9),
	PORT_DEV_TYPE_MASK	= (0x7U << 0),

	/* Port n PHY Status */
	PHY_RDY			= (1U << 2),
	PHY_DW_SYNC		= (1U << 1),
	PHY_OOB_DTCTD		= (1U << 0),

	/* VSR */
	/* PHYMODE 6 (CDB) */
	PHY_MODE6_LATECLK	= (1U << 29),	/* Lock Clock */
	PHY_MODE6_DTL_SPEED	= (1U << 27),	/* Digital Loop Speed */
	PHY_MODE6_FC_ORDER	= (1U << 26),	/* Fibre Channel Mode Order*/
	PHY_MODE6_MUCNT_EN	= (1U << 24),	/* u Count Enable */
	PHY_MODE6_SEL_MUCNT_LEN	= (1U << 22),	/* Training Length Select */
	PHY_MODE6_SELMUPI	= (1U << 20),	/* Phase Multi Select (init) */
	PHY_MODE6_SELMUPF	= (1U << 18),	/* Phase Multi Select (final) */
	PHY_MODE6_SELMUFF	= (1U << 16),	/* Freq Loop Multi Sel(final) */
	PHY_MODE6_SELMUFI	= (1U << 14),	/* Freq Loop Multi Sel(init) */
	PHY_MODE6_FREEZE_LOOP	= (1U << 12),	/* Freeze Rx CDR Loop */
	PHY_MODE6_INT_RXFOFFS	= (1U << 3),	/* Rx CDR Freq Loop Enable */
	PHY_MODE6_FRC_RXFOFFS	= (1U << 2),	/* Initial Rx CDR Offset */
	PHY_MODE6_STAU_0D8	= (1U << 1),	/* Rx CDR Freq Loop Saturate */
	PHY_MODE6_RXSAT_DIS	= (1U << 0),	/* Saturate Ctl */
};
/* runtime flags kept in the per-HBA info structure */
enum mvs_info_flags {
	MVF_MSI			= (1U << 0),	/* MSI is enabled */
	MVF_PHY_PWR_FIX	= (1U << 1),	/* bug workaround */
};
/* indirect SAS command-port registers, accessed via MVS_CMD_ADDR/DATA */
enum sas_cmd_port_registers {
	CMD_CMRST_OOB_DET	= 0x100, /* COMRESET OOB detect register */
	CMD_CMWK_OOB_DET	= 0x104, /* COMWAKE OOB detect register */
	CMD_CMSAS_OOB_DET	= 0x108, /* COMSAS OOB detect register */
	CMD_BRST_OOB_DET	= 0x10c, /* burst OOB detect register */
	CMD_OOB_SPACE		= 0x110, /* OOB space control register */
	CMD_OOB_BURST		= 0x114, /* OOB burst control register */
	CMD_PHY_TIMER		= 0x118, /* PHY timer control register */
	CMD_PHY_CONFIG0		= 0x11c, /* PHY config register 0 */
	CMD_PHY_CONFIG1		= 0x120, /* PHY config register 1 */
	CMD_SAS_CTL0		= 0x124, /* SAS control register 0 */
	CMD_SAS_CTL1		= 0x128, /* SAS control register 1 */
	CMD_SAS_CTL2		= 0x12c, /* SAS control register 2 */
	CMD_SAS_CTL3		= 0x130, /* SAS control register 3 */
	CMD_ID_TEST		= 0x134, /* ID test register */
	CMD_PL_TIMER		= 0x138, /* PL timer register */
	CMD_WD_TIMER		= 0x13c, /* WD timer register */
	CMD_PORT_SEL_COUNT	= 0x140, /* port selector count register */
	CMD_APP_MEM_CTL		= 0x144, /* Application Memory Control */
	CMD_XOR_MEM_CTL		= 0x148, /* XOR Block Memory Control */
	CMD_DMA_MEM_CTL		= 0x14c, /* DMA Block Memory Control */
	CMD_PORT_MEM_CTL0	= 0x150, /* Port Memory Control 0 */
	CMD_PORT_MEM_CTL1	= 0x154, /* Port Memory Control 1 */
	CMD_SATA_PORT_MEM_CTL0	= 0x158, /* SATA Port Memory Control 0 */
	CMD_SATA_PORT_MEM_CTL1	= 0x15c, /* SATA Port Memory Control 1 */
	CMD_XOR_MEM_BIST_CTL	= 0x160, /* XOR Memory BIST Control */
	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
	CMD_DMA_MEM_BIST_CTL	= 0x168, /* DMA Memory BIST Control */
	CMD_DMA_MEM_BIST_STAT	= 0x16c, /* DMA Memory BIST Status */
	CMD_PORT_MEM_BIST_CTL	= 0x170, /* Port Memory BIST Control */
	CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
	CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
	CMD_STP_MEM_BIST_CTL	= 0x17c, /* STP Memory BIST Control */
	CMD_STP_MEM_BIST_STAT0	= 0x180, /* STP Memory BIST Status 0 */
	CMD_STP_MEM_BIST_STAT1	= 0x184, /* STP Memory BIST Status 1 */
	CMD_RESET_COUNT		= 0x188, /* Reset Count */
	CMD_MONTR_DATA_SEL	= 0x18C, /* Monitor Data/Select */
	CMD_PLL_PHY_CONFIG	= 0x190, /* PLL/PHY Configuration */
	CMD_PHY_CTL		= 0x194, /* PHY Control and Status */
	CMD_PHY_TEST_COUNT0	= 0x198, /* Phy Test Count 0 */
	CMD_PHY_TEST_COUNT1	= 0x19C, /* Phy Test Count 1 */
	CMD_PHY_TEST_COUNT2	= 0x1A0, /* Phy Test Count 2 */
	CMD_APP_ERR_CONFIG	= 0x1A4, /* Application Error Configuration */
	CMD_PND_FIFO_CTL0	= 0x1A8, /* Pending FIFO Control 0 */
	CMD_HOST_CTL		= 0x1AC, /* Host Control Status */
	CMD_HOST_WR_DATA	= 0x1B0, /* Host Write Data */
	CMD_HOST_RD_DATA	= 0x1B4, /* Host Read Data */
	CMD_PHY_MODE_21		= 0x1B8, /* Phy Mode 21 */
	CMD_SL_MODE0		= 0x1BC, /* SL Mode 0 */
	CMD_SL_MODE1		= 0x1C0, /* SL Mode 1 */
	CMD_PND_FIFO_CTL1	= 0x1C4, /* Pending FIFO Control 1 */
};
/* SAS/SATA configuration port registers, aka phy registers,
 * accessed indirectly via MVS_Px_CFG_ADDR/DATA */
enum sas_sata_config_port_regs {
	PHYR_IDENTIFY		= 0x00,	/* info for IDENTIFY frame */
	PHYR_ADDR_LO		= 0x04,	/* my SAS address (low) */
	PHYR_ADDR_HI		= 0x08,	/* my SAS address (high) */
	PHYR_ATT_DEV_INFO	= 0x0C,	/* attached device info */
	PHYR_ATT_ADDR_LO	= 0x10,	/* attached dev SAS addr (low) */
	PHYR_ATT_ADDR_HI	= 0x14,	/* attached dev SAS addr (high) */
	PHYR_SATA_CTL		= 0x18,	/* SATA control */
	PHYR_PHY_STAT		= 0x1C,	/* PHY status */
	PHYR_SATA_SIG0		= 0x20,	/* port SATA signature FIS(Byte 0-3) */
	PHYR_SATA_SIG1		= 0x24,	/* port SATA signature FIS(Byte 4-7) */
	PHYR_SATA_SIG2		= 0x28,	/* port SATA signature FIS(Byte 8-11) */
	PHYR_SATA_SIG3		= 0x2c,	/* port SATA signature FIS(Byte 12-15) */
	PHYR_R_ERR_COUNT	= 0x30,	/* port R_ERR count register */
	PHYR_CRC_ERR_COUNT	= 0x34,	/* port CRC error count register */
	PHYR_WIDE_PORT		= 0x38,	/* wide port participating */
	PHYR_CURRENT0		= 0x80,	/* current connection info 0 */
	PHYR_CURRENT1		= 0x84,	/* current connection info 1 */
	PHYR_CURRENT2		= 0x88,	/* current connection info 2 */
};
/* SAS/SATA Vendor Specific Port Registers: analog/serdes tuning registers,
 * addressed indirectly (small register indices, not MMIO offsets).
 */
enum sas_sata_vsp_regs {
	VSR_PHY_STAT	= 0x00,	/* Phy Status */
	VSR_PHY_MODE1	= 0x01,	/* phy tx */
	VSR_PHY_MODE2	= 0x02,	/* tx scc */
	VSR_PHY_MODE3	= 0x03,	/* pll */
	VSR_PHY_MODE4	= 0x04,	/* VCO */
	VSR_PHY_MODE5	= 0x05,	/* Rx */
	VSR_PHY_MODE6	= 0x06,	/* CDR */
	VSR_PHY_MODE7	= 0x07,	/* Impedance */
	VSR_PHY_MODE8	= 0x08,	/* Voltage */
	VSR_PHY_MODE9	= 0x09,	/* Test */
	VSR_PHY_MODE10	= 0x0A,	/* Power */
	VSR_PHY_MODE11	= 0x0B,	/* Phy Mode */
	VSR_PHY_VS0	= 0x0C,	/* Vendor Specific 0 */
	VSR_PHY_VS1	= 0x0D,	/* Vendor Specific 1 */
};
/* Offsets of vendor-specific registers in PCI configuration space. */
enum pci_cfg_registers {
	PCR_PHY_CTL	= 0x40,	/* phy control */
	PCR_PHY_CTL2	= 0x90,	/* phy control (second bank) */
	PCR_DEV_CTRL	= 0xE8,	/* device control */
};
/* Bit fields within the PCI config registers above.
 * NOTE(review): exact phy-to-bit mapping of PCTL_PWR_ON/PCTL_OFF is not
 * visible in this chunk — confirm against the chip datasheet.
 */
enum pci_cfg_register_bits {
	PCTL_PWR_ON	= (0xFU << 24),
	PCTL_OFF	= (0xFU << 12),
	PRD_REQ_SIZE	= (0x4000),
	PRD_REQ_MASK	= (0x00007000),
};
/* Byte offsets of fields within the NVRAM (see mvs_nvram_read()). */
enum nvram_layout_offsets {
	NVR_SIG		= 0x00,	/* 0xAA, 0x55 */
	NVR_SAS_ADDR	= 0x02,	/* 8-byte SAS address */
};
/* Supported controller variants; used as the index into mvs_chips[]. */
enum chip_flavors {
	chip_6320,
	chip_6440,
	chip_6480,
};
/* Phy/port protocol flags (bit mask; a phy may have one of these set). */
enum port_type {
	PORT_TYPE_SAS	= (1L << 1),
	PORT_TYPE_SATA	= (1L << 0),
};
/* Command Table Format: per-protocol offsets/limits inside the per-slot
 * command table region of the slot DMA buffer.
 */
enum ct_format {
	/* SSP */
	SSP_F_H		= 0x00,		/* frame header */
	SSP_F_IU	= 0x18,		/* information unit offset */
	SSP_F_MAX	= 0x4D,		/* max command table size */
	/* STP */
	STP_CMD_FIS	= 0x00,		/* command FIS offset */
	STP_ATAPI_CMD	= 0x40,		/* ATAPI packet offset */
	STP_F_MAX	= 0x10,		/* max command table size */
	/* SMP */
	SMP_F_T		= 0x00,		/* SMP frame type offset */
	SMP_F_DEP	= 0x01,		/* SMP payload offset */
	SMP_F_MAX	= 0x101,	/* max command table size */
};
/* Layout of the per-slot status buffer (slot->response points here):
 * error info record first, then the received response frame.
 */
enum status_buffer {
	SB_EIR_OFF	= 0x00,		/* Error Information Record */
	SB_RFB_OFF	= 0x08,		/* Response Frame Buffer */
	SB_RFB_MAX	= 0x400,	/* RFB size */
};
/* Bit flags in the first dword of the error information record
 * (struct mvs_err_info.flags; decoded as err_dw0 in mvs_slot_err()).
 */
enum error_info_rec {
	CMD_ISS_STPD	= (1U << 31),	/* Cmd Issue Stopped */
	CMD_PI_ERR	= (1U << 30),	/* Protection info error.  see flags2 */
	RSP_OVER	= (1U << 29),	/* rsp buffer overflow */
	RETRY_LIM	= (1U << 28),	/* FIS/frame retry limit exceeded */
	UNK_FIS		= (1U << 27),	/* unknown FIS */
	DMA_TERM	= (1U << 26),	/* DMA terminate primitive rx'd */
	SYNC_ERR	= (1U << 25),	/* SYNC rx'd during frame xmit */
	TFILE_ERR	= (1U << 24),	/* SATA taskfile Error bit set */
	R_ERR		= (1U << 23),	/* SATA returned R_ERR prim */
	RD_OFS		= (1U << 20),	/* Read DATA frame invalid offset */
	XFER_RDY_OFS	= (1U << 19),	/* XFER_RDY offset error */
	UNEXP_XFER_RDY	= (1U << 18),	/* unexpected XFER_RDY error */
	DATA_OVER_UNDER	= (1U << 16),	/* data overflow/underflow */
	INTERLOCK	= (1U << 15),	/* interlock error */
	NAK		= (1U << 14),	/* NAK rx'd */
	ACK_NAK_TO	= (1U << 13),	/* ACK/NAK timeout */
	CXN_CLOSED	= (1U << 12),	/* cxn closed w/out ack/nak */
	OPEN_TO		= (1U << 11),	/* I_T nexus lost, open cxn timeout */
	PATH_BLOCKED	= (1U << 10),	/* I_T nexus lost, pathway blocked */
	NO_DEST		= (1U << 9),	/* I_T nexus lost, no destination */
	STP_RES_BSY	= (1U << 8),	/* STP resources busy */
	BREAK		= (1U << 7),	/* break received */
	BAD_DEST	= (1U << 6),	/* bad destination */
	BAD_PROTO	= (1U << 5),	/* protocol not supported */
	BAD_RATE	= (1U << 4),	/* cxn rate not supported */
	WRONG_DEST	= (1U << 3),	/* wrong destination error */
	CREDIT_TO	= (1U << 2),	/* credit timeout */
	WDOG_TO		= (1U << 1),	/* watchdog timeout */
	BUF_PAR		= (1U << 0),	/* buffer parity error */
};
/* Bit flags in the second dword of the error information record
 * (struct mvs_err_info.flags2; decoded as err_dw1 in mvs_slot_err()).
 */
enum error_info_rec_2 {
	SLOT_BSY_ERR	= (1U << 31),	/* Slot Busy Error */
	GRD_CHK_ERR	= (1U << 14),	/* Guard Check Error */
	APP_CHK_ERR	= (1U << 13),	/* Application Check error */
	REF_CHK_ERR	= (1U << 12),	/* Reference Check Error */
	USR_BLK_NM	= (1U << 0),	/* User Block Number */
};
/* Static capabilities of one chip flavor (see mvs_chips[]). */
struct mvs_chip_info {
	u32 n_phy;	/* number of phys on this chip */
	u32 srs_sz;	/* SATA register-set count — TODO confirm */
	u32 slot_width;	/* slot count as a power of two — TODO confirm */
};
/* Error information record: the first 8 bytes of the status buffer.
 * Bits defined by enum error_info_rec / enum error_info_rec_2.
 */
struct mvs_err_info {
	__le32 flags;
	__le32 flags2;
};
/* PRD (physical region descriptor): one scatter/gather entry
 * in the per-slot PRD table.
 */
struct mvs_prd {
	__le64 addr;		/* 64-bit buffer address */
	__le32 reserved;
	__le32 len;		/* 16-bit length */
};
/* One entry of the command list (mvi->slot); points the hardware at the
 * command table, open address frame, status buffer and PRD table of a slot.
 */
struct mvs_cmd_hdr {
	__le32 flags;		/* PRD tbl len; SAS, SATA ctl */
	__le32 lens;		/* cmd, max resp frame len */
	__le32 tags;		/* targ port xfer tag; tag */
	__le32 data_len;	/* data xfer len */
	__le64 cmd_tbl;		/* command table address */
	__le64 open_frame;	/* open addr frame address */
	__le64 status_buf;	/* status buffer address */
	__le64 prd_tbl;		/* PRD tbl address */
	__le32 reserved[4];
};
/* Driver-private port state wrapping the libsas port. */
struct mvs_port {
	struct asd_sas_port sas_port;
	u8 port_attached;	/* non-zero once a device is attached */
	u8 taskfileset;		/* SATA register set assigned to this port */
	u8 wide_port_phymap;	/* phy mask for wide ports (0 if narrow) */
	struct list_head list;	/* active mvs_slot_info list for this port */
};
/* Driver-private phy state wrapping the libsas phy. */
struct mvs_phy {
	struct mvs_port		*port;		/* port this phy belongs to */
	struct asd_sas_phy	sas_phy;
	struct sas_identify	identify;
	struct scsi_device	*sdev;
	u64		dev_sas_addr;
	u64		att_dev_sas_addr;	/* attached device SAS address */
	u32		att_dev_info;
	u32		dev_info;
	u32		phy_type;		/* enum port_type flags */
	u32		phy_status;		/* cached mvs_is_phy_ready() result */
	u32		irq_status;		/* last per-port interrupt status */
	u32		frame_rcvd_size;
	u8		frame_rcvd[32];		/* identify frame / signature FIS */
	u8		phy_attached;
	enum sas_linkrate	minimum_linkrate;
	enum sas_linkrate	maximum_linkrate;
};
/* Per-slot (per-tag) state for one in-flight command. */
struct mvs_slot_info {
	struct list_head list;		/* linkage on mvs_port.list */
	struct sas_task *task;		/* owning libsas task, NULL when free */
	u32 n_elem;			/* mapped scatter/gather entry count */
	u32 tx;				/* delivery-queue index used for this cmd */
	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
	 * and PRD table
	 */
	void *buf;
	dma_addr_t buf_dma;
#if _MV_DUMP
	u32 cmd_size;			/* command table size, for debug dumps */
#endif
	void *response;			/* status buffer (within *buf) */
	struct mvs_port *port;
};
/* Per-HBA driver state. */
struct mvs_info {
	unsigned long flags;
	spinlock_t lock;		/* host-wide lock */
	struct pci_dev *pdev;		/* our device */
	void __iomem *regs;		/* enhanced mode registers */
	void __iomem *peri_regs;	/* peripheral registers */
	u8 sas_addr[SAS_ADDR_SIZE];
	struct sas_ha_struct sas;	/* SCSI/SAS glue */
	struct Scsi_Host *shost;
	__le32 *tx;			/* TX (delivery) DMA ring */
	dma_addr_t tx_dma;
	u32 tx_prod;			/* cached next-producer idx */
	__le32 *rx;			/* RX (completion) DMA ring;
					 * rx[0] mirrors the hw producer idx */
	dma_addr_t rx_dma;
	u32 rx_cons;			/* RX consumer idx */
	__le32 *rx_fis;			/* RX'd FIS area */
	dma_addr_t rx_fis_dma;
	struct mvs_cmd_hdr *slot;	/* DMA command header slots */
	dma_addr_t slot_dma;
	const struct mvs_chip_info *chip;
	u8 tags[MVS_SLOTS];		/* tag allocation bitmap */
	struct mvs_slot_info slot_info[MVS_SLOTS];
	/* further per-slot information */
	struct mvs_phy phy[MVS_MAX_PHYS];
	struct mvs_port port[MVS_MAX_PHYS];
#ifdef MVS_USE_TASKLET
	struct tasklet_struct tasklet;
#endif
};
/* Forward declarations for routines referenced before their definitions. */
static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			   void *funcdata);
static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);

static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
static void mvs_detect_porttype(struct mvs_info *mvi, int i);
static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
static void mvs_release_task(struct mvs_info *mvi, int phy_no);

static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
static void mvs_scan_start(struct Scsi_Host *);
static int mvs_slave_configure(struct scsi_device *sdev);

/* SAS transport template; presumably registered at module init — confirm
 * (registration code is not visible in this chunk). */
static struct scsi_transport_template *mvs_stt;
/* Per-flavor chip parameters, indexed by enum chip_flavors:
 * { n_phy, srs_sz, slot_width }.
 */
static const struct mvs_chip_info mvs_chips[] = {
	[chip_6320] =		{ 2, 16, 9 },
	[chip_6440] =		{ 4, 16, 9 },
	[chip_6480] =		{ 8, 32, 10 },
};
/* SCSI midlayer host template.  Nearly every hook is delegated to libsas;
 * only slave_configure and the scan hooks are driver-specific.
 * NOTE(review): can_queue/cmd_per_lun of 1 look like placeholders that are
 * raised at probe time — confirm (probe code not visible in this chunk).
 */
static struct scsi_host_template mvs_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= mvs_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.scan_finished		= mvs_scan_finished,
	.scan_start		= mvs_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler	= sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.slave_alloc		= sas_slave_alloc,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
  640. static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
  641. {
  642. u32 i;
  643. u32 run;
  644. u32 offset;
  645. offset = 0;
  646. while (size) {
  647. printk("%08X : ", baseaddr + offset);
  648. if (size >= 16)
  649. run = 16;
  650. else
  651. run = size;
  652. size -= run;
  653. for (i = 0; i < 16; i++) {
  654. if (i < run)
  655. printk("%02X ", (u32)data[i]);
  656. else
  657. printk(" ");
  658. }
  659. printk(": ");
  660. for (i = 0; i < run; i++)
  661. printk("%c", isalnum(data[i]) ? data[i] : '.');
  662. printk("\n");
  663. data = &data[16];
  664. offset += run;
  665. }
  666. printk("\n");
  667. }
#if _MV_DUMP
/* Debug-only: hex-dump the first 32 bytes of slot @tag's status buffer.
 * The status buffer lives in slot->buf after the command table, open
 * address frame and PRD entries, hence the offset computation.
 */
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
			    enum sas_protocol proto)
{
	u32 offset;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	offset = slot->cmd_size + MVS_OAF_SZ +
		 sizeof(struct mvs_prd) * slot->n_elem;
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
		   tag);
	mvs_hexdump(32, (u8 *) slot->response,
		    (u32) slot->buf_dma + offset);
}
#endif
/* Debug-only (compiled out unless _MV_DUMP): dump everything the hardware
 * sees for slot @tag — delivery queue entry, command header, command table,
 * open address frame, status buffer and PRD table.
 */
static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
				enum sas_protocol proto)
{
#if _MV_DUMP
	u32 sz, w_ptr;
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/*Delivery Queue */
	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
	w_ptr = slot->tx;
	/* NOTE(review): if mr32() yields a 32-bit value, "<< 16 << 16"
	 * evaluates to 0 before widening to u64, dropping the high address
	 * bits — confirm the mr32 macro's result type. */
	addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Base Address=0x%llX (PA)"
		"(tx_dma=0x%llX), Entry=%04d\n",
		addr, mvi->tx_dma, w_ptr);
	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
	/*Command List */
	addr = mvi->slot_dma;
	/* NOTE(review): the "slot_dma=" placeholder is filled with
	 * slot->buf_dma; addr (== mvi->slot_dma) may have been intended —
	 * confirm. */
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Command List Base Address=0x%llX (PA)"
		"(slot_dma=0x%llX), Header=%03d\n",
		addr, slot->buf_dma, tag);
	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
	/*mvs_cmd_hdr */
	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
	/*1.command table area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
	/*2.open address frame area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
				(u32) slot->buf_dma + slot->cmd_size);
	/*3.status buffer */
	mvs_hba_sb_dump(mvi, tag, proto);
	/*4.PRD table */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
	mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
#endif
}
/* Debug-only (compiled out unless _MV_DUMP > 2): dump the next completion
 * queue entry.  The RX ring proper starts at index 1; rx[0] mirrors the
 * hardware producer index (see mvs_int_rx()), hence "rx_cons + 1".
 */
static void mvs_hba_cq_dump(struct mvs_info *mvi)
{
#if (_MV_DUMP > 2)
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	u32 entry = mvi->rx_cons + 1;
	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);

	/*Completion Queue */
	/* NOTE(review): same "<< 16 << 16" truncation concern as in
	 * mvs_hba_memory_dump() — confirm mr32's result type. */
	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
		   mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Completion List Base Address=0x%llX (PA), "
		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
		addr, entry - 1, mvi->rx[0]);
	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
		    mvi->rx_dma + sizeof(u32) * entry);
#endif
}
  750. static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
  751. {
  752. void __iomem *regs = mvi->regs;
  753. u32 tmp;
  754. tmp = mr32(GBL_CTL);
  755. mw32(GBL_CTL, tmp | INT_EN);
  756. }
  757. static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
  758. {
  759. void __iomem *regs = mvi->regs;
  760. u32 tmp;
  761. tmp = mr32(GBL_CTL);
  762. mw32(GBL_CTL, tmp & ~INT_EN);
  763. }
/* Forward declaration: defined below, called from mvs_int_full(). */
static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
  765. /* move to PCI layer or libata core? */
  766. static int pci_go_64(struct pci_dev *pdev)
  767. {
  768. int rc;
  769. if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
  770. rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
  771. if (rc) {
  772. rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  773. if (rc) {
  774. dev_printk(KERN_ERR, &pdev->dev,
  775. "64-bit DMA enable failed\n");
  776. return rc;
  777. }
  778. }
  779. } else {
  780. rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
  781. if (rc) {
  782. dev_printk(KERN_ERR, &pdev->dev,
  783. "32-bit DMA enable failed\n");
  784. return rc;
  785. }
  786. rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  787. if (rc) {
  788. dev_printk(KERN_ERR, &pdev->dev,
  789. "32-bit consistent DMA enable failed\n");
  790. return rc;
  791. }
  792. }
  793. return rc;
  794. }
  795. static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
  796. {
  797. if (task->lldd_task) {
  798. struct mvs_slot_info *slot;
  799. slot = (struct mvs_slot_info *) task->lldd_task;
  800. *tag = slot - mvi->slot_info;
  801. return 1;
  802. }
  803. return 0;
  804. }
  805. static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
  806. {
  807. void *bitmap = (void *) &mvi->tags;
  808. clear_bit(tag, bitmap);
  809. }
  810. static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
  811. {
  812. mvs_tag_clear(mvi, tag);
  813. }
  814. static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
  815. {
  816. void *bitmap = (void *) &mvi->tags;
  817. set_bit(tag, bitmap);
  818. }
  819. static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
  820. {
  821. unsigned int index, tag;
  822. void *bitmap = (void *) &mvi->tags;
  823. index = find_first_zero_bit(bitmap, MVS_SLOTS);
  824. tag = index;
  825. if (tag >= MVS_SLOTS)
  826. return -SAS_QUEUE_FULL;
  827. mvs_tag_set(mvi, tag);
  828. *tag_out = tag;
  829. return 0;
  830. }
  831. static void mvs_tag_init(struct mvs_info *mvi)
  832. {
  833. int i;
  834. for (i = 0; i < MVS_SLOTS; ++i)
  835. mvs_tag_clear(mvi, i);
  836. }
  837. #ifndef MVS_DISABLE_NVRAM
  838. static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
  839. {
  840. int timeout = 1000;
  841. if (addr & ~SPI_ADDR_MASK)
  842. return -EINVAL;
  843. writel(addr, regs + SPI_CMD);
  844. writel(TWSI_RD, regs + SPI_CTL);
  845. while (timeout-- > 0) {
  846. if (readl(regs + SPI_CTL) & TWSI_RDY) {
  847. *data = readl(regs + SPI_DATA);
  848. return 0;
  849. }
  850. udelay(10);
  851. }
  852. return -EBUSY;
  853. }
  854. static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
  855. void *buf, u32 buflen)
  856. {
  857. u32 addr_end, tmp_addr, i, j;
  858. u32 tmp = 0;
  859. int rc;
  860. u8 *tmp8, *buf8 = buf;
  861. addr_end = addr + buflen;
  862. tmp_addr = ALIGN(addr, 4);
  863. if (addr > 0xff)
  864. return -EINVAL;
  865. j = addr & 0x3;
  866. if (j) {
  867. rc = mvs_eep_read(regs, tmp_addr, &tmp);
  868. if (rc)
  869. return rc;
  870. tmp8 = (u8 *)&tmp;
  871. for (i = j; i < 4; i++)
  872. *buf8++ = tmp8[i];
  873. tmp_addr += 4;
  874. }
  875. for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
  876. rc = mvs_eep_read(regs, tmp_addr, &tmp);
  877. if (rc)
  878. return rc;
  879. memcpy(buf8, &tmp, 4);
  880. buf8 += 4;
  881. }
  882. if (tmp_addr < addr_end) {
  883. rc = mvs_eep_read(regs, tmp_addr, &tmp);
  884. if (rc)
  885. return rc;
  886. tmp8 = (u8 *)&tmp;
  887. j = addr_end - tmp_addr;
  888. for (i = 0; i < j; i++)
  889. *buf8++ = tmp8[i];
  890. tmp_addr += 4;
  891. }
  892. return 0;
  893. }
  894. #endif
/* Read and validate an NVRAM entry.
 * Layout: a 2-byte header at @addr (entry id 0x5A, then presumably a
 * checksum byte — confirm), followed by @buflen payload bytes.  The 32-bit
 * byte-sum of header plus payload must be zero for the entry to be valid.
 * Returns 0 on success; logs and returns a negative errno on failure.
 * With MVS_DISABLE_NVRAM, hands back a fixed SAS address instead.
 */
static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
			  void *buf, u32 buflen)
{
#ifndef MVS_DISABLE_NVRAM
	void __iomem *regs = mvi->regs;
	int rc, i;
	u32 sum;
	u8 hdr[2], *tmp;
	const char *msg;

	rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
	if (rc) {
		msg = "nvram hdr read failed";
		goto err_out;
	}
	rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
	if (rc) {
		msg = "nvram read failed";
		goto err_out;
	}

	if (hdr[0] != 0x5A) {
		/* entry id */
		msg = "invalid nvram entry id";
		rc = -ENOENT;
		goto err_out;
	}

	tmp = buf;
	sum = ((u32)hdr[0]) + ((u32)hdr[1]);
	for (i = 0; i < buflen; i++)
		sum += ((u32)tmp[i]);

	if (sum) {
		msg = "nvram checksum failure";
		rc = -EILSEQ;
		goto err_out;
	}

	return 0;

err_out:
	dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
	return rc;
#else
	/* FIXME , For SAS target mode */
	memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
	return 0;
#endif
}
/* Report phy @i's received identify/signature bytes to libsas
 * (PORTE_BYTES_DMAED) and refresh the transport-visible link rates.
 * No-op when nothing is attached to the phy.
 */
static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];

	if (!phy->phy_attached)
		return;

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate = phy->minimum_linkrate;
		/* hardware supports 1.5 - 3.0 Gbps */
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate = phy->maximum_linkrate;
		sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		/* overlay identify fields onto the raw received frame */
		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* TODO */
	}

	mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
	mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
				   PORTE_BYTES_DMAED);
}
  966. static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
  967. {
  968. /* give the phy enabling interrupt event time to come in (1s
  969. * is empirically about all it takes) */
  970. if (time < HZ)
  971. return 0;
  972. /* Wait for discovery to finish */
  973. scsi_flush_work(shost);
  974. return 1;
  975. }
  976. static void mvs_scan_start(struct Scsi_Host *shost)
  977. {
  978. int i;
  979. struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
  980. for (i = 0; i < mvi->chip->n_phy; ++i) {
  981. mvs_bytes_dmaed(mvi, i);
  982. }
  983. }
/* slave_configure hook: run the standard libsas configuration, then clamp
 * SATA devices to queue depth 1 (effectively disabling NCQ for now).
 */
static int mvs_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;

	if (dev_is_sata(dev)) {
		/* struct ata_port *ap = dev->sata_dev.ap; */
		/* struct ata_device *adev = ap->link.device; */

		/* clamp at no NCQ for the time being */
		/* adev->flags |= ATA_DFLAG_NCQ_OFF; */
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
	}
	return 0;
}
/* Per-port interrupt handler: read this phy's event status, react to
 * unplug / OOB-done / signature-FIS / broadcast events, and acknowledge
 * the handled bits at the end.
 */
static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
{
	struct pci_dev *pdev = mvi->pdev;
	struct sas_ha_struct *sas_ha = &mvi->sas;
	struct mvs_phy *phy = &mvi->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
	/*
	* events is port event now ,
	* we need check the interrupt status which belongs to per port.
	*/
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Port %d Event = %X\n",
		phy_no, phy->irq_status);

	/* Link dropped or decode error: abort this phy's outstanding tasks.
	 * If the phy is really gone, tell libsas; otherwise retrain it. */
	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
		mvs_release_task(mvi, phy_no);
		if (!mvs_is_phy_ready(mvi, phy_no)) {
			sas_phy_disconnected(sas_phy);
			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
			dev_printk(KERN_INFO, &pdev->dev,
				"Port %d Unplug Notice\n", phy_no);
		} else
			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
	}

	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
		/* SATA COMWAKE seen: unmask the signature-FIS interrupt so
		 * we get notified when the device's initial FIS arrives */
		if (phy->irq_status & PHYEV_COMWAKE) {
			u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
			mvs_write_port_irq_mask(mvi, phy_no,
					tmp | PHYEV_SIG_FIS);
		}
		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
			phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
			if (phy->phy_status) {
				mvs_detect_porttype(mvi, phy_no);

				/* SATA link established: mask SIG_FIS again */
				if (phy->phy_type & PORT_TYPE_SATA) {
					u32 tmp = mvs_read_port_irq_mask(mvi,
								phy_no);
					tmp &= ~PHYEV_SIG_FIS;
					mvs_write_port_irq_mask(mvi,
								phy_no, tmp);
				}

				mvs_update_phyinfo(mvi, phy_no, 0);
				sas_ha->notify_phy_event(sas_phy,
							PHYE_OOB_DONE);
				mvs_bytes_dmaed(mvi, phy_no);
			} else {
				dev_printk(KERN_DEBUG, &pdev->dev,
					"plugin interrupt but phy is gone\n");
				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
						NULL);
			}
		} else if (phy->irq_status & PHYEV_BROAD_CH) {
			/* broadcast(change): drop tasks, let libsas rediscover */
			mvs_release_task(mvi, phy_no);
			sas_ha->notify_port_event(sas_phy,
						PORTE_BROADCAST_RCVD);
		}
	}
	/* acknowledge the handled events (presumably write-to-clear —
	 * confirm against the chip documentation) */
	mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
}
  1058. static void mvs_int_sata(struct mvs_info *mvi)
  1059. {
  1060. u32 tmp;
  1061. void __iomem *regs = mvi->regs;
  1062. tmp = mr32(INT_STAT_SRS);
  1063. mw32(INT_STAT_SRS, tmp & 0xFFFF);
  1064. }
/* Queue a SLOT_RESET command for @slot_idx on the delivery queue.
 * Non-ATA tasks address the reset by phy mask (the wide-port map when the
 * port is wide, else the libsas phy mask); ATA tasks additionally name the
 * port's SATA register set via TXQ_SRS.
 * NOTE(review): TX_PROD_IDX is written with the pre-increment tx_prod —
 * confirm this ordering matches the other delivery-queue writers (not
 * visible in this chunk).
 */
static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
			   u32 slot_idx)
{
	void __iomem *regs = mvi->regs;
	struct domain_device *dev = task->dev;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_port *port = mvi->slot_info[slot_idx].port;
	u32 reg_set, phy_mask;

	if (!sas_protocol_ata(task->task_proto)) {
		reg_set = 0;
		phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
				sas_port->phy_mask;
	} else {
		reg_set = port->taskfileset;
		phy_mask = sas_port->phy_mask;
	}
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
					(TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
					(phy_mask << TXQ_PHY_SHIFT) |
					(reg_set << TXQ_SRS_SHIFT));
	mw32(TX_PROD_IDX, mvi->tx_prod);
	mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
}
/* Build the response for a completed SATA/STP task: copy the D2H FIS
 * received for this port's SATA register set into the task status buffer.
 * Returns SAM_GOOD, or SAS_PROTO_RESPONSE when @err is set so the caller
 * inspects the FIS for the failure details.
 */
static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
			 u32 slot_idx, int err)
{
	struct mvs_port *port = mvi->slot_info[slot_idx].port;
	struct task_status_struct *tstat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
	int stat = SAM_GOOD;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0],
	       SATA_RECEIVED_D2H_FIS(port->taskfileset),
	       sizeof(struct dev_to_host_fis));
	tstat->buf_valid_size = sizeof(*resp);
	if (unlikely(err))
		stat = SAS_PROTO_RESPONSE;
	return stat;
}
  1104. static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
  1105. {
  1106. u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
  1107. mvs_tag_clear(mvi, slot_idx);
  1108. }
  1109. static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
  1110. struct mvs_slot_info *slot, u32 slot_idx)
  1111. {
  1112. if (!sas_protocol_ata(task->task_proto))
  1113. if (slot->n_elem)
  1114. pci_unmap_sg(mvi->pdev, task->scatter,
  1115. slot->n_elem, task->data_dir);
  1116. switch (task->task_proto) {
  1117. case SAS_PROTOCOL_SMP:
  1118. pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
  1119. PCI_DMA_FROMDEVICE);
  1120. pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
  1121. PCI_DMA_TODEVICE);
  1122. break;
  1123. case SAS_PROTOCOL_SATA:
  1124. case SAS_PROTOCOL_STP:
  1125. case SAS_PROTOCOL_SSP:
  1126. default:
  1127. /* do nothing */
  1128. break;
  1129. }
  1130. list_del(&slot->list);
  1131. task->lldd_task = NULL;
  1132. slot->task = NULL;
  1133. slot->port = NULL;
  1134. }
/* Decode the error information record of a failed slot and map it to a
 * SAM/SAS status.  SLOT_BSY_ERR additionally queues a slot reset and maps
 * to SAS_QUEUE_FULL; a SATA taskfile error routes through mvs_sata_done()
 * so the D2H FIS is returned.  Defaults to SAM_CHECK_COND.
 */
static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
	u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
	int stat = SAM_CHECK_COND;

	if (err_dw1 & SLOT_BSY_ERR) {
		stat = SAS_QUEUE_FULL;
		mvs_slot_reset(mvi, task, slot_idx);
	}
	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		break;
	case SAS_PROTOCOL_SMP:
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		if (err_dw0 & TFILE_ERR)
			stat = mvs_sata_done(mvi, task, slot_idx, 1);
		break;
	default:
		break;
	}

	/* dump the raw error info record for debugging */
	mvs_hexdump(16, (u8 *) slot->response, 0);
	return stat;
}
/* Complete the command in the slot named by @rx_desc and fill in the
 * task_status for libsas.  @flags non-zero forces the error path (used by
 * mvs_release_task() to abort outstanding commands).
 * Called with mvi->lock held; the lock is dropped around task->task_done().
 * Returns the status written to the task, or -1 when there was nothing to
 * complete (no task, or the task was already aborted).
 */
static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct task_status_struct *tstat;
	struct mvs_port *port;
	bool aborted;
	void *to;

	if (unlikely(!task || !task->lldd_task))
		return -1;

	mvs_hba_cq_dump(mvi);

	/* mark the task done unless someone already aborted it */
	spin_lock(&task->task_state_lock);
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	if (!aborted) {
		task->task_state_flags &=
		    ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
	}
	spin_unlock(&task->task_state_lock);
	if (aborted) {
		/* the aborter owns completion; just release our resources */
		mvs_slot_task_free(mvi, task, slot, slot_idx);
		mvs_slot_free(mvi, rx_desc);
		return -1;
	}

	port = slot->port;
	tstat = &task->task_status;
	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	/* forced-error path: port gone or caller-requested abort */
	if (unlikely(!port->port_attached || flags)) {
		mvs_slot_err(mvi, task, slot_idx);
		if (!sas_protocol_ata(task->task_proto))
			tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/* error info record present */
	if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
		tstat->stat = mvs_slot_err(mvi, task, slot_idx);
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu =
			    slot->response + sizeof(struct mvs_err_info);
			sas_ssp_task_response(&mvi->pdev->dev, task, iu);
		}

		/* should never happen? */
		else
			tstat->stat = SAM_CHECK_COND;
		break;

	case SAS_PROTOCOL_SMP: {
			/* copy the SMP response out of the status buffer
			 * into the caller's response scatterlist page */
			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
			tstat->stat = SAM_GOOD;
			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
			memcpy(to + sg_resp->offset,
				slot->response + sizeof(struct mvs_err_info),
				sg_dma_len(sg_resp));
			kunmap_atomic(to, KM_IRQ0);
			break;
		}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
			tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
			break;
		}

	default:
		tstat->stat = SAM_CHECK_COND;
		break;
	}

out:
	mvs_slot_task_free(mvi, task, slot, slot_idx);
	/* on SAS_QUEUE_FULL the slot was handed to a slot reset; keep it */
	if (unlikely(tstat->stat != SAS_QUEUE_FULL))
		mvs_slot_free(mvi, rx_desc);

	/* run the completion callback without holding the host lock */
	spin_unlock(&mvi->lock);
	task->task_done(task);
	spin_lock(&mvi->lock);
	return tstat->stat;
}
  1249. static void mvs_release_task(struct mvs_info *mvi, int phy_no)
  1250. {
  1251. struct list_head *pos, *n;
  1252. struct mvs_slot_info *slot;
  1253. struct mvs_phy *phy = &mvi->phy[phy_no];
  1254. struct mvs_port *port = phy->port;
  1255. u32 rx_desc;
  1256. if (!port)
  1257. return;
  1258. list_for_each_safe(pos, n, &port->list) {
  1259. slot = container_of(pos, struct mvs_slot_info, list);
  1260. rx_desc = (u32) (slot - mvi->slot_info);
  1261. mvs_slot_complete(mvi, rx_desc, 1);
  1262. }
  1263. }
/* Service every interrupt source: drain the completion ring, dispatch
 * per-port events for each port with a pending bit, handle SATA SRS
 * interrupts, then acknowledge INT_STAT.
 */
static void mvs_int_full(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, stat;
	int i;

	stat = mr32(INT_STAT);

	mvs_int_rx(mvi, false);

	for (i = 0; i < MVS_MAX_PORTS; i++) {
		/* per-port event bits are packed one position per port */
		tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
		if (tmp)
			mvs_int_port(mvi, i, tmp);
	}

	if (stat & CINT_SRS)
		mvs_int_sata(mvi);

	mw32(INT_STAT, stat);
}
/* Drain the RX (completion) ring: walk descriptors from our cached consumer
 * index up to the hardware producer index, completing done/errored slots
 * and freeing reset slots.  With @self_clear set (MSI path), an ATTN
 * descriptor triggers a full mvs_int_full() pass.  Always returns 0.
 */
static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	void __iomem *regs = mvi->regs;
	u32 rx_prod_idx, rx_desc;
	bool attn = false;
	struct pci_dev *pdev = mvi->pdev;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mvi->rx_cons;
	mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
	if (mvi->rx_cons == 0xfff)	/* h/w hasn't touched RX ring yet */
		return 0;

	/* The CMPL_Q may come late, read from register and try again
	 * note: if coalescing is enabled,
	 * it will need to read from register every time for sure
	 */
	if (mvi->rx_cons == rx_prod_idx)
		mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;

	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	while (mvi->rx_cons != rx_prod_idx) {

		/* increment our internal RX consumer pointer */
		rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);

		/* "+ 1" skips the producer-index mirror at rx[0] */
		rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc, 0);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
			dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
				rx_desc);
		} else if (rx_desc & RXQ_ERR) {
			/* complete errored slots not already handled above */
			if (!(rx_desc & RXQ_DONE))
				mvs_slot_complete(mvi, rx_desc, 0);
			dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
				rx_desc);
		} else if (rx_desc & RXQ_SLOT_RESET) {
			dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
				rx_desc);
			mvs_slot_free(mvi, rx_desc);
		}
	}

	if (attn && self_clear)
		mvs_int_full(mvi);

	return 0;
}
#ifdef MVS_USE_TASKLET
/*
 * Deferred (softirq) interrupt work, scheduled by the hard IRQ
 * handlers.  Performs the same servicing the handler would, under
 * mvi->lock with local interrupts disabled.
 */
static void mvs_tasklet(unsigned long data)
{
	struct mvs_info *mvi = (struct mvs_info *) data;
	unsigned long flags;

	spin_lock_irqsave(&mvi->lock, flags);

#ifdef MVS_DISABLE_MSI
	/* legacy INTx build: scan every interrupt source */
	mvs_int_full(mvi);
#else
	/* MSI build: only the RX ring needs draining here */
	mvs_int_rx(mvi, true);
#endif

	spin_unlock_irqrestore(&mvi->lock, flags);
}
#endif
/*
 * Hard interrupt handler for the (possibly shared) INTx line.  Checks
 * the global status word to reject interrupts that are not ours, acks
 * the command-complete bit early, then services inline or via tasklet
 * depending on the build configuration.
 */
static irqreturn_t mvs_interrupt(int irq, void *opaque)
{
	struct mvs_info *mvi = opaque;
	void __iomem *regs = mvi->regs;	/* for mr32()/mw32_f() */
	u32 stat;

	stat = mr32(GBL_INT_STAT);

	/* 0: not our interrupt; ~0: device unreachable (e.g. hot-removed) */
	if (stat == 0 || stat == 0xffffffff)
		return IRQ_NONE;

	/* clear CMD_CMPLT ASAP */
	mw32_f(INT_STAT, CINT_DONE);

#ifndef MVS_USE_TASKLET
	spin_lock(&mvi->lock);

	mvs_int_full(mvi);

	spin_unlock(&mvi->lock);
#else
	tasklet_schedule(&mvi->tasklet);
#endif

	return IRQ_HANDLED;
}
#ifndef MVS_DISABLE_MSI
/*
 * MSI interrupt handler.  MSI is never shared, so no status read is
 * needed; only the RX completion ring is drained.  self_clear=true lets
 * an ATTN descriptor escalate to a full interrupt scan.
 */
static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
{
	struct mvs_info *mvi = opaque;

#ifndef MVS_USE_TASKLET
	spin_lock(&mvi->lock);

	mvs_int_rx(mvi, true);

	spin_unlock(&mvi->lock);
#else
	tasklet_schedule(&mvi->tasklet);
#endif

	return IRQ_HANDLED;
}
#endif
/* Per-command context handed to the mvs_task_prep_*() helpers. */
struct mvs_task_exec_info {
	struct sas_task *task;		/* libsas task being queued */
	struct mvs_cmd_hdr *hdr;	/* command header (mvi->slot[tag]) */
	struct mvs_port *port;		/* destination port */
	u32 tag;			/* allocated slot/command tag */
	int n_elem;			/* mapped scatterlist entry count */
};
/*
 * Prepare an SMP request in slot @tei->tag: DMA-map the request and
 * response scatterlists, carve the per-slot DMA buffer into the four
 * regions the hardware expects (command table, open address frame, PRD
 * table, status buffer), then fill in the TX ring entry and command
 * header.  Returns 0, or a negative errno after undoing the mappings.
 */
static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	struct mvs_prd *buf_prd;
	struct scatterlist *sg;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	struct asd_sas_port *sas_port = task->dev->port;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#if _MV_DUMP
	u8 *buf_cmd;
	void *from;
#endif
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = sg_dma_len(sg_resp);

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

#if _MV_DUMP
	/* debug build: bounce the request through the slot buffer so it
	 * can be dumped later */
	buf_cmd = buf_tmp;
	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
	buf_tmp += req_len;
	buf_tmp_dma += req_len;
	slot->cmd_size = req_len;
#else
	/* normal build: hardware DMAs the request straight from the sg */
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);
	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	/* request length excludes the trailing CRC dword, hence -4 */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

#if _MV_DUMP
	/* copy cmd table */
	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
	memcpy(buf_cmd, from + sg_req->offset, req_len);
	kunmap_atomic(from, KM_IRQ0);
#endif
	return 0;

err_out_2:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}
/*
 * Release the SATA register set (taskfile set) held by @port and ack
 * any pending SRS interrupt for it.  Sets 0-15 are enable bits in the
 * PCS register, sets 16+ in CTL.  No-op if the port holds no set.
 */
static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
{
	void __iomem *regs = mvi->regs;	/* for the mr32()/mw32() macros */
	u32 tmp, offs;
	u8 *tfs = &port->taskfileset;

	if (*tfs == MVS_ID_NOT_MAPPED)
		return;

	/* enable bit within the 16-bit bank for this set */
	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
	if (*tfs < 16) {
		tmp = mr32(PCS);
		mw32(PCS, tmp & ~offs);
	} else {
		tmp = mr32(CTL);
		mw32(CTL, tmp & ~offs);
	}

	/* ack any stale SRS interrupt for this set */
	tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
	if (tmp)
		mw32(INT_STAT_SRS, tmp);

	*tfs = MVS_ID_NOT_MAPPED;
}
/*
 * Claim a free SATA register set for @port.  Sets 0-15 are tracked in
 * the PCS register, 16+ in CTL.  Returns 0 on success (the set index is
 * recorded in port->taskfileset; also 0 if the port already holds one),
 * or MVS_ID_NOT_MAPPED when all srs_sz sets are busy.
 */
static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
{
	int i;
	u32 tmp, offs;
	void __iomem *regs = mvi->regs;	/* for the mr32()/mw32() macros */

	if (port->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;

	tmp = mr32(PCS);

	for (i = 0; i < mvi->chip->srs_sz; i++) {
		if (i == 16)
			tmp = mr32(CTL);	/* switch to the high bank */
		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
		if (!(tmp & offs)) {
			port->taskfileset = i;

			if (i < 16)
				mw32(PCS, tmp | offs);
			else
				mw32(CTL, tmp | offs);
			/* ack any stale SRS interrupt for this set */
			tmp = mr32(INT_STAT_SRS) & (1U << i);
			if (tmp)
				mw32(INT_STAT_SRS, tmp);
			return 0;
		}
	}
	return MVS_ID_NOT_MAPPED;
}
  1537. static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
  1538. {
  1539. struct ata_queued_cmd *qc = task->uldd_task;
  1540. if (qc) {
  1541. if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
  1542. qc->tf.command == ATA_CMD_FPDMA_READ) {
  1543. *tag = qc->tag;
  1544. return 1;
  1545. }
  1546. }
  1547. return 0;
  1548. }
/*
 * Prepare a SATA/STP command in slot @tei->tag: claim a SATA register
 * set, fill the TX ring entry, lay out the slot DMA buffer (command
 * FIS/ATAPI CDB, open address frame, PRD table, status buffer) and the
 * command header.  Returns 0, or -EBUSY when no register set is free.
 */
static int mvs_task_prep_ata(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct domain_device *dev = task->dev;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct asd_sas_port *sas_port = dev->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct mvs_port *port = tei->port;
	u32 tag = tei->tag;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf;
	dma_addr_t buf_tmp_dma;
	u32 i, req_len, resp_len;
	const u32 max_resp_len = SB_RFB_MAX;

	if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
		return -EBUSY;

	slot = &mvi->slot_info[tag];
	slot->tx = mvi->tx_prod;
	/* TX ring entry routes the command to the port's register set */
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
					(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
					(sas_port->phy_mask << TXQ_PHY_SHIFT) |
					(port->taskfileset << TXQ_SRS_SHIFT));

	if (task->ata_task.use_ncq)
		flags |= MCH_FPDMA;
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
		if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
			flags |= MCH_ATAPI;
	}

	/* FIXME: fill in port multiplier number */

	hdr->flags = cpu_to_le32(flags);

	/* FIXME: the low order order 5 bits for the TAG if enable NCQ */
	if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
		/* NOTE(review): hdr->tags is a little-endian header field
		 * but is used here as a CPU value (no le32 conversion on
		 * either side) -- byte order looks suspect on big-endian
		 * hosts; confirm against the chip spec before changing. */
		task->ata_task.fis.sector_count |= hdr->tags << 3;
	else
		hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_ATA_CMD_SZ;
	buf_tmp_dma += MVS_ATA_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_ATA_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	/* used for STP.  unused for SATA? */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	/* FIXME: probably unused, for SATA.  kept here just in case
	 * we get a STP/SATA error information record
	 */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	req_len = sizeof(struct host_to_dev_fis);
	resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
	    sizeof(struct mvs_err_info) - i;

	/* request, response lengths */
	resp_len = min(resp_len, max_resp_len);
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
	/* fill in command FIS and ATAPI CDB */
	memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
		memcpy(buf_cmd + STP_ATAPI_CMD,
			task->ata_task.atapi_packet, 16);

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1;	/* initiator, STP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}
/*
 * Prepare an SSP command in slot @tei->tag: fill the TX ring entry, lay
 * out the slot DMA buffer (SSP frame + command IU, open address frame,
 * PRD table, status buffer) and the command header.  Always returns 0.
 */
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;
	u8 phy_mask;

	slot = &mvi->slot_info[tag];

	/* prefer the wide-port phy map when the port is a wide port */
	phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
		task->dev->port->phy_mask;
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(phy_mask << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);	/* first-burst bit in the command IU */
	}
	hdr->flags = cpu_to_le32(flags |
				 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
				 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));

	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_SSP_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	req_len = sizeof(struct ssp_frame_hdr) + 28;	/* hdr + command IU */

	/* request, response lengths */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1;	/* initiator, SSP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
	ssp_hdr->frame_type = SSP_COMMAND;
	memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in command frame IU */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	buf_cmd[9] = fburst | task->ssp_task.task_attr |
			(task->ssp_task.task_prio << 3);
	memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}
/*
 * libsas execute-task entry point.  Queues up to @num linked sas_tasks
 * under mvi->lock: for each task it maps the scatterlist (non-ATA
 * only; ATA sgs are pre-mapped by libata), allocates a slot tag, builds
 * the protocol-specific command via mvs_task_prep_*(), and advances the
 * TX producer.  The hardware doorbell (TX_PROD_IDX) is rung once at the
 * end for however many commands were staged.  Returns 0 or an error /
 * SAS_PHY_DOWN code for the task that failed.
 */
static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
{
	struct domain_device *dev = task->dev;
	struct mvs_info *mvi = dev->port->ha->lldd_ha;
	struct pci_dev *pdev = mvi->pdev;
	void __iomem *regs = mvi->regs;	/* for the final mw32() kick */
	struct mvs_task_exec_info tei;
	struct sas_task *t = task;
	struct mvs_slot_info *slot;
	u32 tag = 0xdeadbeef, rc, n_elem = 0;
	unsigned long flags;
	u32 n = num, pass = 0;

	spin_lock_irqsave(&mvi->lock, flags);

	do {
		dev = t->dev;
		tei.port = &mvi->port[dev->port->id];

		if (!tei.port->port_attached) {
			if (sas_protocol_ata(t->task_proto)) {
				/* ATA on a dead port: fail the whole batch */
				rc = SAS_PHY_DOWN;
				goto out_done;
			} else {
				/* non-ATA: complete this task as undelivered
				 * and move on to the next one */
				struct task_status_struct *ts = &t->task_status;
				ts->resp = SAS_TASK_UNDELIVERED;
				ts->stat = SAS_PHY_DOWN;
				t->task_done(t);
				if (n > 1)
					t = list_entry(t->list.next,
							struct sas_task, list);
				continue;
			}
		}

		if (!sas_protocol_ata(t->task_proto)) {
			if (t->num_scatter) {
				n_elem = pci_map_sg(mvi->pdev, t->scatter,
						    t->num_scatter,
						    t->data_dir);
				if (!n_elem) {
					rc = -ENOMEM;
					goto err_out;
				}
			}
		} else {
			/* ATA scatterlists are already DMA-mapped by libata */
			n_elem = t->num_scatter;
		}

		rc = mvs_tag_alloc(mvi, &tag);
		if (rc)
			goto err_out;

		slot = &mvi->slot_info[tag];
		t->lldd_task = NULL;
		slot->n_elem = n_elem;
		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
		tei.task = t;
		tei.hdr = &mvi->slot[tag];
		tei.tag = tag;
		tei.n_elem = n_elem;

		switch (t->task_proto) {
		case SAS_PROTOCOL_SMP:
			rc = mvs_task_prep_smp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SSP:
			rc = mvs_task_prep_ssp(mvi, &tei);
			break;
		case SAS_PROTOCOL_SATA:
		case SAS_PROTOCOL_STP:
		case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
			rc = mvs_task_prep_ata(mvi, &tei);
			break;
		default:
			dev_printk(KERN_ERR, &pdev->dev,
				"unknown sas_task proto: 0x%x\n",
				t->task_proto);
			rc = -EINVAL;
			break;
		}

		if (rc)
			goto err_out_tag;

		slot->task = t;
		slot->port = tei.port;
		t->lldd_task = (void *) slot;
		list_add_tail(&slot->list, &slot->port->list);
		/* TODO: select normal or high priority */

		spin_lock(&t->task_state_lock);
		t->task_state_flags |= SAS_TASK_AT_INITIATOR;
		spin_unlock(&t->task_state_lock);

		mvs_hba_memory_dump(mvi, tag, t->task_proto);

		++pass;
		mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);

		if (n > 1)
			t = list_entry(t->list.next, struct sas_task, list);
	} while (--n);

	rc = 0;
	goto out_done;

err_out_tag:
	mvs_tag_free(mvi, tag);
err_out:
	dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
	if (!sas_protocol_ata(t->task_proto))
		if (n_elem)
			pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
				     t->data_dir);
out_done:
	/* ring the doorbell once for everything staged in this call */
	if (pass)
		mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
	spin_unlock_irqrestore(&mvi->lock, flags);
	return rc;
}
/*
 * Abort handler for a queued sas_task.  If the task already completed,
 * report success immediately.  Otherwise log the protocol, free the
 * slot the task occupies (if found), and re-issue the task; the abort
 * result reflects whether that re-issue succeeded.
 */
static int mvs_task_abort(struct sas_task *task)
{
	int rc;
	unsigned long flags;
	struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
	struct pci_dev *pdev = mvi->pdev;
	int tag;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		/* task already done: nothing to abort */
		rc = TMF_RESP_FUNC_COMPLETE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		goto out_done;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! \n");
		break;
	case SAS_PROTOCOL_SSP:
		dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! \n");
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
		dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! \n");
#if _MV_DUMP
		dev_printk(KERN_DEBUG, &pdev->dev, "Dump D2H FIS: \n");
		mvs_hexdump(sizeof(struct host_to_dev_fis),
				(void *)&task->ata_task.fis, 0);
		dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
		mvs_hexdump(16, task->ata_task.atapi_packet, 0);
#endif
		spin_lock_irqsave(&task->task_state_lock, flags);
		if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
			/* TODO */
			;
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		break;
	}
	default:
		break;
	}

	/* free the slot the task holds, then retry delivery */
	if (mvs_find_tag(mvi, task, &tag)) {
		spin_lock_irqsave(&mvi->lock, flags);
		mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
		spin_unlock_irqrestore(&mvi->lock, flags);
	}
	if (!mvs_task_exec(task, 1, GFP_ATOMIC))
		rc = TMF_RESP_FUNC_COMPLETE;
	else
		rc = TMF_RESP_FUNC_FAILED;
out_done:
	return rc;
}
/*
 * Tear down everything mvs_alloc() built: per-slot DMA buffers, the
 * TX/RX rings, the RX FIS area, the command-header array, register
 * mappings, the SCSI host reference, the libsas phy/port arrays and the
 * mvs_info itself.  Safe to call with a partially-initialized @mvi
 * (every member is checked before release).
 */
static void mvs_free(struct mvs_info *mvi)
{
	int i;

	if (!mvi)
		return;

	for (i = 0; i < MVS_SLOTS; i++) {
		struct mvs_slot_info *slot = &mvi->slot_info[i];

		if (slot->buf)
			dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
					  slot->buf, slot->buf_dma);
	}

	if (mvi->tx)
		dma_free_coherent(&mvi->pdev->dev,
				  sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				  mvi->tx, mvi->tx_dma);
	if (mvi->rx_fis)
		dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
				  mvi->rx_fis, mvi->rx_fis_dma);
	if (mvi->rx)
		dma_free_coherent(&mvi->pdev->dev,
				  sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
				  mvi->rx, mvi->rx_dma);
	if (mvi->slot)
		dma_free_coherent(&mvi->pdev->dev,
				  sizeof(*mvi->slot) * MVS_SLOTS,
				  mvi->slot, mvi->slot_dma);
#ifdef MVS_ENABLE_PERI
	if (mvi->peri_regs)
		iounmap(mvi->peri_regs);
#endif
	if (mvi->regs)
		iounmap(mvi->regs);
	if (mvi->shost)
		scsi_host_put(mvi->shost);
	kfree(mvi->sas.sas_port);
	kfree(mvi->sas.sas_phy);
	kfree(mvi);
}
/* FIXME: locking? */
/*
 * libsas phy-control entry point.  Reads the phy's serial control/
 * status register, applies the requested function (link-rate limits,
 * hard reset, link reset) and writes it back.  Unsupported functions
 * (including PHY_FUNC_DISABLE and PHY_FUNC_RELEASE_SPINUP_HOLD, which
 * deliberately fall through to default) return -EOPNOTSUPP.
 */
static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			   void *funcdata)
{
	struct mvs_info *mvi = sas_phy->ha->lldd_ha;
	int rc = 0, phy_id = sas_phy->id;
	u32 tmp;

	tmp = mvs_read_phy_ctl(mvi, phy_id);

	switch (func) {
	case PHY_FUNC_SET_LINK_RATE:{
			struct sas_phy_linkrates *rates = funcdata;
			u32 lrmin = 0, lrmax = 0;

			/* min rate lives in bits 11:8, max in bits 15:12 */
			lrmin = (rates->minimum_linkrate << 8);
			lrmax = (rates->maximum_linkrate << 12);

			if (lrmin) {
				tmp &= ~(0xf << 8);
				tmp |= lrmin;
			}
			if (lrmax) {
				tmp &= ~(0xf << 12);
				tmp |= lrmax;
			}
			mvs_write_phy_ctl(mvi, phy_id, tmp);
			break;
		}

	case PHY_FUNC_HARD_RESET:
		/* a hard reset is already in flight; don't re-trigger */
		if (tmp & PHY_RST_HARD)
			break;
		mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
		break;

	case PHY_FUNC_LINK_RESET:
		mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
		break;

	case PHY_FUNC_DISABLE:
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		rc = -EOPNOTSUPP;
	}

	return rc;
}
/*
 * Initialize the libsas view of phy @phy_id: identity, role, link
 * state, and the back-pointers libsas needs.  Phys beyond the chip's
 * physical phy count are created disabled.
 */
static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
	struct mvs_phy *phy = &mvi->phy[phy_id];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;

	sas_phy->id = phy_id;
	sas_phy->sas_addr = &mvi->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = &mvi->sas;
	sas_phy->lldd_phy = phy;
}
  2000. static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
  2001. const struct pci_device_id *ent)
  2002. {
  2003. struct mvs_info *mvi;
  2004. unsigned long res_start, res_len, res_flag;
  2005. struct asd_sas_phy **arr_phy;
  2006. struct asd_sas_port **arr_port;
  2007. const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
  2008. int i;
  2009. /*
  2010. * alloc and init our per-HBA mvs_info struct
  2011. */
  2012. mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
  2013. if (!mvi)
  2014. return NULL;
  2015. spin_lock_init(&mvi->lock);
  2016. #ifdef MVS_USE_TASKLET
  2017. tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
  2018. #endif
  2019. mvi->pdev = pdev;
  2020. mvi->chip = chip;
  2021. if (pdev->device == 0x6440 && pdev->revision == 0)
  2022. mvi->flags |= MVF_PHY_PWR_FIX;
  2023. /*
  2024. * alloc and init SCSI, SAS glue
  2025. */
  2026. mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
  2027. if (!mvi->shost)
  2028. goto err_out;
  2029. arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
  2030. arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
  2031. if (!arr_phy || !arr_port)
  2032. goto err_out;
  2033. for (i = 0; i < MVS_MAX_PHYS; i++) {
  2034. mvs_phy_init(mvi, i);
  2035. arr_phy[i] = &mvi->phy[i].sas_phy;
  2036. arr_port[i] = &mvi->port[i].sas_port;
  2037. mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
  2038. mvi->port[i].wide_port_phymap = 0;
  2039. mvi->port[i].port_attached = 0;
  2040. INIT_LIST_HEAD(&mvi->port[i].list);
  2041. }
  2042. SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
  2043. mvi->shost->transportt = mvs_stt;
  2044. mvi->shost->max_id = 21;
  2045. mvi->shost->max_lun = ~0;
  2046. mvi->shost->max_channel = 0;
  2047. mvi->shost->max_cmd_len = 16;
  2048. mvi->sas.sas_ha_name = DRV_NAME;
  2049. mvi->sas.dev = &pdev->dev;
  2050. mvi->sas.lldd_module = THIS_MODULE;
  2051. mvi->sas.sas_addr = &mvi->sas_addr[0];
  2052. mvi->sas.sas_phy = arr_phy;
  2053. mvi->sas.sas_port = arr_port;
  2054. mvi->sas.num_phys = chip->n_phy;
  2055. mvi->sas.lldd_max_execute_num = 1;
  2056. mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
  2057. mvi->shost->can_queue = MVS_CAN_QUEUE;
  2058. mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
  2059. mvi->sas.lldd_ha = mvi;
  2060. mvi->sas.core.shost = mvi->shost;
  2061. mvs_tag_init(mvi);
  2062. /*
  2063. * ioremap main and peripheral registers
  2064. */
  2065. #ifdef MVS_ENABLE_PERI
  2066. res_start = pci_resource_start(pdev, 2);
  2067. res_len = pci_resource_len(pdev, 2);
  2068. if (!res_start || !res_len)
  2069. goto err_out;
  2070. mvi->peri_regs = ioremap_nocache(res_start, res_len);
  2071. if (!mvi->peri_regs)
  2072. goto err_out;
  2073. #endif
  2074. res_start = pci_resource_start(pdev, 4);
  2075. res_len = pci_resource_len(pdev, 4);
  2076. if (!res_start || !res_len)
  2077. goto err_out;
  2078. res_flag = pci_resource_flags(pdev, 4);
  2079. if (res_flag & IORESOURCE_CACHEABLE)
  2080. mvi->regs = ioremap(res_start, res_len);
  2081. else
  2082. mvi->regs = ioremap_nocache(res_start, res_len);
  2083. if (!mvi->regs)
  2084. goto err_out;
  2085. /*
  2086. * alloc and init our DMA areas
  2087. */
  2088. mvi->tx = dma_alloc_coherent(&pdev->dev,
  2089. sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
  2090. &mvi->tx_dma, GFP_KERNEL);
  2091. if (!mvi->tx)
  2092. goto err_out;
  2093. memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
  2094. mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
  2095. &mvi->rx_fis_dma, GFP_KERNEL);
  2096. if (!mvi->rx_fis)
  2097. goto err_out;
  2098. memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
  2099. mvi->rx = dma_alloc_coherent(&pdev->dev,
  2100. sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
  2101. &mvi->rx_dma, GFP_KERNEL);
  2102. if (!mvi->rx)
  2103. goto err_out;
  2104. memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
  2105. mvi->rx[0] = cpu_to_le32(0xfff);
  2106. mvi->rx_cons = 0xfff;
  2107. mvi->slot = dma_alloc_coherent(&pdev->dev,
  2108. sizeof(*mvi->slot) * MVS_SLOTS,
  2109. &mvi->slot_dma, GFP_KERNEL);
  2110. if (!mvi->slot)
  2111. goto err_out;
  2112. memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
  2113. for (i = 0; i < MVS_SLOTS; i++) {
  2114. struct mvs_slot_info *slot = &mvi->slot_info[i];
  2115. slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
  2116. &slot->buf_dma, GFP_KERNEL);
  2117. if (!slot->buf)
  2118. goto err_out;
  2119. memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
  2120. }
  2121. /* finally, read NVRAM to get our SAS address */
  2122. if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
  2123. goto err_out;
  2124. return mvi;
  2125. err_out:
  2126. mvs_free(mvi);
  2127. return NULL;
  2128. }
/* Indirect register read: latch @addr into CMD_ADDR, read CMD_DATA. */
static u32 mvs_cr32(void __iomem *regs, u32 addr)
{
	mw32(CMD_ADDR, addr);
	return mr32(CMD_DATA);
}
/* Indirect register write: latch @addr into CMD_ADDR, write CMD_DATA. */
static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
{
	mw32(CMD_ADDR, addr);
	mw32(CMD_DATA, val);
}
  2139. static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
  2140. {
  2141. void __iomem *regs = mvi->regs;
  2142. return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4):
  2143. mr32(P4_SER_CTLSTAT + (port - 4) * 4);
  2144. }
  2145. static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
  2146. {
  2147. void __iomem *regs = mvi->regs;
  2148. if (port < 4)
  2149. mw32(P0_SER_CTLSTAT + port * 4, val);
  2150. else
  2151. mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
  2152. }
  2153. static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
  2154. {
  2155. void __iomem *regs = mvi->regs + off;
  2156. void __iomem *regs2 = mvi->regs + off2;
  2157. return (port < 4)?readl(regs + port * 8):
  2158. readl(regs2 + (port - 4) * 8);
  2159. }
  2160. static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
  2161. u32 port, u32 val)
  2162. {
  2163. void __iomem *regs = mvi->regs + off;
  2164. void __iomem *regs2 = mvi->regs + off2;
  2165. if (port < 4)
  2166. writel(val, regs + port * 8);
  2167. else
  2168. writel(val, regs2 + (port - 4) * 8);
  2169. }
/* Read the per-port CFG_DATA register for phy @port. */
static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
}
/* Write the per-port CFG_DATA register for phy @port. */
static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
}
/* Latch @addr into the per-port CFG_ADDR register (selects which
 * config register the next CFG_DATA access hits). */
static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
}
/* Read the per-port VSR (vendor-specific register) data register. */
static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
}
/* Write the per-port VSR (vendor-specific register) data register. */
static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
}
/* Latch @addr into the per-port VSR address register (selects which
 * vendor-specific register the next VSR_DATA access hits). */
static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
}
/* Read the per-port PHY interrupt status register. */
static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
}

/* Write (ack) the per-port PHY interrupt status register. */
static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
}

/* Read the per-port PHY interrupt mask register. */
static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
}

/* Write the per-port PHY interrupt mask register. */
static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
}
/*
 * One-time PHY/link-layer workarounds applied during HBA bring-up.
 * Writes go through the indirect CMD_* registers via mvs_cr32()/
 * mvs_cw32(); the trailing VSR pokes use mw32(), which expands against
 * the local 'regs' pointer.
 */
static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	/* workaround for SATA R-ERR, to ignore phy glitch */
	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
	tmp &= ~(1 << 9);
	tmp |= (1 << 10);
	mvs_cw32(regs, CMD_PHY_TIMER, tmp);

	/* enable retry 127 times */
	mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);

	/* extend open frame timeout to max */
	tmp = mvs_cr32(regs, CMD_SAS_CTL0);
	tmp &= ~0xffff;
	tmp |= 0x3fff;
	mvs_cw32(regs, CMD_SAS_CTL0, tmp);

	/* workaround for WDTIMEOUT , set to 550 ms */
	mvs_cw32(regs, CMD_WD_TIMER, 0x86470);

	/* not to halt for different port op during wideport link change */
	mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);

	/* workaround for Seagate disk not-found OOB sequence, recv
	 * COMINIT before sending out COMWAKE */
	tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
	tmp &= 0x0000ffff;
	tmp |= 0x00fa0000;
	mvs_cw32(regs, CMD_PHY_MODE_21, tmp);

	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
	tmp &= 0x1fffffff;
	tmp |= (2U << 29);	/* 8 ms retry */
	mvs_cw32(regs, CMD_PHY_TIMER, tmp);

	/* TEST - for phy decoding error, adjust voltage levels.
	 * NOTE(review): writes VSR register 0x8 on four phy banks spaced
	 * 8 bytes apart - assumes a 4-phy bank layout; confirm for >4-phy
	 * chips. */
	mw32(P0_VSR_ADDR + 0, 0x8);
	mw32(P0_VSR_DATA + 0, 0x2F0);
	mw32(P0_VSR_ADDR + 8, 0x8);
	mw32(P0_VSR_DATA + 8, 0x2F0);
	mw32(P0_VSR_ADDR + 16, 0x8);
	mw32(P0_VSR_DATA + 16, 0x2F0);
	mw32(P0_VSR_ADDR + 24, 0x8);
	mw32(P0_VSR_DATA + 24, 0x2F0);
}
/*
 * Enable transmit for phy @PhyId by setting its bit in the PCS
 * register.  Chips with more than four phys use a different bit-field
 * offset (PCS_EN_PORT_XMT_SHIFT2) for the per-port enable bits.
 */
static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(PCS);
	if (mvi->chip->n_phy <= 4)
		tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
	else
		tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
	mw32(PCS, tmp);
}
/*
 * Sample the auto-detected port type of phy @i from GBL_PORT_TYPE and
 * OR the result into phy->phy_type: SAS when the phy's MODE_SAS_SATA
 * bit is set, SATA otherwise.
 */
static void mvs_detect_porttype(struct mvs_info *mvi, int i)
{
	void __iomem *regs = mvi->regs;
	u32 reg;
	struct mvs_phy *phy = &mvi->phy[i];

	/* TODO check & save device type */
	reg = mr32(GBL_PORT_TYPE);

	if (reg & MODE_SAS_SATA & (1 << i))
		phy->phy_type |= PORT_TYPE_SAS;
	else
		phy->phy_type |= PORT_TYPE_SATA;
}
  2273. static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
  2274. {
  2275. u32 *s = (u32 *) buf;
  2276. if (!s)
  2277. return NULL;
  2278. mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
  2279. s[3] = mvs_read_port_cfg_data(mvi, i);
  2280. mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
  2281. s[2] = mvs_read_port_cfg_data(mvi, i);
  2282. mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
  2283. s[1] = mvs_read_port_cfg_data(mvi, i);
  2284. mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
  2285. s[0] = mvs_read_port_cfg_data(mvi, i);
  2286. return (void *)s;
  2287. }
/* Non-zero iff the phy IRQ status reports a received SATA signature FIS. */
static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}
/*
 * Re-program the PHYR_WIDE_PORT registers for the port that phy @i
 * belongs to: members of the wide port get the full phy map written,
 * the rest are cleared.
 * NOTE(review): the roles of 'no' and 'j' here depend on the
 * for_each_phy() macro definition (not visible in this chunk) -
 * confirm which carries the shifted mask and which the phy index.
 */
static void mvs_update_wideport(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
		if (no & 1) {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no, 0);
		}
}
  2307. static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
  2308. {
  2309. u32 tmp;
  2310. struct mvs_phy *phy = &mvi->phy[i];
  2311. struct mvs_port *port = phy->port;;
  2312. tmp = mvs_read_phy_ctl(mvi, i);
  2313. if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
  2314. if (!port)
  2315. phy->phy_attached = 1;
  2316. return tmp;
  2317. }
  2318. if (port) {
  2319. if (phy->phy_type & PORT_TYPE_SAS) {
  2320. port->wide_port_phymap &= ~(1U << i);
  2321. if (!port->wide_port_phymap)
  2322. port->port_attached = 0;
  2323. mvs_update_wideport(mvi, i);
  2324. } else if (phy->phy_type & PORT_TYPE_SATA)
  2325. port->port_attached = 0;
  2326. mvs_free_reg_set(mvi, phy->port);
  2327. phy->port = NULL;
  2328. phy->phy_attached = 0;
  2329. phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
  2330. }
  2331. return 0;
  2332. }
/*
 * Refresh the cached state of phy @i from the hardware: local SAS
 * address and device info, and - when the phy is up - negotiated link
 * rate, attached-device identity (SAS) or D2H signature FIS (SATA).
 * When @get_st is set, the phy interrupt status is sampled first and
 * acknowledged back to the hardware on exit.
 */
static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
			       int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct pci_dev *pdev = mvi->pdev;
	u32 tmp;
	u64 tmp64;

	/* local identify frame info and SAS address (hi:lo halves) */
	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
	phy->dev_info = mvs_read_port_cfg_data(mvi, i);
	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
	phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
	phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);

	if (get_st) {
		phy->irq_status = mvs_read_port_irq_stat(mvi, i);
		/* may tear down the phy's port when the link is gone */
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		u32 phy_st;
		struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];

		mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
		phy_st = mvs_read_port_cfg_data(mvi, i);

		/* negotiated / min / max link rates from the status word */
		sas_phy->linkrate =
			(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
				PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
		phy->minimum_linkrate =
			(phy->phy_status &
				PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
		phy->maximum_linkrate =
			(phy->phy_status &
				PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;

		if (phy->phy_type & PORT_TYPE_SAS) {
			/* Updated attached_sas_addr */
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
			phy->att_dev_sas_addr =
				(u64) mvs_read_port_cfg_data(mvi, i) << 32;
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
			phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
			phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
			phy->identify.device_type =
				phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
					SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
					SAS_PROTOCOL_SMP;
			if (phy_st & PHY_OOB_DTCTD)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
				sizeof(struct sas_identify_frame);
		} else if (phy->phy_type & PORT_TYPE_SATA) {
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				phy->att_dev_sas_addr = i; /* temp */
				if (phy_st & PHY_OOB_DTCTD)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
					sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i,
						(void *)sas_phy->frame_rcvd);
			} else {
				/* no signature FIS yet: give up on SATA */
				dev_printk(KERN_DEBUG, &pdev->dev,
					"No sig fis\n");
				phy->phy_type &= ~(PORT_TYPE_SATA);
				goto out_done;
			}
		}

		/* libsas expects the attached SAS address big-endian */
		tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
		memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);

		dev_printk(KERN_DEBUG, &pdev->dev,
			"phy[%d] Get Attached Address 0x%llX ,"
			" SAS Address 0x%llX\n",
			i,
			(unsigned long long)phy->att_dev_sas_addr,
			(unsigned long long)phy->dev_sas_addr);
		dev_printk(KERN_DEBUG, &pdev->dev,
			"Rate = %x , type = %d\n",
			sas_phy->linkrate, phy->phy_type);

		/* workaround for HW phy decoding error on 1.5g disk drive */
		mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
		tmp = mvs_read_port_vsr_data(mvi, i);
		if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
		     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
			SAS_LINK_RATE_1_5_GBPS)
			tmp &= ~PHY_MODE6_LATECLK;
		else
			tmp |= PHY_MODE6_LATECLK;
		mvs_write_port_vsr_data(mvi, i, tmp);
	}
out_done:
	if (get_st)
		/* ack the interrupt status sampled above */
		mvs_write_port_irq_stat(mvi, i, phy->irq_status);
}
/*
 * libsas lldd_port_formed callback: bind the newly formed sas_port to
 * our mvs_port, mark it attached and, for SAS phys, push the wide-port
 * phy mask down to the hardware.  Runs under mvi->lock.
 */
static void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = sas_ha->lldd_ha;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct mvs_port *port = &mvi->port[sas_port->id];
	unsigned long flags;

	spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	/* no task-file register set assigned to this port yet */
	port->taskfileset = MVS_ID_NOT_MAPPED;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mvs_update_wideport(mvi, sas_phy->id);
	}
	spin_unlock_irqrestore(&mvi->lock, flags);
}
/* I_T nexus reset is not implemented; always report failure to libsas. */
static int mvs_I_T_nexus_reset(struct domain_device *dev)
{
	return TMF_RESP_FUNC_FAILED;
}
/*
 * Full hardware bring-up: reset the HBA, program the DMA ring/list base
 * addresses, reset and configure every phy, set endianness, and enable
 * the TX/RX engines and completion interrupts.
 *
 * Returns 0 on success, -EBUSY if the controller never comes out of
 * reset.
 */
static int __devinit mvs_hw_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(GBL_CTL, 0);
	tmp = mr32(GBL_CTL);

	/* Reset Controller */
	if (!(tmp & HBA_RST)) {
		if (mvi->flags & MVF_PHY_PWR_FIX) {
			/* power-cycle the phys before resetting */
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
			tmp &= ~PCTL_PWR_ON;
			tmp |= PCTL_OFF;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
			tmp &= ~PCTL_PWR_ON;
			tmp |= PCTL_OFF;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
		}

		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
		mw32_f(GBL_CTL, HBA_RST);
	}

	/* wait for reset to finish; timeout is just a guess */
	i = 1000;
	while (i-- > 0) {
		msleep(10);

		if (!(mr32(GBL_CTL) & HBA_RST))
			break;
	}
	if (mr32(GBL_CTL) & HBA_RST) {
		dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
		return -EBUSY;
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(CTL);
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(CTL, cctl | CCTL_RST);

	/* write to device control _AND_ device status register? - A.C. */
	pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
	tmp &= ~PRD_REQ_MASK;
	tmp |= PRD_REQ_SIZE;
	pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

	/* power the phys back on */
	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
	tmp |= PCTL_PWR_ON;
	tmp &= ~PCTL_OFF;
	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
	tmp |= PCTL_PWR_ON;
	tmp &= ~PCTL_OFF;
	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);

	mw32_f(CTL, cctl);

	/* reset control */
	mw32(PCS, 0);		/*MVS_PCS */

	mvs_phy_hacks(mvi);

	/* program DMA base addresses (hi dword via two 16-bit shifts to
	 * stay safe on 32-bit dma_addr_t) */
	mw32(CMD_LIST_LO, mvi->slot_dma);
	mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(RX_FIS_LO, mvi->rx_fis_dma);
	mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(TX_LO, mvi->tx_dma);
	mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(RX_CFG, MVS_RX_RING_SZ);
	mw32(RX_LO, mvi->rx_dma);
	mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);

	/* enable auto port detection */
	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
	msleep(100);
	/* init and reset phys */
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* sas_addr is stored big-endian; split into hi/lo words */
		u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
		u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);

		mvs_detect_porttype(mvi, i);

		/* set phy local SAS address */
		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
		mvs_write_port_cfg_data(mvi, i, lo);
		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
		mvs_write_port_cfg_data(mvi, i, hi);

		/* reset phy */
		tmp = mvs_read_phy_ctl(mvi, i);
		tmp |= PHY_RST;
		mvs_write_phy_ctl(mvi, i, tmp);
	}

	msleep(100);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
		mvs_enable_xmt(mvi, i);
	}

	/* FIXME: update wide port bitmaps */

	/* little endian for open address and command table, etc. */
	/* A.C.
	 * it seems that ( from the spec ) turning on big-endian won't
	 * do us any good on big-endian machines, need further confirmation
	 */
	cctl = mr32(CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(PCS);
	tmp |= PCS_CMD_RST;
	mw32(PCS, tmp);

	/* interrupt coalescing may cause missing HW interrput in some case,
	 * and the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	tmp = 0;
	mw32(INT_COAL, tmp);

	tmp = 0x100;
	mw32(INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(TX_CFG, 0);
	mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
	mw32(INT_MASK, tmp);

	/* Enable SRS interrupt */
	mw32(INT_MASK_SRS, 0xFF);
	return 0;
}
  2587. static void __devinit mvs_print_info(struct mvs_info *mvi)
  2588. {
  2589. struct pci_dev *pdev = mvi->pdev;
  2590. static int printed_version;
  2591. if (!printed_version++)
  2592. dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
  2593. dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
  2594. mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
  2595. }
  2596. static int __devinit mvs_pci_init(struct pci_dev *pdev,
  2597. const struct pci_device_id *ent)
  2598. {
  2599. int rc;
  2600. struct mvs_info *mvi;
  2601. irq_handler_t irq_handler = mvs_interrupt;
  2602. rc = pci_enable_device(pdev);
  2603. if (rc)
  2604. return rc;
  2605. pci_set_master(pdev);
  2606. rc = pci_request_regions(pdev, DRV_NAME);
  2607. if (rc)
  2608. goto err_out_disable;
  2609. rc = pci_go_64(pdev);
  2610. if (rc)
  2611. goto err_out_regions;
  2612. mvi = mvs_alloc(pdev, ent);
  2613. if (!mvi) {
  2614. rc = -ENOMEM;
  2615. goto err_out_regions;
  2616. }
  2617. rc = mvs_hw_init(mvi);
  2618. if (rc)
  2619. goto err_out_mvi;
  2620. #ifndef MVS_DISABLE_MSI
  2621. if (!pci_enable_msi(pdev)) {
  2622. u32 tmp;
  2623. void __iomem *regs = mvi->regs;
  2624. mvi->flags |= MVF_MSI;
  2625. irq_handler = mvs_msi_interrupt;
  2626. tmp = mr32(PCS);
  2627. mw32(PCS, tmp | PCS_SELF_CLEAR);
  2628. }
  2629. #endif
  2630. rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
  2631. if (rc)
  2632. goto err_out_msi;
  2633. rc = scsi_add_host(mvi->shost, &pdev->dev);
  2634. if (rc)
  2635. goto err_out_irq;
  2636. rc = sas_register_ha(&mvi->sas);
  2637. if (rc)
  2638. goto err_out_shost;
  2639. pci_set_drvdata(pdev, mvi);
  2640. mvs_print_info(mvi);
  2641. mvs_hba_interrupt_enable(mvi);
  2642. scsi_scan_host(mvi->shost);
  2643. return 0;
  2644. err_out_shost:
  2645. scsi_remove_host(mvi->shost);
  2646. err_out_irq:
  2647. free_irq(pdev->irq, mvi);
  2648. err_out_msi:
  2649. if (mvi->flags |= MVF_MSI)
  2650. pci_disable_msi(pdev);
  2651. err_out_mvi:
  2652. mvs_free(mvi);
  2653. err_out_regions:
  2654. pci_release_regions(pdev);
  2655. err_out_disable:
  2656. pci_disable_device(pdev);
  2657. return rc;
  2658. }
/*
 * PCI remove: unregister from libsas and SCSI, quiesce interrupts,
 * release the IRQ/MSI, and free all driver resources in reverse order
 * of mvs_pci_init().
 */
static void __devexit mvs_pci_remove(struct pci_dev *pdev)
{
	struct mvs_info *mvi = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	if (mvi) {
		sas_unregister_ha(&mvi->sas);
		mvs_hba_interrupt_disable(mvi);
		sas_remove_host(mvi->shost);
		scsi_remove_host(mvi->shost);

		free_irq(pdev->irq, mvi);
		if (mvi->flags & MVF_MSI)
			pci_disable_msi(pdev);
		mvs_free(mvi);
		pci_release_regions(pdev);
	}
	pci_disable_device(pdev);
}
/* libsas entry points implemented by this driver. */
static struct sas_domain_function_template mvs_transport_ops = {
	.lldd_execute_task	= mvs_task_exec,
	.lldd_control_phy	= mvs_phy_control,
	.lldd_abort_task	= mvs_task_abort,
	.lldd_port_formed	= mvs_port_formed,
	.lldd_I_T_nexus_reset	= mvs_I_T_nexus_reset,
};
/* PCI IDs handled by this driver; driver_data selects the chip variant. */
static struct pci_device_id __devinitdata mvs_pci_table[] = {
	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
	{
		/* 6480 boards that report the 6440 device ID: matched by
		 * subdevice, so this entry must precede the plain 0x6440 one */
		.vendor		= PCI_VENDOR_ID_MARVELL,
		.device		= 0x6440,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= 0x6480,
		.class		= 0,
		.class_mask	= 0,
		.driver_data	= chip_6480,
	},
	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },

	{ }	/* terminate list */
};
/* PCI driver glue tying the ID table to probe/remove. */
static struct pci_driver mvs_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mvs_pci_table,
	.probe		= mvs_pci_init,
	.remove		= __devexit_p(mvs_pci_remove),
};
  2705. static int __init mvs_init(void)
  2706. {
  2707. int rc;
  2708. mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
  2709. if (!mvs_stt)
  2710. return -ENOMEM;
  2711. rc = pci_register_driver(&mvs_pci_driver);
  2712. if (rc)
  2713. goto err_out;
  2714. return 0;
  2715. err_out:
  2716. sas_release_transport(mvs_stt);
  2717. return rc;
  2718. }
/* Module exit: unregister the PCI driver and drop the SAS transport. */
static void __exit mvs_exit(void)
{
	pci_unregister_driver(&mvs_pci_driver);
	sas_release_transport(mvs_stt);
}
  2724. module_init(mvs_init);
  2725. module_exit(mvs_exit);
  2726. MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
  2727. MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
  2728. MODULE_VERSION(DRV_VERSION);
  2729. MODULE_LICENSE("GPL");
  2730. MODULE_DEVICE_TABLE(pci, mvs_pci_table);