mvsas.c 78 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
7277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969
  1. /*
  2. mvsas.c - Marvell 88SE6440 SAS/SATA support
  3. Copyright 2007 Red Hat, Inc.
  4. Copyright 2008 Marvell. <kewei@marvell.com>
  5. This program is free software; you can redistribute it and/or
  6. modify it under the terms of the GNU General Public License as
  7. published by the Free Software Foundation; either version 2,
  8. or (at your option) any later version.
  9. This program is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty
  11. of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  12. See the GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public
  14. License along with this program; see the file COPYING. If not,
  15. write to the Free Software Foundation, 675 Mass Ave, Cambridge,
  16. MA 02139, USA.
  17. ---------------------------------------------------------------
  18. Random notes:
  19. * hardware supports controlling the endian-ness of data
  20. structures. this permits elimination of all the le32_to_cpu()
  21. and cpu_to_le32() conversions.
  22. */
  23. #include <linux/kernel.h>
  24. #include <linux/module.h>
  25. #include <linux/pci.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/spinlock.h>
  28. #include <linux/delay.h>
  29. #include <linux/dma-mapping.h>
  30. #include <linux/ctype.h>
  31. #include <scsi/libsas.h>
  32. #include <asm/io.h>
  33. #define DRV_NAME "mvsas"
  34. #define DRV_VERSION "0.5.1"
  35. #define _MV_DUMP 0
  36. #define MVS_DISABLE_NVRAM
  37. #define MVS_DISABLE_MSI
  38. #define mr32(reg) readl(regs + MVS_##reg)
  39. #define mw32(reg,val) writel((val), regs + MVS_##reg)
  40. #define mw32_f(reg,val) do { \
  41. writel((val), regs + MVS_##reg); \
  42. readl(regs + MVS_##reg); \
  43. } while (0)
  44. #define MVS_ID_NOT_MAPPED 0xff
  45. #define MVS_CHIP_SLOT_SZ (1U << mvi->chip->slot_width)
  46. /* offset for D2H FIS in the Received FIS List Structure */
  47. #define SATA_RECEIVED_D2H_FIS(reg_set) \
  48. ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
  49. #define SATA_RECEIVED_PIO_FIS(reg_set) \
  50. ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
  51. #define UNASSOC_D2H_FIS(id) \
  52. ((void *) mvi->rx_fis + 0x100 * id)
  53. #define for_each_phy(__lseq_mask, __mc, __lseq, __rest) \
  54. for ((__mc) = (__lseq_mask), (__lseq) = 0; \
  55. (__mc) != 0 && __rest; \
  56. (++__lseq), (__mc) >>= 1)
/* driver compile-time configuration */
enum driver_configuration {
	MVS_TX_RING_SZ		= 1024,	/* TX ring size (12-bit) */
	MVS_RX_RING_SZ		= 1024,	/* RX ring size (12-bit) */
					/* software requires power-of-2
					 * ring size
					 */
	MVS_SLOTS		= 512,	/* command slots */
	MVS_SLOT_BUF_SZ		= 8192,	/* cmd tbl + IU + status + PRD */
	MVS_SSP_CMD_SZ		= 64,	/* SSP command table buffer size */
	MVS_ATA_CMD_SZ		= 96,	/* SATA command table buffer size */
	MVS_OAF_SZ		= 64,	/* Open address frame buffer size */
	MVS_RX_FIS_COUNT	= 17,	/* Optional rx'd FISs (max 17) */
	MVS_QUEUE_SIZE		= 30,	/* Support Queue depth */
};
/* unchangeable hardware details */
enum hardware_details {
	MVS_MAX_PHYS		= 8,	/* max. possible phys */
	MVS_MAX_PORTS		= 8,	/* max. possible ports */
	/* Received FIS List size: a 0x400-byte fixed area plus one
	 * 0x100-byte slot per optional received FIS (MVS_RX_FIS_COUNT).
	 */
	MVS_RX_FISL_SZ		= 0x400 + (MVS_RX_FIS_COUNT * 0x100),
};
  77. /* peripheral registers (BAR2) */
  78. enum peripheral_registers {
  79. SPI_CTL = 0x10, /* EEPROM control */
  80. SPI_CMD = 0x14, /* EEPROM command */
  81. SPI_DATA = 0x18, /* EEPROM data */
  82. };
  83. enum peripheral_register_bits {
  84. TWSI_RDY = (1U << 7), /* EEPROM interface ready */
  85. TWSI_RD = (1U << 4), /* EEPROM read access */
  86. SPI_ADDR_MASK = 0x3ffff, /* bits 17:0 */
  87. };
/* enhanced mode registers (BAR4) — offsets into the main register space */
enum hw_registers {
	MVS_GBL_CTL		= 0x04,  /* global control */
	MVS_GBL_INT_STAT	= 0x08,  /* global irq status */
	MVS_GBL_PI		= 0x0C,  /* ports implemented bitmask */
	MVS_GBL_PORT_TYPE	= 0xa0,  /* port type */

	MVS_CTL			= 0x100, /* SAS/SATA port configuration */
	MVS_PCS			= 0x104, /* SAS/SATA port control/status */
	MVS_CMD_LIST_LO		= 0x108, /* cmd list addr */
	MVS_CMD_LIST_HI		= 0x10C,
	MVS_RX_FIS_LO		= 0x110, /* RX FIS list addr */
	MVS_RX_FIS_HI		= 0x114,

	MVS_TX_CFG		= 0x120, /* TX configuration */
	MVS_TX_LO		= 0x124, /* TX (delivery) ring addr */
	MVS_TX_HI		= 0x128,

	MVS_TX_PROD_IDX		= 0x12C, /* TX producer pointer */
	MVS_TX_CONS_IDX		= 0x130, /* TX consumer pointer (RO) */
	MVS_RX_CFG		= 0x134, /* RX configuration */
	MVS_RX_LO		= 0x138, /* RX (completion) ring addr */
	MVS_RX_HI		= 0x13C,
	MVS_RX_CONS_IDX		= 0x140, /* RX consumer pointer (RO) */

	MVS_INT_COAL		= 0x148, /* Int coalescing config */
	MVS_INT_COAL_TMOUT	= 0x14C, /* Int coalescing timeout */
	MVS_INT_STAT		= 0x150, /* Central int status */
	MVS_INT_MASK		= 0x154, /* Central int enable */
	MVS_INT_STAT_SRS	= 0x158, /* SATA register set status */
	MVS_INT_MASK_SRS	= 0x15C,

					 /* ports 1-3 follow after this */
	MVS_P0_INT_STAT		= 0x160, /* port0 interrupt status */
	MVS_P0_INT_MASK		= 0x164, /* port0 interrupt mask */
	MVS_P4_INT_STAT		= 0x200, /* Port 4 interrupt status */
	MVS_P4_INT_MASK		= 0x204, /* Port 4 interrupt enable mask */

					 /* ports 1-3 follow after this */
	MVS_P0_SER_CTLSTAT	= 0x180, /* port0 serial control/status */
	MVS_P4_SER_CTLSTAT	= 0x220, /* port4 serial control/status */

	MVS_CMD_ADDR		= 0x1B8, /* Command register port (addr) */
	MVS_CMD_DATA		= 0x1BC, /* Command register port (data) */

					 /* ports 1-3 follow after this */
	MVS_P0_CFG_ADDR		= 0x1C0, /* port0 phy register address */
	MVS_P0_CFG_DATA		= 0x1C4, /* port0 phy register data */
	MVS_P4_CFG_ADDR		= 0x230, /* Port 4 config address */
	MVS_P4_CFG_DATA		= 0x234, /* Port 4 config data */

					 /* ports 1-3 follow after this */
	MVS_P0_VSR_ADDR		= 0x1E0, /* port0 VSR address */
	MVS_P0_VSR_DATA		= 0x1E4, /* port0 VSR data */
	MVS_P4_VSR_ADDR		= 0x250, /* port 4 VSR addr */
	MVS_P4_VSR_DATA		= 0x254, /* port 4 VSR data */
};
/* bit definitions for the hw_registers above, grouped by register */
enum hw_register_bits {
	/* MVS_GBL_CTL */
	INT_EN			= (1U << 1),	/* Global int enable */
	HBA_RST			= (1U << 0),	/* HBA reset */

	/* MVS_GBL_INT_STAT */
	INT_XOR			= (1U << 4),	/* XOR engine event */
	INT_SAS_SATA		= (1U << 0),	/* SAS/SATA event */

	/* MVS_GBL_PORT_TYPE */			/* shl for ports 1-3 */
	SATA_TARGET		= (1U << 16),	/* port0 SATA target enable */
	MODE_AUTO_DET_PORT7	= (1U << 15),	/* port0 SAS/SATA autodetect */
	MODE_AUTO_DET_PORT6	= (1U << 14),
	MODE_AUTO_DET_PORT5	= (1U << 13),
	MODE_AUTO_DET_PORT4	= (1U << 12),
	MODE_AUTO_DET_PORT3	= (1U << 11),
	MODE_AUTO_DET_PORT2	= (1U << 10),
	MODE_AUTO_DET_PORT1	= (1U << 9),
	MODE_AUTO_DET_PORT0	= (1U << 8),
	MODE_AUTO_DET_EN	= MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
				  MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
				  MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
				  MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
	MODE_SAS_PORT7_MASK	= (1U << 7),  /* port0 SAS(1), SATA(0) mode */
	MODE_SAS_PORT6_MASK	= (1U << 6),
	MODE_SAS_PORT5_MASK	= (1U << 5),
	MODE_SAS_PORT4_MASK	= (1U << 4),
	MODE_SAS_PORT3_MASK	= (1U << 3),
	MODE_SAS_PORT2_MASK	= (1U << 2),
	MODE_SAS_PORT1_MASK	= (1U << 1),
	MODE_SAS_PORT0_MASK	= (1U << 0),
	MODE_SAS_SATA		= MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
				  MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
				  MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
				  MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,

				/* SAS_MODE value may be
				 * dictated (in hw) by values
				 * of SATA_TARGET & AUTO_DET
				 */

	/* MVS_TX_CFG */
	TX_EN			= (1U << 16),	/* Enable TX */
	TX_RING_SZ_MASK		= 0xfff,	/* TX ring size, bits 11:0 */

	/* MVS_RX_CFG */
	RX_EN			= (1U << 16),	/* Enable RX */
	RX_RING_SZ_MASK		= 0xfff,	/* RX ring size, bits 11:0 */

	/* MVS_INT_COAL */
	COAL_EN			= (1U << 16),	/* Enable int coalescing */

	/* MVS_INT_STAT, MVS_INT_MASK */
	CINT_I2C		= (1U << 31),	/* I2C event */
	CINT_SW0		= (1U << 30),	/* software event 0 */
	CINT_SW1		= (1U << 29),	/* software event 1 */
	CINT_PRD_BC		= (1U << 28),	/* PRD BC err for read cmd */
	CINT_DMA_PCIE		= (1U << 27),	/* DMA to PCIE timeout */
	CINT_MEM		= (1U << 26),	/* int mem parity err */
	CINT_I2C_SLAVE		= (1U << 25),	/* slave I2C event */
	CINT_SRS		= (1U << 3),	/* SRS event */
	CINT_CI_STOP		= (1U << 1),	/* cmd issue stopped */
	CINT_DONE		= (1U << 0),	/* cmd completion */

						/* shl for ports 1-3 */
	CINT_PORT_STOPPED	= (1U << 16),	/* port0 stopped */
	CINT_PORT		= (1U << 8),	/* port0 event */
	CINT_PORT_MASK_OFFSET	= 8,
	CINT_PORT_MASK		= (0xFF << CINT_PORT_MASK_OFFSET),

	/* TX (delivery) ring bits */
	TXQ_CMD_SHIFT		= 29,
	TXQ_CMD_SSP		= 1,		/* SSP protocol */
	TXQ_CMD_SMP		= 2,		/* SMP protocol */
	TXQ_CMD_STP		= 3,		/* STP/SATA protocol */
	TXQ_CMD_SSP_FREE_LIST	= 4,		/* add to SSP targ free list */
	TXQ_CMD_SLOT_RESET	= 7,		/* reset command slot */
	TXQ_MODE_I		= (1U << 28),	/* mode: 0=target,1=initiator */
	TXQ_PRIO_HI		= (1U << 27),	/* priority: 0=normal, 1=high */
	TXQ_SRS_SHIFT		= 20,		/* SATA register set */
	TXQ_SRS_MASK		= 0x7f,
	TXQ_PHY_SHIFT		= 12,		/* PHY bitmap */
	TXQ_PHY_MASK		= 0xff,
	TXQ_SLOT_MASK		= 0xfff,	/* slot number */

	/* RX (completion) ring bits */
	RXQ_GOOD		= (1U << 23),	/* Response good */
	RXQ_SLOT_RESET		= (1U << 21),	/* Slot reset complete */
	RXQ_CMD_RX		= (1U << 20),	/* target cmd received */
	RXQ_ATTN		= (1U << 19),	/* attention */
	RXQ_RSP			= (1U << 18),	/* response frame xfer'd */
	RXQ_ERR			= (1U << 17),	/* err info rec xfer'd */
	RXQ_DONE		= (1U << 16),	/* cmd complete */
	RXQ_SLOT_MASK		= 0xfff,	/* slot number */

	/* mvs_cmd_hdr bits */
	MCH_PRD_LEN_SHIFT	= 16,		/* 16-bit PRD table len */
	MCH_SSP_FR_TYPE_SHIFT	= 13,		/* SSP frame type */

						/* SSP initiator only */
	MCH_SSP_FR_CMD		= 0x0,		/* COMMAND frame */

						/* SSP initiator or target */
	MCH_SSP_FR_TASK		= 0x1,		/* TASK frame */

						/* SSP target only */
	MCH_SSP_FR_XFER_RDY	= 0x4,		/* XFER_RDY frame */
	MCH_SSP_FR_RESP		= 0x5,		/* RESPONSE frame */
	MCH_SSP_FR_READ		= 0x6,		/* Read DATA frame(s) */
	MCH_SSP_FR_READ_RESP	= 0x7,		/* ditto, plus RESPONSE */

	MCH_PASSTHRU		= (1U << 12),	/* pass-through (SSP) */
	MCH_FBURST		= (1U << 11),	/* first burst (SSP) */
	MCH_CHK_LEN		= (1U << 10),	/* chk xfer len (SSP) */
	MCH_RETRY		= (1U << 9),	/* tport layer retry (SSP) */
	MCH_PROTECTION		= (1U << 8),	/* protection info rec (SSP) */
	MCH_RESET		= (1U << 7),	/* Reset (STP/SATA) */
	MCH_FPDMA		= (1U << 6),	/* First party DMA (STP/SATA) */
	MCH_ATAPI		= (1U << 5),	/* ATAPI (STP/SATA) */
	MCH_BIST		= (1U << 4),	/* BIST activate (STP/SATA) */
	MCH_PMP_MASK		= 0xf,		/* PMP from cmd FIS (STP/SATA)*/

	CCTL_RST		= (1U << 5),	/* port logic reset */

						/* 0(LSB first), 1(MSB first) */
	CCTL_ENDIAN_DATA	= (1U << 3),	/* PRD data */
	CCTL_ENDIAN_RSP		= (1U << 2),	/* response frame */
	CCTL_ENDIAN_OPEN	= (1U << 1),	/* open address frame */
	CCTL_ENDIAN_CMD		= (1U << 0),	/* command table */

	/* MVS_Px_SER_CTLSTAT (per-phy control) */
	PHY_SSP_RST		= (1U << 3),	/* reset SSP link layer */
	PHY_BCAST_CHG		= (1U << 2),	/* broadcast(change) notif */
	PHY_RST_HARD		= (1U << 1),	/* hard reset + phy reset */
	PHY_RST			= (1U << 0),	/* phy reset */
	PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
	PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
	PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
	PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
			(0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
	PHY_READY_MASK		= (1U << 20),

	/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
	PHYEV_DEC_ERR		= (1U << 24),	/* Phy Decoding Error */
	PHYEV_UNASSOC_FIS	= (1U << 19),	/* unassociated FIS rx'd */
	PHYEV_AN		= (1U << 18),	/* SATA async notification */
	PHYEV_BIST_ACT		= (1U << 17),	/* BIST activate FIS */
	PHYEV_SIG_FIS		= (1U << 16),	/* signature FIS */
	PHYEV_POOF		= (1U << 12),	/* phy ready from 1 -> 0 */
	PHYEV_IU_BIG		= (1U << 11),	/* IU too long err */
	PHYEV_IU_SMALL		= (1U << 10),	/* IU too short err */
	PHYEV_UNK_TAG		= (1U << 9),	/* unknown tag */
	PHYEV_BROAD_CH		= (1U << 8),	/* broadcast(CHANGE) */
	PHYEV_COMWAKE		= (1U << 7),	/* COMWAKE rx'd */
	PHYEV_PORT_SEL		= (1U << 6),	/* port selector present */
	PHYEV_HARD_RST		= (1U << 5),	/* hard reset rx'd */
	PHYEV_ID_TMOUT		= (1U << 4),	/* identify timeout */
	PHYEV_ID_FAIL		= (1U << 3),	/* identify failed */
	PHYEV_ID_DONE		= (1U << 2),	/* identify done */
	PHYEV_HARD_RST_DONE	= (1U << 1),	/* hard reset done */
	PHYEV_RDY_CH		= (1U << 0),	/* phy ready changed state */

	/* MVS_PCS */
	PCS_EN_SATA_REG_SHIFT	= (16),		/* Enable SATA Register Set */
	PCS_EN_PORT_XMT_SHIFT	= (12),		/* Enable Port Transmit */
	PCS_EN_PORT_XMT_SHIFT2	= (8),		/* For 6480 */
	PCS_SATA_RETRY		= (1U << 8),	/* retry ctl FIS on R_ERR */
	PCS_RSP_RX_EN		= (1U << 7),	/* raw response rx */
	PCS_SELF_CLEAR		= (1U << 5),	/* self-clearing int mode */
	PCS_FIS_RX_EN		= (1U << 4),	/* FIS rx enable */
	PCS_CMD_STOP_ERR	= (1U << 3),	/* cmd stop-on-err enable */
	PCS_CMD_RST		= (1U << 1),	/* reset cmd issue */
	PCS_CMD_EN		= (1U << 0),	/* enable cmd issue */

	/* Port n Attached Device Info */
	PORT_DEV_SSP_TRGT	= (1U << 19),
	PORT_DEV_SMP_TRGT	= (1U << 18),
	PORT_DEV_STP_TRGT	= (1U << 17),
	PORT_DEV_SSP_INIT	= (1U << 11),
	PORT_DEV_SMP_INIT	= (1U << 10),
	PORT_DEV_STP_INIT	= (1U << 9),
	PORT_PHY_ID_MASK	= (0xFFU << 24),
	PORT_DEV_TRGT_MASK	= (0x7U << 17),
	PORT_DEV_INIT_MASK	= (0x7U << 9),
	PORT_DEV_TYPE_MASK	= (0x7U << 0),

	/* Port n PHY Status */
	PHY_RDY			= (1U << 2),
	PHY_DW_SYNC		= (1U << 1),
	PHY_OOB_DTCTD		= (1U << 0),

	/* VSR */
	/* PHYMODE 6 (CDB) */
	PHY_MODE6_DTL_SPEED	= (1U << 27),
};
  308. enum mvs_info_flags {
  309. MVF_MSI = (1U << 0), /* MSI is enabled */
  310. MVF_PHY_PWR_FIX = (1U << 1), /* bug workaround */
  311. };
/* SAS command port registers — presumably accessed indirectly via the
 * MVS_CMD_ADDR/MVS_CMD_DATA window; confirm against the chip spec. */
enum sas_cmd_port_registers {
	CMD_CMRST_OOB_DET	= 0x100, /* COMRESET OOB detect register */
	CMD_CMWK_OOB_DET	= 0x104, /* COMWAKE OOB detect register */
	CMD_CMSAS_OOB_DET	= 0x108, /* COMSAS OOB detect register */
	CMD_BRST_OOB_DET	= 0x10c, /* burst OOB detect register */
	CMD_OOB_SPACE		= 0x110, /* OOB space control register */
	CMD_OOB_BURST		= 0x114, /* OOB burst control register */
	CMD_PHY_TIMER		= 0x118, /* PHY timer control register */
	CMD_PHY_CONFIG0		= 0x11c, /* PHY config register 0 */
	CMD_PHY_CONFIG1		= 0x120, /* PHY config register 1 */
	CMD_SAS_CTL0		= 0x124, /* SAS control register 0 */
	CMD_SAS_CTL1		= 0x128, /* SAS control register 1 */
	CMD_SAS_CTL2		= 0x12c, /* SAS control register 2 */
	CMD_SAS_CTL3		= 0x130, /* SAS control register 3 */
	CMD_ID_TEST		= 0x134, /* ID test register */
	CMD_PL_TIMER		= 0x138, /* PL timer register */
	CMD_WD_TIMER		= 0x13c, /* WD timer register */
	CMD_PORT_SEL_COUNT	= 0x140, /* port selector count register */
	CMD_APP_MEM_CTL		= 0x144, /* Application Memory Control */
	CMD_XOR_MEM_CTL		= 0x148, /* XOR Block Memory Control */
	CMD_DMA_MEM_CTL		= 0x14c, /* DMA Block Memory Control */
	CMD_PORT_MEM_CTL0	= 0x150, /* Port Memory Control 0 */
	CMD_PORT_MEM_CTL1	= 0x154, /* Port Memory Control 1 */
	CMD_SATA_PORT_MEM_CTL0	= 0x158, /* SATA Port Memory Control 0 */
	CMD_SATA_PORT_MEM_CTL1	= 0x15c, /* SATA Port Memory Control 1 */
	CMD_XOR_MEM_BIST_CTL	= 0x160, /* XOR Memory BIST Control */
	CMD_XOR_MEM_BIST_STAT	= 0x164, /* XOR Memory BIST Status */
	CMD_DMA_MEM_BIST_CTL	= 0x168, /* DMA Memory BIST Control */
	CMD_DMA_MEM_BIST_STAT	= 0x16c, /* DMA Memory BIST Status */
	CMD_PORT_MEM_BIST_CTL	= 0x170, /* Port Memory BIST Control */
	CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
	CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
	CMD_STP_MEM_BIST_CTL	= 0x17c, /* STP Memory BIST Control */
	CMD_STP_MEM_BIST_STAT0	= 0x180, /* STP Memory BIST Status 0 */
	CMD_STP_MEM_BIST_STAT1	= 0x184, /* STP Memory BIST Status 1 */
	CMD_RESET_COUNT		= 0x188, /* Reset Count */
	CMD_MONTR_DATA_SEL	= 0x18C, /* Monitor Data/Select */
	CMD_PLL_PHY_CONFIG	= 0x190, /* PLL/PHY Configuration */
	CMD_PHY_CTL		= 0x194, /* PHY Control and Status */
	CMD_PHY_TEST_COUNT0	= 0x198, /* Phy Test Count 0 */
	CMD_PHY_TEST_COUNT1	= 0x19C, /* Phy Test Count 1 */
	CMD_PHY_TEST_COUNT2	= 0x1A0, /* Phy Test Count 2 */
	CMD_APP_ERR_CONFIG	= 0x1A4, /* Application Error Configuration */
	CMD_PND_FIFO_CTL0	= 0x1A8, /* Pending FIFO Control 0 */
	CMD_HOST_CTL		= 0x1AC, /* Host Control Status */
	CMD_HOST_WR_DATA	= 0x1B0, /* Host Write Data */
	CMD_HOST_RD_DATA	= 0x1B4, /* Host Read Data */
	CMD_PHY_MODE_21		= 0x1B8, /* Phy Mode 21 */
	CMD_SL_MODE0		= 0x1BC, /* SL Mode 0 */
	CMD_SL_MODE1		= 0x1C0, /* SL Mode 1 */
	CMD_PND_FIFO_CTL1	= 0x1C4, /* Pending FIFO Control 1 */
};
/* SAS/SATA configuration port registers, aka phy registers */
enum sas_sata_config_port_regs {
	PHYR_IDENTIFY		= 0x00,	/* info for IDENTIFY frame */
	PHYR_ADDR_LO		= 0x04,	/* my SAS address (low) */
	PHYR_ADDR_HI		= 0x08,	/* my SAS address (high) */
	PHYR_ATT_DEV_INFO	= 0x0C,	/* attached device info */
	PHYR_ATT_ADDR_LO	= 0x10,	/* attached dev SAS addr (low) */
	PHYR_ATT_ADDR_HI	= 0x14,	/* attached dev SAS addr (high) */
	PHYR_SATA_CTL		= 0x18,	/* SATA control */
	PHYR_PHY_STAT		= 0x1C,	/* PHY status */
	PHYR_SATA_SIG0		= 0x20,	/* port SATA signature FIS (Byte 0-3) */
	PHYR_SATA_SIG1		= 0x24,	/* port SATA signature FIS (Byte 4-7) */
	PHYR_SATA_SIG2		= 0x28,	/* port SATA signature FIS (Byte 8-11) */
	PHYR_SATA_SIG3		= 0x2c,	/* port SATA signature FIS (Byte 12-15) */
	PHYR_R_ERR_COUNT	= 0x30,	/* port R_ERR count register */
	PHYR_CRC_ERR_COUNT	= 0x34,	/* port CRC error count register */
	PHYR_WIDE_PORT		= 0x38,	/* wide port participating */
	PHYR_CURRENT0		= 0x80,	/* current connection info 0 */
	PHYR_CURRENT1		= 0x84,	/* current connection info 1 */
	PHYR_CURRENT2		= 0x88,	/* current connection info 2 */
};
/* SAS/SATA Vendor Specific Port Registers */
enum sas_sata_vsp_regs {
	VSR_PHY_STAT	= 0x00,	/* Phy Status */
	VSR_PHY_MODE1	= 0x01,	/* phy tx */
	VSR_PHY_MODE2	= 0x02,	/* tx scc */
	VSR_PHY_MODE3	= 0x03,	/* pll */
	VSR_PHY_MODE4	= 0x04,	/* VCO */
	VSR_PHY_MODE5	= 0x05,	/* Rx */
	VSR_PHY_MODE6	= 0x06,	/* CDR */
	VSR_PHY_MODE7	= 0x07,	/* Impedance */
	VSR_PHY_MODE8	= 0x08,	/* Voltage */
	VSR_PHY_MODE9	= 0x09,	/* Test */
	VSR_PHY_MODE10	= 0x0A,	/* Power */
	VSR_PHY_MODE11	= 0x0B,	/* Phy Mode */
	VSR_PHY_VS0	= 0x0C,	/* Vendor Specific 0 */
	VSR_PHY_VS1	= 0x0D,	/* Vendor Specific 1 */
};
  402. enum pci_cfg_registers {
  403. PCR_PHY_CTL = 0x40,
  404. PCR_PHY_CTL2 = 0x90,
  405. PCR_DEV_CTRL = 0xE8,
  406. };
  407. enum pci_cfg_register_bits {
  408. PCTL_PWR_ON = (0xFU << 24),
  409. PCTL_OFF = (0xFU << 12),
  410. PRD_REQ_SIZE = (0x4000),
  411. PRD_REQ_MASK = (0x00007000),
  412. };
  413. enum nvram_layout_offsets {
  414. NVR_SIG = 0x00, /* 0xAA, 0x55 */
  415. NVR_SAS_ADDR = 0x02, /* 8-byte SAS address */
  416. };
  417. enum chip_flavors {
  418. chip_6320,
  419. chip_6440,
  420. chip_6480,
  421. };
  422. enum port_type {
  423. PORT_TYPE_SAS = (1L << 1),
  424. PORT_TYPE_SATA = (1L << 0),
  425. };
  426. /* Command Table Format */
  427. enum ct_format {
  428. /* SSP */
  429. SSP_F_H = 0x00,
  430. SSP_F_IU = 0x18,
  431. SSP_F_MAX = 0x4D,
  432. /* STP */
  433. STP_CMD_FIS = 0x00,
  434. STP_ATAPI_CMD = 0x40,
  435. STP_F_MAX = 0x10,
  436. /* SMP */
  437. SMP_F_T = 0x00,
  438. SMP_F_DEP = 0x01,
  439. SMP_F_MAX = 0x101,
  440. };
  441. enum status_buffer {
  442. SB_EIR_OFF = 0x00, /* Error Information Record */
  443. SB_RFB_OFF = 0x08, /* Response Frame Buffer */
  444. SB_RFB_MAX = 0x400, /* RFB size*/
  445. };
  446. enum error_info_rec {
  447. CMD_ISS_STPD = (1U << 31), /* Cmd Issue Stopped */
  448. };
/* per-chip-flavor constants (selected by enum chip_flavors) */
struct mvs_chip_info {
	u32		n_phy;		/* number of phys on this chip */
	u32		srs_sz;		/* NOTE(review): presumably the number
					 * of SATA register sets — confirm */
	u32		slot_width;	/* log2 of the command-slot count;
					 * see MVS_CHIP_SLOT_SZ */
};
/* error information record, first 8 bytes of a slot's status buffer */
struct mvs_err_info {
	__le32		flags;
	__le32		flags2;
};
/* physical region descriptor: one scatter/gather element for the HW */
struct mvs_prd {
	__le64		addr;		/* 64-bit buffer address */
	__le32		reserved;
	__le32		len;		/* 16-bit length */
};
/* hardware command header, one per delivery-queue slot */
struct mvs_cmd_hdr {
	__le32		flags;		/* PRD tbl len; SAS, SATA ctl */
	__le32		lens;		/* cmd, max resp frame len */
	__le32		tags;		/* targ port xfer tag; tag */
	__le32		data_len;	/* data xfer len */
	__le64		cmd_tbl;	/* command table address */
	__le64		open_frame;	/* open addr frame address */
	__le64		status_buf;	/* status buffer address */
	__le64		prd_tbl;	/* PRD tbl address */
	__le32		reserved[4];
};
/* driver-side bookkeeping for one command slot */
struct mvs_slot_info {
	struct sas_task	*task;		/* libsas task owning this slot */
	u32		n_elem;		/* mapped S/G element count */
	u32		tx;		/* TX-ring index used for this slot */

	/* DMA buffer for storing cmd tbl, open addr frame, status buffer,
	 * and PRD table
	 */
	void		*buf;
	dma_addr_t	buf_dma;
#if _MV_DUMP
	u32		cmd_size;	/* command table size (debug dump) */
#endif

	void		*response;	/* CPU pointer to status buffer */
};
/* driver state for one SAS/SATA port */
struct mvs_port {
	struct asd_sas_port	sas_port;	/* libsas port */
	u8			port_attached;	/* nonzero once a device is attached */
	u8			taskfileset;	/* SATA register set, or MVS_ID_NOT_MAPPED */
	u8			wide_port_phymap; /* phys belonging to this wide port */
};
/* driver state for one phy */
struct mvs_phy {
	struct mvs_port		*port;		/* port this phy belongs to */
	struct asd_sas_phy	sas_phy;	/* libsas phy */
	struct sas_identify	identify;	/* our identify frame data */
	struct scsi_device	*sdev;
	u64		dev_sas_addr;		/* this phy's SAS address */
	u64		att_dev_sas_addr;	/* attached device SAS address */
	u32		att_dev_info;
	u32		dev_info;
	u32		phy_type;		/* PORT_TYPE_SAS/SATA bits */
	u32		phy_status;		/* last mvs_is_phy_ready() result */
	u32		irq_status;		/* last port IRQ status read */
	u32		frame_rcvd_size;	/* valid bytes in frame_rcvd[] */
	u8		frame_rcvd[32];		/* received identify/FIS frame */
	u8		phy_attached;		/* nonzero when link is up */
};
/* per-HBA driver context */
struct mvs_info {
	unsigned long		flags;

	spinlock_t		lock;		/* host-wide lock */
	struct pci_dev		*pdev;		/* our device */
	void __iomem		*regs;		/* enhanced mode registers */
	void __iomem		*peri_regs;	/* peripheral registers */

	u8			sas_addr[SAS_ADDR_SIZE];
	struct sas_ha_struct	sas;		/* SCSI/SAS glue */
	struct Scsi_Host	*shost;

	__le32			*tx;		/* TX (delivery) DMA ring */
	dma_addr_t		tx_dma;
	u32			tx_prod;	/* cached next-producer idx */

	__le32			*rx;		/* RX (completion) DMA ring */
	dma_addr_t		rx_dma;
	u32			rx_cons;	/* RX consumer idx */

	__le32			*rx_fis;	/* RX'd FIS area */
	dma_addr_t		rx_fis_dma;

	struct mvs_cmd_hdr	*slot;		/* DMA command header slots */
	dma_addr_t		slot_dma;

	const struct mvs_chip_info *chip;

	/* tags[] plus tag_out/tag_in form a ring of free slot numbers */
	unsigned long		tags[MVS_SLOTS];
	struct mvs_slot_info	slot_info[MVS_SLOTS];
				/* further per-slot information */
	struct mvs_phy		phy[MVS_MAX_PHYS];
	struct mvs_port		port[MVS_MAX_PHYS];

	u32			can_queue;	/* per adapter */
	u32			tag_out;	/*Get*/
	u32			tag_in;		/*Give*/
};
/* list node wrapping an upper-layer task pointer */
struct mvs_queue_task {
	struct list_head list;
	void   *uldd_task;
};
/* forward declarations for helpers defined later in this file */
static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
			   void *funcdata);
static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
static void mvs_detect_porttype(struct mvs_info *mvi, int i);
static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
static void mvs_scan_start(struct Scsi_Host *);
static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev);

/* SAS transport template shared by all HBA instances */
static struct scsi_transport_template *mvs_stt;
/* per-flavor limits: { n_phy, srs_sz, slot_width } */
static const struct mvs_chip_info mvs_chips[] = {
	[chip_6320] =		{ 2, 16, 9 },
	[chip_6440] =		{ 4, 16, 9 },
	[chip_6480] =		{ 8, 32, 10 },
};
/* SCSI host template: mostly generic libsas entry points, plus our
 * scan/slave-alloc hooks.
 */
static struct scsi_host_template mvs_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.queuecommand		= sas_queuecommand,
	.target_alloc		= sas_target_alloc,
	.slave_configure	= sas_slave_configure,
	.slave_destroy		= sas_slave_destroy,
	.scan_finished		= mvs_scan_finished,
	.scan_start		= mvs_scan_start,
	.change_queue_depth	= sas_change_queue_depth,
	.change_queue_type	= sas_change_queue_type,
	.bios_param		= sas_bios_param,
	.can_queue		= 1,
	.cmd_per_lun		= 1,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= SCSI_DEFAULT_MAX_SECTORS,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_device_reset_handler	= sas_eh_device_reset_handler,
	.eh_bus_reset_handler	= sas_eh_bus_reset_handler,
	.slave_alloc		= mvs_sas_slave_alloc,
	.target_destroy		= sas_target_destroy,
	.ioctl			= sas_ioctl,
};
  587. static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
  588. {
  589. u32 i;
  590. u32 run;
  591. u32 offset;
  592. offset = 0;
  593. while (size) {
  594. printk("%08X : ", baseaddr + offset);
  595. if (size >= 16)
  596. run = 16;
  597. else
  598. run = size;
  599. size -= run;
  600. for (i = 0; i < 16; i++) {
  601. if (i < run)
  602. printk("%02X ", (u32)data[i]);
  603. else
  604. printk(" ");
  605. }
  606. printk(": ");
  607. for (i = 0; i < run; i++)
  608. printk("%c", isalnum(data[i]) ? data[i] : '.');
  609. printk("\n");
  610. data = &data[16];
  611. offset += run;
  612. }
  613. printk("\n");
  614. }
/* Debug helper: dump the first 32 bytes of slot @tag's status buffer.
 * Compiled out unless _MV_DUMP is set; @proto is currently unused.
 */
static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
			    enum sas_protocol proto)
{
#if _MV_DUMP
	u32 offset;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/* status buffer follows cmd table, OAF and PRD table in slot->buf */
	offset = slot->cmd_size + MVS_OAF_SZ +
	    sizeof(struct mvs_prd) * slot->n_elem;
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
			tag);
	mvs_hexdump(32, (u8 *) slot->response,
	    (u32) slot->buf_dma + offset);
#endif
}
/* Debug helper: dump all HW-visible structures for slot @tag — the
 * delivery-queue entry, command header, command table, open address
 * frame, status buffer and PRD table. Compiled out unless _MV_DUMP.
 */
static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
				enum sas_protocol proto)
{
#if _MV_DUMP
	u32 sz, w_ptr, r_ptr;
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];

	/*Delivery Queue */
	sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
	w_ptr = mr32(TX_PROD_IDX) & TX_RING_SZ_MASK;
	r_ptr = mr32(TX_CONS_IDX) & TX_RING_SZ_MASK;
	/* "<< 16 << 16" instead of "<< 32" avoids UB when regs are 32-bit */
	addr = mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Size=%04d , WRT_PTR=%04X , RD_PTR=%04X\n",
		sz, w_ptr, r_ptr);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Delivery Queue Base Address=0x%llX (PA)"
		"(tx_dma=0x%llX), Entry=%04d\n",
		addr, mvi->tx_dma, w_ptr);
	mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
			(u32) mvi->tx_dma + sizeof(u32) * w_ptr);
	/*Command List */
	addr = mr32(CMD_LIST_HI) << 16 << 16 | mr32(CMD_LIST_LO);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Command List Base Address=0x%llX (PA)"
		"(slot_dma=0x%llX), Header=%03d\n",
		addr, mvi->slot_dma, tag);
	dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
	/*mvs_cmd_hdr */
	mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
		(u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
	/*1.command table area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
	mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
	/*2.open address frame area */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
	mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
				(u32) slot->buf_dma + slot->cmd_size);
	/*3.status buffer */
	mvs_hba_sb_dump(mvi, tag, proto);
	/*4.PRD table */
	dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
	mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
		(u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
		(u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
#endif
}
/* Debug helper: dump the completion-queue entry the RX consumer is
 * about to process. Compiled out unless _MV_DUMP.
 * NOTE(review): entry = rx_cons + 1 is not masked here — assumes the
 * caller's rx_cons is already within ring bounds; confirm at call site.
 */
static void mvs_hba_cq_dump(struct mvs_info *mvi)
{
#if _MV_DUMP
	u64 addr;
	void __iomem *regs = mvi->regs;
	struct pci_dev *pdev = mvi->pdev;
	u32 entry = mvi->rx_cons + 1;
	u32 rx_desc = le32_to_cpu(mvi->rx[entry]);

	/*Completion Queue */
	addr = mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
	dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%08X\n",
		   (u32) mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Completion List Base Address=0x%llX (PA), "
		"CQ_Entry=%04d, CQ_WP=0x%08X\n",
		addr, entry - 1, mvi->rx[0]);
	mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
			mvi->rx_dma + sizeof(u32) * entry);
#endif
}
  699. static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
  700. {
  701. void __iomem *regs = mvi->regs;
  702. u32 tmp;
  703. tmp = mr32(GBL_CTL);
  704. mw32(GBL_CTL, tmp | INT_EN);
  705. }
  706. static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
  707. {
  708. void __iomem *regs = mvi->regs;
  709. u32 tmp;
  710. tmp = mr32(GBL_CTL);
  711. mw32(GBL_CTL, tmp & ~INT_EN);
  712. }
  713. static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
  714. /* move to PCI layer or libata core? */
  715. static int pci_go_64(struct pci_dev *pdev)
  716. {
  717. int rc;
  718. if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
  719. rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
  720. if (rc) {
  721. rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  722. if (rc) {
  723. dev_printk(KERN_ERR, &pdev->dev,
  724. "64-bit DMA enable failed\n");
  725. return rc;
  726. }
  727. }
  728. } else {
  729. rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
  730. if (rc) {
  731. dev_printk(KERN_ERR, &pdev->dev,
  732. "32-bit DMA enable failed\n");
  733. return rc;
  734. }
  735. rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
  736. if (rc) {
  737. dev_printk(KERN_ERR, &pdev->dev,
  738. "32-bit consistent DMA enable failed\n");
  739. return rc;
  740. }
  741. }
  742. return rc;
  743. }
  744. static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
  745. {
  746. mvi->tag_in = (mvi->tag_in + 1) & (MVS_SLOTS - 1);
  747. mvi->tags[mvi->tag_in] = tag;
  748. }
/* Undo the most recent allocation by rewinding the Get pointer.
 * NOTE(review): @tag itself is ignored — this assumes the caller frees
 * the tag it just allocated (LIFO); confirm all callers honour that.
 */
static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
{
	mvi->tag_out = (mvi->tag_out - 1) & (MVS_SLOTS - 1);
}
  753. static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
  754. {
  755. if (mvi->tag_out != mvi->tag_in) {
  756. *tag_out = mvi->tags[mvi->tag_out];
  757. mvi->tag_out = (mvi->tag_out + 1) & (MVS_SLOTS - 1);
  758. return 0;
  759. }
  760. return -EBUSY;
  761. }
  762. static void mvs_tag_init(struct mvs_info *mvi)
  763. {
  764. int i;
  765. for (i = 0; i < MVS_SLOTS; ++i)
  766. mvi->tags[i] = i;
  767. mvi->tag_out = 0;
  768. mvi->tag_in = MVS_SLOTS - 1;
  769. }
  770. #ifndef MVS_DISABLE_NVRAM
  771. static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
  772. {
  773. int timeout = 1000;
  774. if (addr & ~SPI_ADDR_MASK)
  775. return -EINVAL;
  776. writel(addr, regs + SPI_CMD);
  777. writel(TWSI_RD, regs + SPI_CTL);
  778. while (timeout-- > 0) {
  779. if (readl(regs + SPI_CTL) & TWSI_RDY) {
  780. *data = readl(regs + SPI_DATA);
  781. return 0;
  782. }
  783. udelay(10);
  784. }
  785. return -EBUSY;
  786. }
  787. static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
  788. void *buf, u32 buflen)
  789. {
  790. u32 addr_end, tmp_addr, i, j;
  791. u32 tmp = 0;
  792. int rc;
  793. u8 *tmp8, *buf8 = buf;
  794. addr_end = addr + buflen;
  795. tmp_addr = ALIGN(addr, 4);
  796. if (addr > 0xff)
  797. return -EINVAL;
  798. j = addr & 0x3;
  799. if (j) {
  800. rc = mvs_eep_read(regs, tmp_addr, &tmp);
  801. if (rc)
  802. return rc;
  803. tmp8 = (u8 *)&tmp;
  804. for (i = j; i < 4; i++)
  805. *buf8++ = tmp8[i];
  806. tmp_addr += 4;
  807. }
  808. for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
  809. rc = mvs_eep_read(regs, tmp_addr, &tmp);
  810. if (rc)
  811. return rc;
  812. memcpy(buf8, &tmp, 4);
  813. buf8 += 4;
  814. }
  815. if (tmp_addr < addr_end) {
  816. rc = mvs_eep_read(regs, tmp_addr, &tmp);
  817. if (rc)
  818. return rc;
  819. tmp8 = (u8 *)&tmp;
  820. j = addr_end - tmp_addr;
  821. for (i = 0; i < j; i++)
  822. *buf8++ = tmp8[i];
  823. tmp_addr += 4;
  824. }
  825. return 0;
  826. }
  827. #endif
  828. static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
  829. void *buf, u32 buflen)
  830. {
  831. #ifndef MVS_DISABLE_NVRAM
  832. void __iomem *regs = mvi->regs;
  833. int rc, i;
  834. u32 sum;
  835. u8 hdr[2], *tmp;
  836. const char *msg;
  837. rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
  838. if (rc) {
  839. msg = "nvram hdr read failed";
  840. goto err_out;
  841. }
  842. rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
  843. if (rc) {
  844. msg = "nvram read failed";
  845. goto err_out;
  846. }
  847. if (hdr[0] != 0x5A) {
  848. /* entry id */
  849. msg = "invalid nvram entry id";
  850. rc = -ENOENT;
  851. goto err_out;
  852. }
  853. tmp = buf;
  854. sum = ((u32)hdr[0]) + ((u32)hdr[1]);
  855. for (i = 0; i < buflen; i++)
  856. sum += ((u32)tmp[i]);
  857. if (sum) {
  858. msg = "nvram checksum failure";
  859. rc = -EILSEQ;
  860. goto err_out;
  861. }
  862. return 0;
  863. err_out:
  864. dev_printk(KERN_ERR, &mvi->pdev->dev, "%s", msg);
  865. return rc;
  866. #else
  867. /* FIXME , For SAS target mode */
  868. memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
  869. return 0;
  870. #endif
  871. }
/* Report phy @i's received identify data to libsas once it is attached:
 * fill in the identify frame for SAS phys and raise PORTE_BYTES_DMAED.
 */
static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];

	if (!phy->phy_attached)
		return;		/* link not up: nothing to report */

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* TODO */
	}

	mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
	mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
				   PORTE_BYTES_DMAED);
}
  890. static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
  891. {
  892. /* give the phy enabling interrupt event time to come in (1s
  893. * is empirically about all it takes) */
  894. if (time < HZ)
  895. return 0;
  896. /* Wait for discovery to finish */
  897. scsi_flush_work(shost);
  898. return 1;
  899. }
  900. static void mvs_scan_start(struct Scsi_Host *shost)
  901. {
  902. int i;
  903. struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
  904. for (i = 0; i < mvi->chip->n_phy; ++i) {
  905. mvs_bytes_dmaed(mvi, i);
  906. }
  907. }
  908. static int mvs_sas_slave_alloc(struct scsi_device *scsi_dev)
  909. {
  910. int rc;
  911. rc = sas_slave_alloc(scsi_dev);
  912. return rc;
  913. }
/* Handle a per-port interrupt: read the port's IRQ status, react to
 * link loss, OOB/identify completion and broadcast-change events, and
 * finally acknowledge the status bits back to the hardware.
 */
static void mvs_int_port(struct mvs_info *mvi, int port_no, u32 events)
{
	struct pci_dev *pdev = mvi->pdev;
	struct sas_ha_struct *sas_ha = &mvi->sas;
	struct mvs_phy *phy = &mvi->phy[port_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	phy->irq_status = mvs_read_port_irq_stat(mvi, port_no);
	/*
	* events is port event now ,
	* we need check the interrupt status which belongs to per port.
	*/
	dev_printk(KERN_DEBUG, &pdev->dev,
		"Port %d Event = %X\n",
		port_no, phy->irq_status);

	/* phy dropped out of band or saw a decode error: either signal
	 * loss to libsas or retrain the link */
	if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
		if (!mvs_is_phy_ready(mvi, port_no)) {
			sas_phy_disconnected(sas_phy);
			sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
		} else
			mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
	}
	if (!(phy->irq_status & PHYEV_DEC_ERR)) {
		if (phy->irq_status & PHYEV_COMWAKE) {
			/* SATA device woke: start watching for its SIG FIS */
			u32 tmp = mvs_read_port_irq_mask(mvi, port_no);
			mvs_write_port_irq_mask(mvi, port_no,
						tmp | PHYEV_SIG_FIS);
		}
		if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
			phy->phy_status = mvs_is_phy_ready(mvi, port_no);
			if (phy->phy_status) {
				mvs_detect_porttype(mvi, port_no);

				/* SATA identified: stop SIG-FIS interrupts */
				if (phy->phy_type & PORT_TYPE_SATA) {
					u32 tmp = mvs_read_port_irq_mask(mvi,
								port_no);
					tmp &= ~PHYEV_SIG_FIS;
					mvs_write_port_irq_mask(mvi,
								port_no, tmp);
				}

				mvs_update_phyinfo(mvi, port_no, 0);
				sas_ha->notify_phy_event(sas_phy,
							PHYE_OOB_DONE);
				mvs_bytes_dmaed(mvi, port_no);
			} else {
				dev_printk(KERN_DEBUG, &pdev->dev,
					"plugin interrupt but phy is gone\n");
				mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
							NULL);
			}
		} else if (phy->irq_status & PHYEV_BROAD_CH)
			sas_ha->notify_port_event(sas_phy,
						PORTE_BROADCAST_RCVD);
	}
	/* ack everything we saw */
	mvs_write_port_irq_stat(mvi, port_no, phy->irq_status);
}
/* Handle a SATA (SRS) interrupt — not yet implemented. */
static void mvs_int_sata(struct mvs_info *mvi)
{
	/* FIXME */
}
/* Tear down a completed slot: unmap its DMA scatterlists (data S/G for
 * non-ATA, request/response for SMP), detach the task and return the
 * slot's tag to the free ring.
 */
static void mvs_slot_free(struct mvs_info *mvi, struct sas_task *task,
			  struct mvs_slot_info *slot, u32 slot_idx)
{
	/* ATA tasks keep their mapping; libata owns it */
	if (!sas_protocol_ata(task->task_proto))
		if (slot->n_elem)
			pci_unmap_sg(mvi->pdev, task->scatter,
				     slot->n_elem, task->data_dir);

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
			     PCI_DMA_FROMDEVICE);
		pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
			     PCI_DMA_TODEVICE);
		break;

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SSP:
	default:
		/* do nothing */
		break;
	}

	slot->task = NULL;
	mvs_tag_clear(mvi, slot_idx);
}
/* Handle an error-information record for slot @slot_idx: if the HW
 * stopped issuing an ATA command, clear the pending SRS interrupt
 * status; then dump the status buffer for debugging.
 * NOTE(review): err_dw0 is declared u64 but loaded via a u32 read —
 * only the first dword of the record is examined here.
 */
static void mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
			 u32 slot_idx)
{
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	u64 err_dw0 = *(u32 *) slot->response;
	void __iomem *regs = mvi->regs;
	u32 tmp;

	if (err_dw0 & CMD_ISS_STPD)
		if (sas_protocol_ata(task->task_proto)) {
			tmp = mr32(INT_STAT_SRS);
			mw32(INT_STAT_SRS, tmp & 0xFFFF);
		}

	mvs_hba_sb_dump(mvi, slot_idx, task->task_proto);
}
/* Complete the command described by RX descriptor @rx_desc: translate
 * the hardware status into the task's task_status, free the slot and
 * call the task's completion callback. Returns the SAM status set, or
 * -1 if the task had already been aborted (in which case nothing else
 * is touched). Called under mvi->lock from interrupt context.
 */
static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc)
{
	u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
	struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
	struct sas_task *task = slot->task;
	struct task_status_struct *tstat = &task->task_status;
	struct mvs_port *port = &mvi->port[task->dev->port->id];
	bool aborted;
	void *to;

	/* claim the task: mark DONE unless an abort beat us to it */
	spin_lock(&task->task_state_lock);
	aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
	if (!aborted) {
		task->task_state_flags &=
		    ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		task->task_state_flags |= SAS_TASK_STATE_DONE;
	}
	spin_unlock(&task->task_state_lock);
	if (aborted)
		return -1;

	memset(tstat, 0, sizeof(*tstat));
	tstat->resp = SAS_TASK_COMPLETE;

	if (unlikely(!port->port_attached)) {
		tstat->stat = SAS_PHY_DOWN;
		goto out;
	}

	/* error info record present */
	if ((rx_desc & RXQ_ERR) && (*(u64 *) slot->response)) {
		tstat->stat = SAM_CHECK_COND;
		mvs_slot_err(mvi, task, slot_idx);
		goto out;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SSP:
		/* hw says status == 0, datapres == 0 */
		if (rx_desc & RXQ_GOOD) {
			tstat->stat = SAM_GOOD;
			tstat->resp = SAS_TASK_COMPLETE;
		}
		/* response frame present */
		else if (rx_desc & RXQ_RSP) {
			struct ssp_response_iu *iu =
			    slot->response + sizeof(struct mvs_err_info);
			sas_ssp_task_response(&mvi->pdev->dev, task, iu);
		}

		/* should never happen? */
		else
			tstat->stat = SAM_CHECK_COND;
		break;

	case SAS_PROTOCOL_SMP: {
			/* copy the SMP response out of the status buffer
			 * into the caller's response scatterlist */
			struct scatterlist *sg_resp = &task->smp_task.smp_resp;
			tstat->stat = SAM_GOOD;
			to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
			memcpy(to + sg_resp->offset,
				slot->response + sizeof(struct mvs_err_info),
				sg_dma_len(sg_resp));
			kunmap_atomic(to, KM_IRQ0);
			break;
		}

	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
			/* hand back the D2H FIS captured by the chip */
			struct ata_task_resp *resp =
			    (struct ata_task_resp *)tstat->buf;

			if ((rx_desc & (RXQ_DONE | RXQ_ERR | RXQ_ATTN)) ==
			    RXQ_DONE)
				tstat->stat = SAM_GOOD;
			else
				tstat->stat = SAM_CHECK_COND;

			resp->frame_len = sizeof(struct dev_to_host_fis);
			memcpy(&resp->ending_fis[0],
			       SATA_RECEIVED_D2H_FIS(port->taskfileset),
			       sizeof(struct dev_to_host_fis));
			if (resp->ending_fis[2] & ATA_ERR)
				mvs_hexdump(16, resp->ending_fis, 0);
			break;
		}

	default:
		tstat->stat = SAM_CHECK_COND;
		break;
	}

out:
	mvs_slot_free(mvi, task, slot, slot_idx);
	task->task_done(task);
	return tstat->stat;
}
  1095. static void mvs_int_full(struct mvs_info *mvi)
  1096. {
  1097. void __iomem *regs = mvi->regs;
  1098. u32 tmp, stat;
  1099. int i;
  1100. stat = mr32(INT_STAT);
  1101. mvs_int_rx(mvi, false);
  1102. for (i = 0; i < MVS_MAX_PORTS; i++) {
  1103. tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
  1104. if (tmp)
  1105. mvs_int_port(mvi, i, tmp);
  1106. }
  1107. if (stat & CINT_SRS)
  1108. mvs_int_sata(mvi);
  1109. mw32(INT_STAT, stat);
  1110. }
/* Drain the RX (completion) ring up to the hardware's producer index,
 * completing each DONE descriptor and logging ATTN/ERR ones. When
 * @self_clear is set and an attention was seen, re-run the full
 * interrupt handler to pick up the underlying event. Returns 0.
 */
static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
{
	void __iomem *regs = mvi->regs;
	u32 rx_prod_idx, rx_desc;
	bool attn = false;
	struct pci_dev *pdev = mvi->pdev;

	/* the first dword in the RX ring is special: it contains
	 * a mirror of the hardware's RX producer index, so that
	 * we don't have to stall the CPU reading that register.
	 * The actual RX ring is offset by one dword, due to this.
	 */
	rx_prod_idx = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
	if (rx_prod_idx == 0xfff) {	/* h/w hasn't touched RX ring yet */
		mvi->rx_cons = 0xfff;
		return 0;
	}

	/* The CMPL_Q may come late, read from register and try again
	 * note: if coalescing is enabled,
	 * it will need to read from register every time for sure
	 */
	if (mvi->rx_cons == rx_prod_idx)
		return 0;

	/* first completion ever: start one behind the producer */
	if (mvi->rx_cons == 0xfff)
		mvi->rx_cons = MVS_RX_RING_SZ - 1;

	while (mvi->rx_cons != rx_prod_idx) {

		/* increment our internal RX consumer pointer */
		mvi->rx_cons = (mvi->rx_cons + 1) & (MVS_RX_RING_SZ - 1);

		/* +1 skips the producer-index mirror dword */
		rx_desc = le32_to_cpu(mvi->rx[mvi->rx_cons + 1]);

		mvs_hba_cq_dump(mvi);

		if (likely(rx_desc & RXQ_DONE))
			mvs_slot_complete(mvi, rx_desc);
		if (rx_desc & RXQ_ATTN) {
			attn = true;
			dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
				   rx_desc);
		} else if (rx_desc & RXQ_ERR) {
			dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
				   rx_desc);
		}
	}

	if (attn && self_clear)
		mvs_int_full(mvi);

	return 0;
}
  1155. static irqreturn_t mvs_interrupt(int irq, void *opaque)
  1156. {
  1157. struct mvs_info *mvi = opaque;
  1158. void __iomem *regs = mvi->regs;
  1159. u32 stat;
  1160. stat = mr32(GBL_INT_STAT);
  1161. /* clear CMD_CMPLT ASAP */
  1162. mw32_f(INT_STAT, CINT_DONE);
  1163. if (stat == 0 || stat == 0xffffffff)
  1164. return IRQ_NONE;
  1165. spin_lock(&mvi->lock);
  1166. mvs_int_full(mvi);
  1167. spin_unlock(&mvi->lock);
  1168. return IRQ_HANDLED;
  1169. }
  1170. #ifndef MVS_DISABLE_MSI
/* MSI interrupt handler: MSI delivery is not shared, so just drain the
 * RX ring (self-clearing attentions) under the host lock.
 */
static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
{
	struct mvs_info *mvi = opaque;

	spin_lock(&mvi->lock);

	mvs_int_rx(mvi, true);

	spin_unlock(&mvi->lock);

	return IRQ_HANDLED;
}
  1179. #endif
/* bundle of per-command state passed to the mvs_task_prep_* helpers */
struct mvs_task_exec_info {
	struct sas_task *task;		/* task being prepared */
	struct mvs_cmd_hdr *hdr;	/* HW command header for the slot */
	struct mvs_port *port;		/* destination port */
	u32 tag;			/* allocated slot tag */
	int n_elem;			/* mapped S/G element count */
};
/* Prepare an SMP command in slot @tei->tag: DMA-map the request and
 * response, lay out the slot's DMA buffer (command table / open address
 * frame / PRD table / status buffer), fill the command header and queue
 * the delivery-ring entry. Returns 0 or a negative errno, unmapping on
 * failure.
 */
static int mvs_task_prep_smp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	int elem, rc, i;
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len, tag = tei->tag;
	void *buf_tmp;
	u8 *buf_oaf;
	dma_addr_t buf_tmp_dma;
	struct mvs_prd *buf_prd;
	struct scatterlist *sg;
	struct mvs_slot_info *slot = &mvi->slot_info[tag];
	struct asd_sas_port *sas_port = task->dev->port;
	u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
#if _MV_DUMP
	u8 *buf_cmd;
	void *from;
#endif
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;
	}
	resp_len = sg_dma_len(sg_resp);

	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;
	}

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */

	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

#if _MV_DUMP
	/* when dumping, copy the request into the slot buffer so it can
	 * be inspected later; otherwise point the HW at the mapped S/G */
	buf_cmd = buf_tmp;
	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
	buf_tmp += req_len;
	buf_tmp_dma += req_len;
	slot->cmd_size = req_len;
#else
	hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	/*
	 * Fill in TX ring and command slot header
	 */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
					TXQ_MODE_I | tag |
					(sas_port->phy_mask << TXQ_PHY_SHIFT));

	hdr->flags |= flags;
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = 0;

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = 0xFFFF;		/* SAS SPEC */
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

#if _MV_DUMP
	/* copy cmd table */
	from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
	memcpy(buf_cmd, from + sg_req->offset, req_len);
	kunmap_atomic(from, KM_IRQ0);
#endif
	return 0;

err_out_2:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
		     PCI_DMA_FROMDEVICE);
err_out:
	pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
		     PCI_DMA_TODEVICE);
	return rc;
}
/* Release the SATA register set assigned to @port: clear its enable
 * bit (sets 0-15 live in PCS, higher ones in CTL), ack any pending SRS
 * interrupt for it, and mark the port unmapped.
 */
static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
{
	void __iomem *regs = mvi->regs;
	u32 tmp, offs;
	u8 *tfs = &port->taskfileset;

	if (*tfs == MVS_ID_NOT_MAPPED)
		return;		/* nothing assigned */

	offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
	if (*tfs < 16) {
		tmp = mr32(PCS);
		mw32(PCS, tmp & ~offs);
	} else {
		tmp = mr32(CTL);
		mw32(CTL, tmp & ~offs);
	}

	/* clear this set's pending SRS interrupt, if any */
	tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
	if (tmp)
		mw32(INT_STAT_SRS, tmp);

	*tfs = MVS_ID_NOT_MAPPED;
}
/* Assign a free SATA register set to @port: scan the enable bits
 * (sets 0-15 in PCS, the rest in CTL), claim the first clear one and
 * ack any stale SRS interrupt for it. Returns 0 on success (or if the
 * port already has a set), MVS_ID_NOT_MAPPED when all sets are in use.
 */
static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
{
	int i;
	u32 tmp, offs;
	void __iomem *regs = mvi->regs;

	if (port->taskfileset != MVS_ID_NOT_MAPPED)
		return 0;	/* already assigned */

	tmp = mr32(PCS);

	for (i = 0; i < mvi->chip->srs_sz; i++) {
		if (i == 16)
			tmp = mr32(CTL);	/* switch to upper bank */
		offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
		if (!(tmp & offs)) {
			port->taskfileset = i;

			if (i < 16)
				mw32(PCS, tmp | offs);
			else
				mw32(CTL, tmp | offs);
			/* drop any stale SRS interrupt for this set */
			tmp = mr32(INT_STAT_SRS) & (1U << i);
			if (tmp)
				mw32(INT_STAT_SRS, tmp);
			return 0;
		}
	}
	return MVS_ID_NOT_MAPPED;
}
  1342. static u32 mvs_get_ncq_tag(struct sas_task *task)
  1343. {
  1344. u32 tag = 0;
  1345. struct ata_queued_cmd *qc = task->uldd_task;
  1346. if (qc)
  1347. tag = qc->tag;
  1348. return tag;
  1349. }
  1350. static int mvs_task_prep_ata(struct mvs_info *mvi,
  1351. struct mvs_task_exec_info *tei)
  1352. {
  1353. struct sas_task *task = tei->task;
  1354. struct domain_device *dev = task->dev;
  1355. struct mvs_cmd_hdr *hdr = tei->hdr;
  1356. struct asd_sas_port *sas_port = dev->port;
  1357. struct mvs_slot_info *slot;
  1358. struct scatterlist *sg;
  1359. struct mvs_prd *buf_prd;
  1360. struct mvs_port *port = tei->port;
  1361. u32 tag = tei->tag;
  1362. u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
  1363. void *buf_tmp;
  1364. u8 *buf_cmd, *buf_oaf;
  1365. dma_addr_t buf_tmp_dma;
  1366. u32 i, req_len, resp_len;
  1367. const u32 max_resp_len = SB_RFB_MAX;
  1368. if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
  1369. return -EBUSY;
  1370. slot = &mvi->slot_info[tag];
  1371. slot->tx = mvi->tx_prod;
  1372. mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
  1373. (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
  1374. (sas_port->phy_mask << TXQ_PHY_SHIFT) |
  1375. (port->taskfileset << TXQ_SRS_SHIFT));
  1376. if (task->ata_task.use_ncq)
  1377. flags |= MCH_FPDMA;
  1378. if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
  1379. if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
  1380. flags |= MCH_ATAPI;
  1381. }
  1382. /* FIXME: fill in port multiplier number */
  1383. hdr->flags = cpu_to_le32(flags);
  1384. /* FIXME: the low order order 5 bits for the TAG if enable NCQ */
  1385. if (task->ata_task.use_ncq) {
  1386. hdr->tags = cpu_to_le32(mvs_get_ncq_tag(task));
  1387. /*Fill in task file */
  1388. task->ata_task.fis.sector_count = hdr->tags << 3;
  1389. } else
  1390. hdr->tags = cpu_to_le32(tag);
  1391. hdr->data_len = cpu_to_le32(task->total_xfer_len);
  1392. /*
  1393. * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
  1394. */
  1395. /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
  1396. buf_cmd = buf_tmp = slot->buf;
  1397. buf_tmp_dma = slot->buf_dma;
  1398. hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
  1399. buf_tmp += MVS_ATA_CMD_SZ;
  1400. buf_tmp_dma += MVS_ATA_CMD_SZ;
  1401. #if _MV_DUMP
  1402. slot->cmd_size = MVS_ATA_CMD_SZ;
  1403. #endif
  1404. /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
  1405. /* used for STP. unused for SATA? */
  1406. buf_oaf = buf_tmp;
  1407. hdr->open_frame = cpu_to_le64(buf_tmp_dma);
  1408. buf_tmp += MVS_OAF_SZ;
  1409. buf_tmp_dma += MVS_OAF_SZ;
  1410. /* region 3: PRD table ********************************************* */
  1411. buf_prd = buf_tmp;
  1412. if (tei->n_elem)
  1413. hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
  1414. else
  1415. hdr->prd_tbl = 0;
  1416. i = sizeof(struct mvs_prd) * tei->n_elem;
  1417. buf_tmp += i;
  1418. buf_tmp_dma += i;
  1419. /* region 4: status buffer (larger the PRD, smaller this buf) ****** */
  1420. /* FIXME: probably unused, for SATA. kept here just in case
  1421. * we get a STP/SATA error information record
  1422. */
  1423. slot->response = buf_tmp;
  1424. hdr->status_buf = cpu_to_le64(buf_tmp_dma);
  1425. req_len = sizeof(struct host_to_dev_fis);
  1426. resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
  1427. sizeof(struct mvs_err_info) - i;
  1428. /* request, response lengths */
  1429. resp_len = min(resp_len, max_resp_len);
  1430. hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
  1431. task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
  1432. /* fill in command FIS and ATAPI CDB */
  1433. memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
  1434. if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
  1435. memcpy(buf_cmd + STP_ATAPI_CMD,
  1436. task->ata_task.atapi_packet, 16);
  1437. /* generate open address frame hdr (first 12 bytes) */
  1438. buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */
  1439. buf_oaf[1] = task->dev->linkrate & 0xf;
  1440. *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
  1441. memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
  1442. /* fill in PRD (scatter/gather) table, if any */
  1443. for_each_sg(task->scatter, sg, tei->n_elem, i) {
  1444. buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
  1445. buf_prd->len = cpu_to_le32(sg_dma_len(sg));
  1446. buf_prd++;
  1447. }
  1448. return 0;
  1449. }
/*
 * mvs_task_prep_ssp() - build a command slot for an SSP task.
 * @mvi:	host instance
 * @tei:	execution info (task, slot tag, command header, port, n_elem)
 *
 * Lays out the per-slot DMA buffer (SSP frame header + command IU, open
 * address frame, PRD table, status buffer), fills the command header and
 * pushes the delivery-queue entry for this slot.  Always returns 0.
 */
static int mvs_task_prep_ssp(struct mvs_info *mvi,
			     struct mvs_task_exec_info *tei)
{
	struct sas_task *task = tei->task;
	struct mvs_cmd_hdr *hdr = tei->hdr;
	struct mvs_port *port = tei->port;
	struct mvs_slot_info *slot;
	struct scatterlist *sg;
	struct mvs_prd *buf_prd;
	struct ssp_frame_hdr *ssp_hdr;
	void *buf_tmp;
	u8 *buf_cmd, *buf_oaf, fburst = 0;
	dma_addr_t buf_tmp_dma;
	u32 flags;
	u32 resp_len, req_len, i, tag = tei->tag;
	const u32 max_resp_len = SB_RFB_MAX;

	slot = &mvi->slot_info[tag];

	/* Delivery-queue entry: initiator-mode SSP on the port's phys. */
	slot->tx = mvi->tx_prod;
	mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
				(TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
				(port->wide_port_phymap << TXQ_PHY_SHIFT));

	flags = MCH_RETRY;
	if (task->ssp_task.enable_first_burst) {
		flags |= MCH_FBURST;
		fburst = (1 << 7);	/* first-burst bit in the command IU */
	}
	hdr->flags = cpu_to_le32(flags |
				 (tei->n_elem << MCH_PRD_LEN_SHIFT) |
				 (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));

	hdr->tags = cpu_to_le32(tag);
	hdr->data_len = cpu_to_le32(task->total_xfer_len);

	/*
	 * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
	 */
	/* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
	buf_cmd = buf_tmp = slot->buf;
	buf_tmp_dma = slot->buf_dma;

	hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_SSP_CMD_SZ;
	buf_tmp_dma += MVS_SSP_CMD_SZ;
#if _MV_DUMP
	slot->cmd_size = MVS_SSP_CMD_SZ;
#endif

	/* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
	buf_oaf = buf_tmp;
	hdr->open_frame = cpu_to_le64(buf_tmp_dma);

	buf_tmp += MVS_OAF_SZ;
	buf_tmp_dma += MVS_OAF_SZ;

	/* region 3: PRD table ********************************************* */
	buf_prd = buf_tmp;
	if (tei->n_elem)
		hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
	else
		hdr->prd_tbl = 0;

	i = sizeof(struct mvs_prd) * tei->n_elem;
	buf_tmp += i;
	buf_tmp_dma += i;

	/* region 4: status buffer (larger the PRD, smaller this buf) ****** */
	slot->response = buf_tmp;
	hdr->status_buf = cpu_to_le64(buf_tmp_dma);

	resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
	    sizeof(struct mvs_err_info) - i;
	resp_len = min(resp_len, max_resp_len);

	/* frame header + 28-byte command IU */
	req_len = sizeof(struct ssp_frame_hdr) + 28;

	/* request, response lengths (in dwords) */
	hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));

	/* generate open address frame hdr (first 12 bytes) */
	buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1;	/* initiator, SSP, ftype 1h */
	buf_oaf[1] = task->dev->linkrate & 0xf;
	*(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
	memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);

	/* fill in SSP frame header (Command Table.SSP frame header) */
	ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
	ssp_hdr->frame_type = SSP_COMMAND;
	memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
	       HASHED_SAS_ADDR_SIZE);
	memcpy(ssp_hdr->hashed_src_addr,
	       task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
	ssp_hdr->tag = cpu_to_be16(tag);

	/* fill in command frame IU */
	buf_cmd += sizeof(*ssp_hdr);
	memcpy(buf_cmd, &task->ssp_task.LUN, 8);
	/* byte 9: first-burst flag, task attribute and priority */
	buf_cmd[9] = fburst | task->ssp_task.task_attr |
			(task->ssp_task.task_prio << 3);
	memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);

	/* fill in PRD (scatter/gather) table, if any */
	for_each_sg(task->scatter, sg, tei->n_elem, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->len = cpu_to_le32(sg_dma_len(sg));
		buf_prd++;
	}

	return 0;
}
  1543. static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
  1544. {
  1545. struct domain_device *dev = task->dev;
  1546. struct mvs_info *mvi = dev->port->ha->lldd_ha;
  1547. struct pci_dev *pdev = mvi->pdev;
  1548. void __iomem *regs = mvi->regs;
  1549. struct mvs_task_exec_info tei;
  1550. struct sas_task *t = task;
  1551. u32 tag = 0xdeadbeef, rc, n_elem = 0;
  1552. unsigned long flags;
  1553. u32 n = num, pass = 0;
  1554. spin_lock_irqsave(&mvi->lock, flags);
  1555. do {
  1556. tei.port = &mvi->port[dev->port->id];
  1557. if (!tei.port->port_attached) {
  1558. struct task_status_struct *ts = &t->task_status;
  1559. ts->stat = SAS_PHY_DOWN;
  1560. t->task_done(t);
  1561. rc = 0;
  1562. goto exec_exit;
  1563. }
  1564. if (!sas_protocol_ata(t->task_proto)) {
  1565. if (t->num_scatter) {
  1566. n_elem = pci_map_sg(mvi->pdev, t->scatter,
  1567. t->num_scatter,
  1568. t->data_dir);
  1569. if (!n_elem) {
  1570. rc = -ENOMEM;
  1571. goto err_out;
  1572. }
  1573. }
  1574. } else {
  1575. n_elem = t->num_scatter;
  1576. }
  1577. rc = mvs_tag_alloc(mvi, &tag);
  1578. if (rc)
  1579. goto err_out;
  1580. mvi->slot_info[tag].task = t;
  1581. mvi->slot_info[tag].n_elem = n_elem;
  1582. memset(mvi->slot_info[tag].buf, 0, MVS_SLOT_BUF_SZ);
  1583. tei.task = t;
  1584. tei.hdr = &mvi->slot[tag];
  1585. tei.tag = tag;
  1586. tei.n_elem = n_elem;
  1587. switch (t->task_proto) {
  1588. case SAS_PROTOCOL_SMP:
  1589. rc = mvs_task_prep_smp(mvi, &tei);
  1590. break;
  1591. case SAS_PROTOCOL_SSP:
  1592. rc = mvs_task_prep_ssp(mvi, &tei);
  1593. break;
  1594. case SAS_PROTOCOL_SATA:
  1595. case SAS_PROTOCOL_STP:
  1596. case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
  1597. rc = mvs_task_prep_ata(mvi, &tei);
  1598. break;
  1599. default:
  1600. dev_printk(KERN_ERR, &pdev->dev,
  1601. "unknown sas_task proto: 0x%x\n",
  1602. t->task_proto);
  1603. rc = -EINVAL;
  1604. break;
  1605. }
  1606. if (rc)
  1607. goto err_out_tag;
  1608. /* TODO: select normal or high priority */
  1609. spin_lock(&t->task_state_lock);
  1610. t->task_state_flags |= SAS_TASK_AT_INITIATOR;
  1611. spin_unlock(&t->task_state_lock);
  1612. if (n == 1) {
  1613. spin_unlock_irqrestore(&mvi->lock, flags);
  1614. mw32(TX_PROD_IDX, mvi->tx_prod);
  1615. }
  1616. mvs_hba_memory_dump(mvi, tag, t->task_proto);
  1617. ++pass;
  1618. mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
  1619. if (n == 1)
  1620. break;
  1621. t = list_entry(t->list.next, struct sas_task, list);
  1622. } while (--n);
  1623. return 0;
  1624. err_out_tag:
  1625. mvs_tag_free(mvi, tag);
  1626. err_out:
  1627. dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
  1628. if (!sas_protocol_ata(t->task_proto))
  1629. if (n_elem)
  1630. pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
  1631. t->data_dir);
  1632. exec_exit:
  1633. if (pass)
  1634. mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
  1635. spin_unlock_irqrestore(&mvi->lock, flags);
  1636. return rc;
  1637. }
  1638. static int mvs_task_abort(struct sas_task *task)
  1639. {
  1640. int rc = 1;
  1641. unsigned long flags;
  1642. struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
  1643. struct pci_dev *pdev = mvi->pdev;
  1644. spin_lock_irqsave(&task->task_state_lock, flags);
  1645. if (task->task_state_flags & SAS_TASK_STATE_DONE) {
  1646. rc = TMF_RESP_FUNC_COMPLETE;
  1647. goto out_done;
  1648. }
  1649. spin_unlock_irqrestore(&task->task_state_lock, flags);
  1650. /*FIXME*/
  1651. rc = TMF_RESP_FUNC_COMPLETE;
  1652. switch (task->task_proto) {
  1653. case SAS_PROTOCOL_SMP:
  1654. dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort! ");
  1655. break;
  1656. case SAS_PROTOCOL_SSP:
  1657. dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort! ");
  1658. break;
  1659. case SAS_PROTOCOL_SATA:
  1660. case SAS_PROTOCOL_STP:
  1661. case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:{
  1662. dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort! "
  1663. "Dump D2H FIS: \n");
  1664. mvs_hexdump(sizeof(struct host_to_dev_fis),
  1665. (void *)&task->ata_task.fis, 0);
  1666. dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd : \n");
  1667. mvs_hexdump(16, task->ata_task.atapi_packet, 0);
  1668. break;
  1669. }
  1670. default:
  1671. break;
  1672. }
  1673. out_done:
  1674. return rc;
  1675. }
  1676. static void mvs_free(struct mvs_info *mvi)
  1677. {
  1678. int i;
  1679. if (!mvi)
  1680. return;
  1681. for (i = 0; i < MVS_SLOTS; i++) {
  1682. struct mvs_slot_info *slot = &mvi->slot_info[i];
  1683. if (slot->buf)
  1684. dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
  1685. slot->buf, slot->buf_dma);
  1686. }
  1687. if (mvi->tx)
  1688. dma_free_coherent(&mvi->pdev->dev,
  1689. sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
  1690. mvi->tx, mvi->tx_dma);
  1691. if (mvi->rx_fis)
  1692. dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
  1693. mvi->rx_fis, mvi->rx_fis_dma);
  1694. if (mvi->rx)
  1695. dma_free_coherent(&mvi->pdev->dev,
  1696. sizeof(*mvi->rx) * MVS_RX_RING_SZ,
  1697. mvi->rx, mvi->rx_dma);
  1698. if (mvi->slot)
  1699. dma_free_coherent(&mvi->pdev->dev,
  1700. sizeof(*mvi->slot) * MVS_SLOTS,
  1701. mvi->slot, mvi->slot_dma);
  1702. #ifdef MVS_ENABLE_PERI
  1703. if (mvi->peri_regs)
  1704. iounmap(mvi->peri_regs);
  1705. #endif
  1706. if (mvi->regs)
  1707. iounmap(mvi->regs);
  1708. if (mvi->shost)
  1709. scsi_host_put(mvi->shost);
  1710. kfree(mvi->sas.sas_port);
  1711. kfree(mvi->sas.sas_phy);
  1712. kfree(mvi);
  1713. }
  1714. /* FIXME: locking? */
  1715. static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
  1716. void *funcdata)
  1717. {
  1718. struct mvs_info *mvi = sas_phy->ha->lldd_ha;
  1719. int rc = 0, phy_id = sas_phy->id;
  1720. u32 tmp;
  1721. tmp = mvs_read_phy_ctl(mvi, phy_id);
  1722. switch (func) {
  1723. case PHY_FUNC_SET_LINK_RATE:{
  1724. struct sas_phy_linkrates *rates = funcdata;
  1725. u32 lrmin = 0, lrmax = 0;
  1726. lrmin = (rates->minimum_linkrate << 8);
  1727. lrmax = (rates->maximum_linkrate << 12);
  1728. if (lrmin) {
  1729. tmp &= ~(0xf << 8);
  1730. tmp |= lrmin;
  1731. }
  1732. if (lrmax) {
  1733. tmp &= ~(0xf << 12);
  1734. tmp |= lrmax;
  1735. }
  1736. mvs_write_phy_ctl(mvi, phy_id, tmp);
  1737. break;
  1738. }
  1739. case PHY_FUNC_HARD_RESET:
  1740. if (tmp & PHY_RST_HARD)
  1741. break;
  1742. mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
  1743. break;
  1744. case PHY_FUNC_LINK_RESET:
  1745. mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
  1746. break;
  1747. case PHY_FUNC_DISABLE:
  1748. case PHY_FUNC_RELEASE_SPINUP_HOLD:
  1749. default:
  1750. rc = -EOPNOTSUPP;
  1751. }
  1752. return rc;
  1753. }
/* One-time initialisation of the libsas view of phy @phy_id. */
static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
	struct mvs_phy *phy = &mvi->phy[phy_id];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	/* Phys beyond what this chip actually has stay disabled. */
	sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;

	sas_phy->id = phy_id;
	sas_phy->sas_addr = &mvi->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = &mvi->sas;
	sas_phy->lldd_phy = phy;
}
/*
 * mvs_alloc() - allocate and initialise the per-HBA mvs_info.
 * @pdev:	PCI device being probed
 * @ent:	matched PCI id (driver_data indexes mvs_chips[])
 *
 * Allocates the host structure, the SCSI/SAS glue, the register
 * mappings and all DMA rings/buffers, then reads the SAS address from
 * NVRAM.  Returns the new mvs_info, or NULL on any failure (partial
 * allocations are released via mvs_free()).
 */
static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
					    const struct pci_device_id *ent)
{
	struct mvs_info *mvi;
	unsigned long res_start, res_len, res_flag;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
	int i;

	/*
	 * alloc and init our per-HBA mvs_info struct
	 */

	mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
	if (!mvi)
		return NULL;

	spin_lock_init(&mvi->lock);
	mvi->pdev = pdev;
	mvi->chip = chip;

	/* Early 6440 silicon needs the phy power workaround. */
	if (pdev->device == 0x6440 && pdev->revision == 0)
		mvi->flags |= MVF_PHY_PWR_FIX;

	/*
	 * alloc and init SCSI, SAS glue
	 */

	mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
	if (!mvi->shost)
		goto err_out;

	arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
	arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port)
		goto err_out;
	/* NOTE(review): if exactly one of arr_phy/arr_port was allocated,
	 * it leaks here — mvs_free() only frees what was stored in
	 * mvi->sas.sas_phy/sas_port below.  Confirm and fix separately. */

	for (i = 0; i < MVS_MAX_PHYS; i++) {
		mvs_phy_init(mvi, i);
		arr_phy[i] = &mvi->phy[i].sas_phy;
		arr_port[i] = &mvi->port[i].sas_port;
	}

	SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
	mvi->shost->transportt = mvs_stt;
	mvi->shost->max_id = 21;
	mvi->shost->max_lun = ~0;
	mvi->shost->max_channel = 0;
	mvi->shost->max_cmd_len = 16;

	mvi->sas.sas_ha_name = DRV_NAME;
	mvi->sas.dev = &pdev->dev;
	mvi->sas.lldd_module = THIS_MODULE;
	mvi->sas.sas_addr = &mvi->sas_addr[0];
	mvi->sas.sas_phy = arr_phy;
	mvi->sas.sas_port = arr_port;
	mvi->sas.num_phys = chip->n_phy;
	mvi->sas.lldd_max_execute_num = MVS_CHIP_SLOT_SZ - 1;
	mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
	mvi->can_queue = (MVS_CHIP_SLOT_SZ >> 1) - 1;
	mvi->sas.lldd_ha = mvi;
	mvi->sas.core.shost = mvi->shost;

	mvs_tag_init(mvi);

	/*
	 * ioremap main and peripheral registers
	 */

#ifdef MVS_ENABLE_PERI
	res_start = pci_resource_start(pdev, 2);
	res_len = pci_resource_len(pdev, 2);
	if (!res_start || !res_len)
		goto err_out;

	mvi->peri_regs = ioremap_nocache(res_start, res_len);
	if (!mvi->peri_regs)
		goto err_out;
#endif

	/* BAR 4 holds the main register block. */
	res_start = pci_resource_start(pdev, 4);
	res_len = pci_resource_len(pdev, 4);
	if (!res_start || !res_len)
		goto err_out;

	res_flag = pci_resource_flags(pdev, 4);
	if (res_flag & IORESOURCE_CACHEABLE)
		mvi->regs = ioremap(res_start, res_len);
	else
		mvi->regs = ioremap_nocache(res_start, res_len);

	if (!mvi->regs)
		goto err_out;

	/*
	 * alloc and init our DMA areas
	 */

	mvi->tx = dma_alloc_coherent(&pdev->dev,
				     sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
				     &mvi->tx_dma, GFP_KERNEL);
	if (!mvi->tx)
		goto err_out;
	memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);

	mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
					 &mvi->rx_fis_dma, GFP_KERNEL);
	if (!mvi->rx_fis)
		goto err_out;
	memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);

	mvi->rx = dma_alloc_coherent(&pdev->dev,
				     sizeof(*mvi->rx) * MVS_RX_RING_SZ,
				     &mvi->rx_dma, GFP_KERNEL);
	if (!mvi->rx)
		goto err_out;
	memset(mvi->rx, 0, sizeof(*mvi->rx) * MVS_RX_RING_SZ);

	/* RX ring consumer starts at the last slot. */
	mvi->rx[0] = cpu_to_le32(0xfff);
	mvi->rx_cons = 0xfff;

	mvi->slot = dma_alloc_coherent(&pdev->dev,
				       sizeof(*mvi->slot) * MVS_SLOTS,
				       &mvi->slot_dma, GFP_KERNEL);
	if (!mvi->slot)
		goto err_out;
	memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);

	for (i = 0; i < MVS_SLOTS; i++) {
		struct mvs_slot_info *slot = &mvi->slot_info[i];

		slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
					       &slot->buf_dma, GFP_KERNEL);
		if (!slot->buf)
			goto err_out;
		memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
	}

	/* finally, read NVRAM to get our SAS address */
	if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
		goto err_out;

	return mvi;

err_out:
	mvs_free(mvi);
	return NULL;
}
/* Read an indirect "command" register: latch @addr, then read its data. */
static u32 mvs_cr32(void __iomem *regs, u32 addr)
{
	mw32(CMD_ADDR, addr);
	return mr32(CMD_DATA);
}
/* Write an indirect "command" register: latch @addr, then write @val. */
static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
{
	mw32(CMD_ADDR, addr);
	mw32(CMD_DATA, val);
}
/* Read the SERDES control/status register of phy @port (two 4-phy banks). */
static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
{
	void __iomem *regs = mvi->regs;

	return (port < 4)?mr32(P0_SER_CTLSTAT + port * 4):
		mr32(P4_SER_CTLSTAT + (port - 4) * 4);
}
/* Write the SERDES control/status register of phy @port (two 4-phy banks). */
static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
{
	void __iomem *regs = mvi->regs;

	if (port < 4)
		mw32(P0_SER_CTLSTAT + port * 4, val);
	else
		mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
}
  1917. static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
  1918. {
  1919. void __iomem *regs = mvi->regs + off;
  1920. void __iomem *regs2 = mvi->regs + off2;
  1921. return (port < 4)?readl(regs + port * 8):
  1922. readl(regs2 + (port - 4) * 8);
  1923. }
  1924. static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
  1925. u32 port, u32 val)
  1926. {
  1927. void __iomem *regs = mvi->regs + off;
  1928. void __iomem *regs2 = mvi->regs + off2;
  1929. if (port < 4)
  1930. writel(val, regs + port * 8);
  1931. else
  1932. writel(val, regs2 + (port - 4) * 8);
  1933. }
/* Read the phy config data register (pair with mvs_write_port_cfg_addr). */
static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
}
/* Write the phy config data register (pair with mvs_write_port_cfg_addr). */
static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
}
/* Select which phy config register the next cfg_data access will touch. */
static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
}
/* Read the phy vendor-specific data register (pair with vsr_addr write). */
static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
}
/* Write the phy vendor-specific data register (pair with vsr_addr write). */
static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
}
/* Select which vendor-specific register the next vsr_data access touches. */
static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
{
	mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
}
/* Read the per-phy interrupt status register. */
static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
}
/* Write the per-phy interrupt status register (write-1-to-ack style use). */
static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
}
/* Read the per-phy interrupt mask register. */
static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
{
	return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
}
/* Write the per-phy interrupt mask register. */
static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
{
	mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
}
/*
 * Apply chip-specific PHY/SAS controller errata workarounds at init time.
 * The values written are vendor-provided magic; see per-write comments.
 */
static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	/* workaround for SATA R-ERR, to ignore phy glitch */
	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
	tmp &= ~(1 << 9);
	tmp |= (1 << 10);
	mvs_cw32(regs, CMD_PHY_TIMER, tmp);

	/* enable retry 127 times */
	mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);

	/* extend open frame timeout to max */
	tmp = mvs_cr32(regs, CMD_SAS_CTL0);
	tmp &= ~0xffff;
	tmp |= 0x3fff;
	mvs_cw32(regs, CMD_SAS_CTL0, tmp);

	/* workaround for WDTIMEOUT , set to 550 ms */
	mvs_cw32(regs, CMD_WD_TIMER, 0xffffff);

	/* not to halt for different port op during wideport link change */
	mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);

	/* workaround for Seagate disk not-found OOB sequence, recv
	 * COMINIT before sending out COMWAKE */
	tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
	tmp &= 0x0000ffff;
	tmp |= 0x00fa0000;
	mvs_cw32(regs, CMD_PHY_MODE_21, tmp);

	tmp = mvs_cr32(regs, CMD_PHY_TIMER);
	tmp &= 0x1fffffff;
	tmp |= (2U << 29);	/* 8 ms retry */
	mvs_cw32(regs, CMD_PHY_TIMER, tmp);

	/* TEST - for phy decoding error, adjust voltage levels */
	mw32(P0_VSR_ADDR + 0, 0x8);
	mw32(P0_VSR_DATA + 0, 0x2F0);

	mw32(P0_VSR_ADDR + 8, 0x8);
	mw32(P0_VSR_DATA + 8, 0x2F0);

	mw32(P0_VSR_ADDR + 16, 0x8);
	mw32(P0_VSR_DATA + 16, 0x2F0);

	mw32(P0_VSR_ADDR + 24, 0x8);
	mw32(P0_VSR_DATA + 24, 0x2F0);
}
/* Enable command transmission for phy @PhyId via the PCS register. */
static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
{
	void __iomem *regs = mvi->regs;
	u32 tmp;

	tmp = mr32(PCS);
	/* Chips with more than 4 phys use a second enable bit field. */
	if (mvi->chip->n_phy <= 4)
		tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
	else
		tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
	mw32(PCS, tmp);
}
/* Latch SAS vs SATA port type for phy @i from the auto-detect register. */
static void mvs_detect_porttype(struct mvs_info *mvi, int i)
{
	void __iomem *regs = mvi->regs;
	u32 reg;
	struct mvs_phy *phy = &mvi->phy[i];

	/* TODO check & save device type */
	reg = mr32(GBL_PORT_TYPE);

	/* NOTE(review): the triple AND assumes MODE_SAS_SATA covers bit i
	 * for every phy — confirm against the register definition. */
	if (reg & MODE_SAS_SATA & (1 << i))
		phy->phy_type |= PORT_TYPE_SAS;
	else
		phy->phy_type |= PORT_TYPE_SATA;
}
  2037. static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
  2038. {
  2039. u32 *s = (u32 *) buf;
  2040. if (!s)
  2041. return NULL;
  2042. mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
  2043. s[3] = mvs_read_port_cfg_data(mvi, i);
  2044. mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
  2045. s[2] = mvs_read_port_cfg_data(mvi, i);
  2046. mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
  2047. s[1] = mvs_read_port_cfg_data(mvi, i);
  2048. mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
  2049. s[0] = mvs_read_port_cfg_data(mvi, i);
  2050. return (void *)s;
  2051. }
/* Non-zero iff a SATA signature-FIS event is flagged in @irq_status. */
static u32 mvs_is_sig_fis_received(u32 irq_status)
{
	return irq_status & PHYEV_SIG_FIS;
}
/*
 * Reprogram the per-phy wide-port membership registers after
 * port->wide_port_phymap changed.  Member phys get the full phymap,
 * non-members get 0.
 *
 * NOTE(review): depends on the for_each_phy() macro's semantics — `no`
 * appears to carry the remaining phymap bits (bit 0 = current phy) while
 * `j` counts phys, yet `no` is also used as the phy index in the register
 * writes.  Confirm against the macro definition before changing this.
 */
static void mvs_update_wideport(struct mvs_info *mvi, int i)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port = phy->port;
	int j, no;

	for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
		if (no & 1) {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no,
						port->wide_port_phymap);
		} else {
			mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
			mvs_write_port_cfg_data(mvi, no, 0);
		}
}
/*
 * Check whether phy @i is up.  Returns the raw SERDES control/status
 * value when the phy is ready and no "phy gone" (POOF) event is pending
 * (marking a port-less phy as attached), or 0 when the phy is down — in
 * which case any existing port association is torn down: the wide-port
 * map is updated, the SATA register set freed and the phy state cleared.
 */
static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
{
	u32 tmp;
	struct mvs_phy *phy = &mvi->phy[i];
	struct mvs_port *port;

	tmp = mvs_read_phy_ctl(mvi, i);

	if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
		if (!phy->port)
			phy->phy_attached = 1;
		return tmp;
	}

	/* Phy is down: detach it from its port, if it had one. */
	port = phy->port;
	if (port) {
		if (phy->phy_type & PORT_TYPE_SAS) {
			port->wide_port_phymap &= ~(1U << i);
			if (!port->wide_port_phymap)
				port->port_attached = 0;
			mvs_update_wideport(mvi, i);
		} else if (phy->phy_type & PORT_TYPE_SATA)
			port->port_attached = 0;
		mvs_free_reg_set(mvi, phy->port);
		phy->port = NULL;
		phy->phy_attached = 0;
		phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
	}
	return 0;
}
/*
 * mvs_update_phyinfo() - refresh cached state for phy @i from hardware.
 * @mvi:	host instance
 * @i:		phy index
 * @get_st:	when set, also latch the phy IRQ status, recompute
 *		phy->phy_status via mvs_is_phy_ready(), and acknowledge
 *		the latched IRQ status at the end.
 *
 * When the phy is up, fills in the libsas-visible link rate, attached
 * SAS address, identify info and received frame (identify frame for
 * SAS, D2H signature FIS for SATA), and applies the 1.5G decoding
 * workaround to the vendor-specific PHY_MODE6 register.
 */
static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
			       int get_st)
{
	struct mvs_phy *phy = &mvi->phy[i];
	struct pci_dev *pdev = mvi->pdev;
	u32 tmp, j;
	u64 tmp64;

	/* Our own identify info and SAS address for this phy. */
	mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
	phy->dev_info = mvs_read_port_cfg_data(mvi, i);

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
	phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;

	mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
	phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);

	if (get_st) {
		phy->irq_status = mvs_read_port_irq_stat(mvi, i);
		phy->phy_status = mvs_is_phy_ready(mvi, i);
	}

	if (phy->phy_status) {
		u32 phy_st;
		struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];

		mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
		phy_st = mvs_read_port_cfg_data(mvi, i);

		/* Negotiated link rate from the control/status value. */
		sas_phy->linkrate =
			(phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
				PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;

		/* Updated attached_sas_addr */
		mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
		phy->att_dev_sas_addr =
			(u64) mvs_read_port_cfg_data(mvi, i) << 32;
		mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
		phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);

		dev_printk(KERN_DEBUG, &pdev->dev,
			"phy[%d] Get Attached Address 0x%llX ,"
			" SAS Address 0x%llX\n",
			i, phy->att_dev_sas_addr, phy->dev_sas_addr);
		dev_printk(KERN_DEBUG, &pdev->dev,
			"Rate = %x , type = %d\n",
			sas_phy->linkrate, phy->phy_type);

#if 1
		/*
		 * If the device is capable of supporting a wide port
		 * on its phys, it may configure the phys as a wide port.
		 */
		/* NOTE(review): treats an attached address exactly one
		 * below a sibling phy's as the same wide-port device and
		 * normalises to the sibling's address — confirm. */
		if (phy->phy_type & PORT_TYPE_SAS)
			for (j = 0; j < mvi->chip->n_phy && j != i; ++j) {
				if ((mvi->phy[j].phy_attached) &&
				    (mvi->phy[j].phy_type & PORT_TYPE_SAS))
					if (phy->att_dev_sas_addr ==
					    mvi->phy[j].att_dev_sas_addr - 1) {
						phy->att_dev_sas_addr =
							mvi->phy[j].att_dev_sas_addr;
						break;
					}
			}
#endif

		tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
		memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);

		if (phy->phy_type & PORT_TYPE_SAS) {
			/* SAS: read attached device info, derive protocols. */
			mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
			phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
			phy->identify.device_type =
			    phy->att_dev_info & PORT_DEV_TYPE_MASK;

			if (phy->identify.device_type == SAS_END_DEV)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SSP;
			else if (phy->identify.device_type != NO_DEVICE)
				phy->identify.target_port_protocols =
							SAS_PROTOCOL_SMP;
			if (phy_st & PHY_OOB_DTCTD)
				sas_phy->oob_mode = SAS_OOB_MODE;
			phy->frame_rcvd_size =
			    sizeof(struct sas_identify_frame);
		} else if (phy->phy_type & PORT_TYPE_SATA) {
			/* SATA: report STP; capture the D2H signature FIS. */
			phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
			if (mvs_is_sig_fis_received(phy->irq_status)) {
				if (phy_st & PHY_OOB_DTCTD)
					sas_phy->oob_mode = SATA_OOB_MODE;
				phy->frame_rcvd_size =
				    sizeof(struct dev_to_host_fis);
				mvs_get_d2h_reg(mvi, i,
						(void *)sas_phy->frame_rcvd);
			} else {
				dev_printk(KERN_DEBUG, &pdev->dev,
					"No sig fis\n");
			}
		}

		/* workaround for HW phy decoding error on 1.5g disk drive */
		mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
		tmp = mvs_read_port_vsr_data(mvi, i);
		if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
		     PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
			SAS_LINK_RATE_1_5_GBPS)
			tmp &= ~PHY_MODE6_DTL_SPEED;
		else
			tmp |= PHY_MODE6_DTL_SPEED;
		mvs_write_port_vsr_data(mvi, i, tmp);
	}

	/* Acknowledge the IRQ status we latched above. */
	if (get_st)
		mvs_write_port_irq_stat(mvi, i, phy->irq_status);
}
/*
 * Called by libsas when a port has formed around @sas_phy.  Attaches the
 * driver's port object to the phy and, for SAS phys, programs the
 * wide-port phy map into the hardware.  Runs under mvi->lock.
 */
static void mvs_port_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct mvs_info *mvi = sas_ha->lldd_ha;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct mvs_phy *phy = sas_phy->lldd_phy;
	struct mvs_port *port = &mvi->port[sas_port->id];
	unsigned long flags;

	spin_lock_irqsave(&mvi->lock, flags);
	port->port_attached = 1;
	phy->port = port;
	/* No SATA register set assigned to this port yet. */
	port->taskfileset = MVS_ID_NOT_MAPPED;
	if (phy->phy_type & PORT_TYPE_SAS) {
		port->wide_port_phymap = sas_port->phy_mask;
		mvs_update_wideport(mvi, sas_phy->id);
	}
	spin_unlock_irqrestore(&mvi->lock, flags);
}
/*
 * mvs_hw_init() - bring the controller from power-on/reset to a running
 * state: global HBA reset, phy power-up, DMA ring programming, per-phy
 * reset and interrupt setup, then enable the TX/RX engines and unmask
 * interrupts.  The register-write ordering below follows the hardware's
 * required init sequence — do not reorder.
 *
 * Returns 0 on success, -EBUSY if the HBA reset bit never self-clears.
 */
static int __devinit mvs_hw_init(struct mvs_info *mvi)
{
	void __iomem *regs = mvi->regs;
	int i;
	u32 tmp, cctl;

	/* make sure interrupts are masked immediately (paranoia) */
	mw32(GBL_CTL, 0);
	tmp = mr32(GBL_CTL);

	/* Reset Controller (skip if a reset is already in flight) */
	if (!(tmp & HBA_RST)) {
		if (mvi->flags & MVF_PHY_PWR_FIX) {
			/* workaround: force both phy banks off before reset */
			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
			tmp &= ~PCTL_PWR_ON;
			tmp |= PCTL_OFF;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

			pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
			tmp &= ~PCTL_PWR_ON;
			tmp |= PCTL_OFF;
			pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
		}

		/* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
		mw32_f(GBL_CTL, HBA_RST);
	}

	/* wait for reset to finish; timeout is just a guess (<= 10 s) */
	i = 1000;
	while (i-- > 0) {
		msleep(10);

		if (!(mr32(GBL_CTL) & HBA_RST))
			break;
	}
	if (mr32(GBL_CTL) & HBA_RST) {
		dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
		return -EBUSY;
	}

	/* Init Chip */
	/* make sure RST is set; HBA_RST /should/ have done that for us */
	cctl = mr32(CTL);
	if (cctl & CCTL_RST)
		cctl &= ~CCTL_RST;
	else
		mw32_f(CTL, cctl | CCTL_RST);

	/* write to device control _AND_ device status register? - A.C. */
	pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
	tmp &= ~PRD_REQ_MASK;
	tmp |= PRD_REQ_SIZE;
	pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);

	/* power both phy banks back on */
	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
	tmp |= PCTL_PWR_ON;
	tmp &= ~PCTL_OFF;
	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);

	pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
	tmp |= PCTL_PWR_ON;
	tmp &= ~PCTL_OFF;
	pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);

	/* release chip-level reset (cctl has CCTL_RST cleared here) */
	mw32_f(CTL, cctl);

	/* reset control */
	mw32(PCS, 0);		/*MVS_PCS */

	mvs_phy_hacks(mvi);

	/*
	 * Program the DMA ring base addresses.  The high dword is written
	 * as (x >> 16) >> 16 instead of x >> 32 so the shift stays
	 * well-defined when dma_addr_t is only 32 bits wide.
	 */
	mw32(CMD_LIST_LO, mvi->slot_dma);
	mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);

	mw32(RX_FIS_LO, mvi->rx_fis_dma);
	mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);

	mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
	mw32(TX_LO, mvi->tx_dma);
	mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);

	mw32(RX_CFG, MVS_RX_RING_SZ);
	mw32(RX_LO, mvi->rx_dma);
	mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);

	/* enable auto port detection */
	mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
	msleep(100);

	/* init and reset phys */
	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* split the 8-byte big-endian SAS address into two dwords */
		u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
		u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);

		mvs_detect_porttype(mvi, i);

		/* set phy local SAS address */
		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
		mvs_write_port_cfg_data(mvi, i, lo);
		mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
		mvs_write_port_cfg_data(mvi, i, hi);

		/* reset phy */
		tmp = mvs_read_phy_ctl(mvi, i);
		tmp |= PHY_RST;
		mvs_write_phy_ctl(mvi, i, tmp);
	}

	msleep(100);

	for (i = 0; i < mvi->chip->n_phy; i++) {
		/* clear phy int status */
		tmp = mvs_read_port_irq_stat(mvi, i);
		tmp &= ~PHYEV_SIG_FIS;
		mvs_write_port_irq_stat(mvi, i, tmp);

		/* set phy int mask */
		tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
			PHYEV_ID_DONE | PHYEV_DEC_ERR;
		mvs_write_port_irq_mask(mvi, i, tmp);

		msleep(100);
		mvs_update_phyinfo(mvi, i, 1);
		mvs_enable_xmt(mvi, i);
	}

	/* FIXME: update wide port bitmaps */

	/* little endian for open address and command table, etc. */
	/* A.C.
	 * it seems that ( from the spec ) turning on big-endian won't
	 * do us any good on big-endian machines, need further confirmation
	 */
	cctl = mr32(CTL);
	cctl |= CCTL_ENDIAN_CMD;
	cctl |= CCTL_ENDIAN_DATA;
	cctl &= ~CCTL_ENDIAN_OPEN;
	cctl |= CCTL_ENDIAN_RSP;
	mw32_f(CTL, cctl);

	/* reset CMD queue */
	tmp = mr32(PCS);
	tmp |= PCS_CMD_RST;
	mw32(PCS, tmp);

	/* interrupt coalescing may cause missing HW interrput in some case,
	 * and the max count is 0x1ff, while our max slot is 0x200,
	 * it will make count 0.
	 */
	tmp = 0;
	mw32(INT_COAL, tmp);

	tmp = 0x100;
	mw32(INT_COAL_TMOUT, tmp);

	/* ladies and gentlemen, start your engines */
	mw32(TX_CFG, 0);
	mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
	mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
	/* enable CMD/CMPL_Q/RESP mode */
	mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);

	/* re-enable interrupts globally */
	mvs_hba_interrupt_enable(mvi);

	/* enable completion queue interrupt */
	tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM);
	mw32(INT_MASK, tmp);

	return 0;
}
  2353. static void __devinit mvs_print_info(struct mvs_info *mvi)
  2354. {
  2355. struct pci_dev *pdev = mvi->pdev;
  2356. static int printed_version;
  2357. if (!printed_version++)
  2358. dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
  2359. dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
  2360. mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
  2361. }
  2362. static int __devinit mvs_pci_init(struct pci_dev *pdev,
  2363. const struct pci_device_id *ent)
  2364. {
  2365. int rc;
  2366. struct mvs_info *mvi;
  2367. irq_handler_t irq_handler = mvs_interrupt;
  2368. rc = pci_enable_device(pdev);
  2369. if (rc)
  2370. return rc;
  2371. pci_set_master(pdev);
  2372. rc = pci_request_regions(pdev, DRV_NAME);
  2373. if (rc)
  2374. goto err_out_disable;
  2375. rc = pci_go_64(pdev);
  2376. if (rc)
  2377. goto err_out_regions;
  2378. mvi = mvs_alloc(pdev, ent);
  2379. if (!mvi) {
  2380. rc = -ENOMEM;
  2381. goto err_out_regions;
  2382. }
  2383. rc = mvs_hw_init(mvi);
  2384. if (rc)
  2385. goto err_out_mvi;
  2386. #ifndef MVS_DISABLE_MSI
  2387. if (!pci_enable_msi(pdev)) {
  2388. u32 tmp;
  2389. void __iomem *regs = mvi->regs;
  2390. mvi->flags |= MVF_MSI;
  2391. irq_handler = mvs_msi_interrupt;
  2392. tmp = mr32(PCS);
  2393. mw32(PCS, tmp | PCS_SELF_CLEAR);
  2394. }
  2395. #endif
  2396. rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
  2397. if (rc)
  2398. goto err_out_msi;
  2399. rc = scsi_add_host(mvi->shost, &pdev->dev);
  2400. if (rc)
  2401. goto err_out_irq;
  2402. rc = sas_register_ha(&mvi->sas);
  2403. if (rc)
  2404. goto err_out_shost;
  2405. pci_set_drvdata(pdev, mvi);
  2406. mvs_print_info(mvi);
  2407. scsi_scan_host(mvi->shost);
  2408. return 0;
  2409. err_out_shost:
  2410. scsi_remove_host(mvi->shost);
  2411. err_out_irq:
  2412. free_irq(pdev->irq, mvi);
  2413. err_out_msi:
  2414. if (mvi->flags |= MVF_MSI)
  2415. pci_disable_msi(pdev);
  2416. err_out_mvi:
  2417. mvs_free(mvi);
  2418. err_out_regions:
  2419. pci_release_regions(pdev);
  2420. err_out_disable:
  2421. pci_disable_device(pdev);
  2422. return rc;
  2423. }
/*
 * mvs_pci_remove() - PCI remove: tear down in the reverse order of
 * probe — unhook from libsas/SCSI, quiesce interrupts, free the IRQ
 * and MSI vector, release adapter memory and PCI resources.  The
 * sequence mirrors the unwind labels in mvs_pci_init(); keep the order.
 */
static void __devexit mvs_pci_remove(struct pci_dev *pdev)
{
	struct mvs_info *mvi = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	/* drvdata may be NULL if probe never completed */
	if (mvi) {
		sas_unregister_ha(&mvi->sas);
		mvs_hba_interrupt_disable(mvi);
		sas_remove_host(mvi->shost);
		scsi_remove_host(mvi->shost);

		free_irq(pdev->irq, mvi);
		/* only disable MSI if probe actually enabled it */
		if (mvi->flags & MVF_MSI)
			pci_disable_msi(pdev);
		mvs_free(mvi);
		pci_release_regions(pdev);
	}
	pci_disable_device(pdev);
}
  2441. static struct sas_domain_function_template mvs_transport_ops = {
  2442. .lldd_execute_task = mvs_task_exec,
  2443. .lldd_control_phy = mvs_phy_control,
  2444. .lldd_abort_task = mvs_task_abort,
  2445. .lldd_port_formed = mvs_port_formed
  2446. };
/*
 * PCI IDs handled by this driver.  Note the 0x6340 part is initialized
 * with the chip_6440 data.
 */
static struct pci_device_id __devinitdata mvs_pci_table[] = {
	{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
	{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
	{ PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },

	{ }	/* terminate list */
};
/* PCI driver registration: binds the ID table to probe/remove */
static struct pci_driver mvs_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= mvs_pci_table,
	.probe		= mvs_pci_init,
	.remove		= __devexit_p(mvs_pci_remove),
};
  2460. static int __init mvs_init(void)
  2461. {
  2462. int rc;
  2463. mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
  2464. if (!mvs_stt)
  2465. return -ENOMEM;
  2466. rc = pci_register_driver(&mvs_pci_driver);
  2467. if (rc)
  2468. goto err_out;
  2469. return 0;
  2470. err_out:
  2471. sas_release_transport(mvs_stt);
  2472. return rc;
  2473. }
/* Module exit: unregister the PCI driver, then drop the sas transport */
static void __exit mvs_exit(void)
{
	pci_unregister_driver(&mvs_pci_driver);
	sas_release_transport(mvs_stt);
}
/* module boilerplate: entry/exit hooks, metadata, hotplug device table */
module_init(mvs_init);
module_exit(mvs_exit);

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mvs_pci_table);