/*
 * ahci.c - AHCI SATA support
 *
 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
 *                Please ALWAYS copy linux-ide@vger.kernel.org
 *                on emails.
 *
 * Copyright 2004-2005 Red Hat, Inc.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * AHCI hardware documentation:
 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"ahci"
#define DRV_VERSION	"3.0"
static int ahci_skip_host_reset;
module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");

static int ahci_enable_alpm(struct ata_port *ap,
                            enum link_pm policy);
static void ahci_disable_alpm(struct ata_port *ap);
static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
                              size_t size);
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
                                         ssize_t size);

#define MAX_SLOTS 8
enum {
        AHCI_PCI_BAR            = 5,
        AHCI_MAX_PORTS          = 32,
        AHCI_MAX_SG             = 168, /* hardware max is 64K */
        AHCI_DMA_BOUNDARY       = 0xffffffff,
        AHCI_MAX_CMDS           = 32,
        AHCI_CMD_SZ             = 32,
        AHCI_CMD_SLOT_SZ        = AHCI_MAX_CMDS * AHCI_CMD_SZ,
        AHCI_RX_FIS_SZ          = 256,
        AHCI_CMD_TBL_CDB        = 0x40,
        AHCI_CMD_TBL_HDR_SZ     = 0x80,
        AHCI_CMD_TBL_SZ         = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
        AHCI_CMD_TBL_AR_SZ      = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
        AHCI_PORT_PRIV_DMA_SZ   = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
                                  AHCI_RX_FIS_SZ,
        AHCI_IRQ_ON_SG          = (1 << 31),
        AHCI_CMD_ATAPI          = (1 << 5),
        AHCI_CMD_WRITE          = (1 << 6),
        AHCI_CMD_PREFETCH       = (1 << 7),
        AHCI_CMD_RESET          = (1 << 8),
        AHCI_CMD_CLR_BUSY       = (1 << 10),

        RX_FIS_D2H_REG          = 0x40, /* offset of D2H Register FIS data */
        RX_FIS_SDB              = 0x58, /* offset of SDB FIS data */
        RX_FIS_UNK              = 0x60, /* offset of Unknown FIS data */

        board_ahci              = 0,
        board_ahci_vt8251       = 1,
        board_ahci_ign_iferr    = 2,
        board_ahci_sb600        = 3,
        board_ahci_mv           = 4,
        board_ahci_sb700        = 5,
        board_ahci_mcp65        = 6,
        board_ahci_nopmp        = 7,

        /* global controller registers */
        HOST_CAP                = 0x00, /* host capabilities */
        HOST_CTL                = 0x04, /* global host control */
        HOST_IRQ_STAT           = 0x08, /* interrupt status */
        HOST_PORTS_IMPL         = 0x0c, /* bitmap of implemented ports */
        HOST_VERSION            = 0x10, /* AHCI spec. version compliancy */
        HOST_EM_LOC             = 0x1c, /* Enclosure Management location */
        HOST_EM_CTL             = 0x20, /* Enclosure Management Control */

        /* HOST_CTL bits */
        HOST_RESET              = (1 << 0),  /* reset controller; self-clear */
        HOST_IRQ_EN             = (1 << 1),  /* global IRQ enable */
        HOST_AHCI_EN            = (1 << 31), /* AHCI enabled */

        /* HOST_CAP bits */
        HOST_CAP_EMS            = (1 << 6),  /* Enclosure Management support */
        HOST_CAP_SSC            = (1 << 14), /* Slumber capable */
        HOST_CAP_PMP            = (1 << 17), /* Port Multiplier support */
        HOST_CAP_CLO            = (1 << 24), /* Command List Override support */
        HOST_CAP_ALPM           = (1 << 26), /* Aggressive Link PM support */
        HOST_CAP_SSS            = (1 << 27), /* Staggered Spin-up */
        HOST_CAP_SNTF           = (1 << 29), /* SNotification register */
        HOST_CAP_NCQ            = (1 << 30), /* Native Command Queueing */
        HOST_CAP_64             = (1 << 31), /* PCI DAC (64-bit DMA) support */

        /* registers for each SATA port */
        PORT_LST_ADDR           = 0x00, /* command list DMA addr */
        PORT_LST_ADDR_HI        = 0x04, /* command list DMA addr hi */
        PORT_FIS_ADDR           = 0x08, /* FIS rx buf addr */
        PORT_FIS_ADDR_HI        = 0x0c, /* FIS rx buf addr hi */
        PORT_IRQ_STAT           = 0x10, /* interrupt status */
        PORT_IRQ_MASK           = 0x14, /* interrupt enable/disable mask */
        PORT_CMD                = 0x18, /* port command */
        PORT_TFDATA             = 0x20, /* taskfile data */
        PORT_SIG                = 0x24, /* device TF signature */
        PORT_CMD_ISSUE          = 0x38, /* command issue */
        PORT_SCR_STAT           = 0x28, /* SATA phy register: SStatus */
        PORT_SCR_CTL            = 0x2c, /* SATA phy register: SControl */
        PORT_SCR_ERR            = 0x30, /* SATA phy register: SError */
        PORT_SCR_ACT            = 0x34, /* SATA phy register: SActive */
        PORT_SCR_NTF            = 0x3c, /* SATA phy register: SNotification */

        /* PORT_IRQ_{STAT,MASK} bits */
        PORT_IRQ_COLD_PRES      = (1 << 31), /* cold presence detect */
        PORT_IRQ_TF_ERR         = (1 << 30), /* task file error */
        PORT_IRQ_HBUS_ERR       = (1 << 29), /* host bus fatal error */
        PORT_IRQ_HBUS_DATA_ERR  = (1 << 28), /* host bus data error */
        PORT_IRQ_IF_ERR         = (1 << 27), /* interface fatal error */
        PORT_IRQ_IF_NONFATAL    = (1 << 26), /* interface non-fatal error */
        PORT_IRQ_OVERFLOW       = (1 << 24), /* xfer exhausted available S/G */
        PORT_IRQ_BAD_PMP        = (1 << 23), /* incorrect port multiplier */

        PORT_IRQ_PHYRDY         = (1 << 22), /* PhyRdy changed */
        PORT_IRQ_DEV_ILCK       = (1 << 7),  /* device interlock */
        PORT_IRQ_CONNECT        = (1 << 6),  /* port connect change status */
        PORT_IRQ_SG_DONE        = (1 << 5),  /* descriptor processed */
        PORT_IRQ_UNK_FIS        = (1 << 4),  /* unknown FIS rx'd */
        PORT_IRQ_SDB_FIS        = (1 << 3),  /* Set Device Bits FIS rx'd */
        PORT_IRQ_DMAS_FIS       = (1 << 2),  /* DMA Setup FIS rx'd */
        PORT_IRQ_PIOS_FIS       = (1 << 1),  /* PIO Setup FIS rx'd */
        PORT_IRQ_D2H_REG_FIS    = (1 << 0),  /* D2H Register FIS rx'd */

        PORT_IRQ_FREEZE         = PORT_IRQ_HBUS_ERR |
                                  PORT_IRQ_IF_ERR |
                                  PORT_IRQ_CONNECT |
                                  PORT_IRQ_PHYRDY |
                                  PORT_IRQ_UNK_FIS |
                                  PORT_IRQ_BAD_PMP,
        PORT_IRQ_ERROR          = PORT_IRQ_FREEZE |
                                  PORT_IRQ_TF_ERR |
                                  PORT_IRQ_HBUS_DATA_ERR,
        DEF_PORT_IRQ            = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
                                  PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
                                  PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,

        /* PORT_CMD bits */
        PORT_CMD_ASP            = (1 << 27), /* Aggressive Slumber/Partial */
        PORT_CMD_ALPE           = (1 << 26), /* Aggressive Link PM enable */
        PORT_CMD_ATAPI          = (1 << 24), /* Device is ATAPI */
        PORT_CMD_PMP            = (1 << 17), /* PMP attached */
        PORT_CMD_LIST_ON        = (1 << 15), /* cmd list DMA engine running */
        PORT_CMD_FIS_ON         = (1 << 14), /* FIS DMA engine running */
        PORT_CMD_FIS_RX         = (1 << 4),  /* Enable FIS receive DMA engine */
        PORT_CMD_CLO            = (1 << 3),  /* Command list override */
        PORT_CMD_POWER_ON       = (1 << 2),  /* Power up device */
        PORT_CMD_SPIN_UP        = (1 << 1),  /* Spin up device */
        PORT_CMD_START          = (1 << 0),  /* Enable port DMA engine */

        PORT_CMD_ICC_MASK       = (0xf << 28), /* i/f ICC state mask */
        PORT_CMD_ICC_ACTIVE     = (0x1 << 28), /* Put i/f in active state */
        PORT_CMD_ICC_PARTIAL    = (0x2 << 28), /* Put i/f in partial state */
        PORT_CMD_ICC_SLUMBER    = (0x6 << 28), /* Put i/f in slumber state */

        /* hpriv->flags bits */
        AHCI_HFLAG_NO_NCQ               = (1 << 0),
        AHCI_HFLAG_IGN_IRQ_IF_ERR       = (1 << 1), /* ignore IRQ_IF_ERR */
        AHCI_HFLAG_IGN_SERR_INTERNAL    = (1 << 2), /* ignore SERR_INTERNAL */
        AHCI_HFLAG_32BIT_ONLY           = (1 << 3), /* force 32bit */
        AHCI_HFLAG_MV_PATA              = (1 << 4), /* PATA port */
        AHCI_HFLAG_NO_MSI               = (1 << 5), /* no PCI MSI */
        AHCI_HFLAG_NO_PMP               = (1 << 6), /* no PMP */
        AHCI_HFLAG_NO_HOTPLUG           = (1 << 7), /* ignore PxSERR.DIAG.N */
        AHCI_HFLAG_SECT255              = (1 << 8), /* max 255 sectors */
        AHCI_HFLAG_YES_NCQ              = (1 << 9), /* force NCQ cap on */

        /* ap->flags bits */
        AHCI_FLAG_COMMON        = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
                                  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
                                  ATA_FLAG_IPM,

        ICH_MAP                 = 0x90, /* ICH MAP register */

        /* em_ctl bits */
        EM_CTL_RST              = (1 << 9),  /* Reset */
        EM_CTL_TM               = (1 << 8),  /* Transmit Message */
        EM_CTL_ALHD             = (1 << 26), /* Activity LED */
};
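
/*
 * Command-list entry, one per command slot.  Per the AHCI spec the first
 * dword carries the PRDT length together with the command flags and FIS
 * length, the second is updated by the HBA with the number of bytes
 * transferred, and the following two dwords hold the command table base
 * address.
 */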
struct ahci_cmd_hdr {
        __le32                  opts;
        __le32                  status;
        __le32                  tbl_addr;
        __le32                  tbl_addr_hi;
        __le32                  reserved[4];
};
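
/*
 * Scatter/gather (PRDT) entry: data buffer address plus a flags/size dword
 * whose low bits hold the byte count minus one and whose top bit requests
 * an interrupt on completion (AHCI_IRQ_ON_SG).
 */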
struct ahci_sg {
        __le32                  addr;
        __le32                  addr_hi;
        __le32                  reserved;
        __le32                  flags_size;
};

struct ahci_em_priv {
        enum sw_activity blink_policy;
        struct timer_list timer;
        unsigned long saved_activity;
        unsigned long activity;
        unsigned long led_state;
};

struct ahci_host_priv {
        unsigned int            flags;          /* AHCI_HFLAG_* */
        u32                     cap;            /* cap to use */
        u32                     port_map;       /* port map to use */
        u32                     saved_cap;      /* saved initial cap */
        u32                     saved_port_map; /* saved initial port_map */
        u32                     em_loc;         /* enclosure management location */
};

struct ahci_port_priv {
        struct ata_link         *active_link;
        struct ahci_cmd_hdr     *cmd_slot;
        dma_addr_t              cmd_slot_dma;
        void                    *cmd_tbl;
        dma_addr_t              cmd_tbl_dma;
        void                    *rx_fis;
        dma_addr_t              rx_fis_dma;
        /* for NCQ spurious interrupt analysis */
        unsigned int            ncq_saw_d2h:1;
        unsigned int            ncq_saw_dmas:1;
        unsigned int            ncq_saw_sdb:1;
        u32                     intr_mask;      /* interrupts to enable */
        struct ahci_em_priv     em_priv[MAX_SLOTS]; /* enclosure management info
                                                     * per PM slot */
};

static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
                          unsigned long deadline);
static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
                                unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
                          unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
                                 unsigned long deadline);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
                                unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_resume(struct ata_port *ap);
static void ahci_dev_config(struct ata_device *dev);
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
                               u32 opts);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif
static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
static ssize_t ahci_activity_store(struct ata_device *dev,
                                   enum sw_activity val);
static void ahci_init_sw_activity(struct ata_link *link);

static struct device_attribute *ahci_shost_attrs[] = {
        &dev_attr_link_power_management_policy,
        &dev_attr_em_message_type,
        &dev_attr_em_message,
        NULL
};

static struct device_attribute *ahci_sdev_attrs[] = {
        &dev_attr_sw_activity,
        NULL
};

static struct scsi_host_template ahci_sht = {
        ATA_NCQ_SHT(DRV_NAME),
        .can_queue              = AHCI_MAX_CMDS - 1,
        .sg_tablesize           = AHCI_MAX_SG,
        .dma_boundary           = AHCI_DMA_BOUNDARY,
        .shost_attrs            = ahci_shost_attrs,
        .sdev_attrs             = ahci_sdev_attrs,
};

static struct ata_port_operations ahci_ops = {
        .inherits               = &sata_pmp_port_ops,

        .qc_defer               = sata_pmp_qc_defer_cmd_switch,
        .qc_prep                = ahci_qc_prep,
        .qc_issue               = ahci_qc_issue,
        .qc_fill_rtf            = ahci_qc_fill_rtf,

        .freeze                 = ahci_freeze,
        .thaw                   = ahci_thaw,
        .softreset              = ahci_softreset,
        .hardreset              = ahci_hardreset,
        .postreset              = ahci_postreset,
        .pmp_softreset          = ahci_softreset,
        .error_handler          = ahci_error_handler,
        .post_internal_cmd      = ahci_post_internal_cmd,
        .dev_config             = ahci_dev_config,

        .scr_read               = ahci_scr_read,
        .scr_write              = ahci_scr_write,
        .pmp_attach             = ahci_pmp_attach,
        .pmp_detach             = ahci_pmp_detach,

        .enable_pm              = ahci_enable_alpm,
        .disable_pm             = ahci_disable_alpm,
        .em_show                = ahci_led_show,
        .em_store               = ahci_led_store,
        .sw_activity_show       = ahci_activity_show,
        .sw_activity_store      = ahci_activity_store,
#ifdef CONFIG_PM
        .port_suspend           = ahci_port_suspend,
        .port_resume            = ahci_port_resume,
#endif
        .port_start             = ahci_port_start,
        .port_stop              = ahci_port_stop,
};

static struct ata_port_operations ahci_vt8251_ops = {
        .inherits               = &ahci_ops,
        .hardreset              = ahci_vt8251_hardreset,
};

static struct ata_port_operations ahci_p5wdh_ops = {
        .inherits               = &ahci_ops,
        .hardreset              = ahci_p5wdh_hardreset,
};

static struct ata_port_operations ahci_sb600_ops = {
        .inherits               = &ahci_ops,
        .softreset              = ahci_sb600_softreset,
        .pmp_softreset          = ahci_sb600_softreset,
};

#define AHCI_HFLAGS(flags)      .private_data = (void *)(flags)

static const struct ata_port_info ahci_port_info[] = {
        /* board_ahci */
        {
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
        /* board_ahci_vt8251 */
        {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_vt8251_ops,
        },
        /* board_ahci_ign_iferr */
        {
                AHCI_HFLAGS     (AHCI_HFLAG_IGN_IRQ_IF_ERR),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
        /* board_ahci_sb600 */
        {
                AHCI_HFLAGS     (AHCI_HFLAG_IGN_SERR_INTERNAL |
                                 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
                                 AHCI_HFLAG_SECT255),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_sb600_ops,
        },
        /* board_ahci_mv */
        {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
                                 AHCI_HFLAG_MV_PATA),
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
        /* board_ahci_sb700 */
        {
                AHCI_HFLAGS     (AHCI_HFLAG_IGN_SERR_INTERNAL),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_sb600_ops,
        },
        /* board_ahci_mcp65 */
        {
                AHCI_HFLAGS     (AHCI_HFLAG_YES_NCQ),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
        /* board_ahci_nopmp */
        {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_PMP),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = 0x1f, /* pio0-4 */
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
};

static const struct pci_device_id ahci_pci_tbl[] = {
        /* Intel */
        { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
        { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
        { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
        { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
        { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
        { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
        { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
        { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
        { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
        { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
        { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
        { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
        { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
        { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
        { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
        { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
        { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
        { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
        { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
        { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
        { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
        { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
        { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
        { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
        { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
        { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
        { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
        { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
        { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
        { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
        { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
        { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
        { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */

        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },

        /* ATI */
        { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
        { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
        { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
        { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
        { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
        { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
        { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */

        /* VIA */
        { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
        { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */

        /* NVIDIA */
        { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
        { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
        { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
        { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
        { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
        { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
        { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
        { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
        { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
        { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
        { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
        { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
        { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
        { PCI_VDEVICE(NVIDIA, 0x0bc4), board_ahci }, /* MCP7B */
        { PCI_VDEVICE(NVIDIA, 0x0bc5), board_ahci }, /* MCP7B */
        { PCI_VDEVICE(NVIDIA, 0x0bc6), board_ahci }, /* MCP7B */
        { PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci }, /* MCP7B */

        /* SiS */
        { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
        { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
        { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */

        /* Marvell */
        { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
        { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */

        /* Generic, PCI class code for AHCI */
        { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
          PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },

        { }     /* terminate list */
};

static struct pci_driver ahci_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = ahci_pci_tbl,
        .probe                  = ahci_init_one,
        .remove                 = ata_pci_remove_one,
#ifdef CONFIG_PM
        .suspend                = ahci_pci_device_suspend,
        .resume                 = ahci_pci_device_resume,
#endif
};

static int ahci_em_messages = 1;
module_param(ahci_em_messages, int, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
        "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");

static inline int ahci_nr_ports(u32 cap)
{
        return (cap & 0x1f) + 1;
}
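
/*
 * Per the AHCI spec, the per-port register blocks start at ABAR offset
 * 0x100 and are 0x80 bytes apart, so port N's registers begin at
 * 0x100 + N * 0x80.
 */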
static inline void __iomem *__ahci_port_base(struct ata_host *host,
                                             unsigned int port_no)
{
        void __iomem *mmio = host->iomap[AHCI_PCI_BAR];

        return mmio + 0x100 + (port_no * 0x80);
}

static inline void __iomem *ahci_port_base(struct ata_port *ap)
{
        return __ahci_port_base(ap->host, ap->port_no);
}

static void ahci_enable_ahci(void __iomem *mmio)
{
        int i;
        u32 tmp;

        /* turn on AHCI_EN */
        tmp = readl(mmio + HOST_CTL);
        if (tmp & HOST_AHCI_EN)
                return;

        /* Some controllers need AHCI_EN to be written multiple times.
         * Try a few times before giving up.
         */
        for (i = 0; i < 5; i++) {
                tmp |= HOST_AHCI_EN;
                writel(tmp, mmio + HOST_CTL);
                tmp = readl(mmio + HOST_CTL);   /* flush && sanity check */
                if (tmp & HOST_AHCI_EN)
                        return;
                msleep(10);
        }

        WARN_ON(1);
}

/**
 *      ahci_save_initial_config - Save and fixup initial config values
 *      @pdev: target PCI device
 *      @hpriv: host private area to store config values
 *
 *      Some registers containing configuration info might be setup by
 *      BIOS and might be cleared on reset.  This function saves the
 *      initial values of those registers into @hpriv such that they
 *      can be restored after controller reset.
 *
 *      If inconsistent, config values are fixed up by this function.
 *
 *      LOCKING:
 *      None.
 */
static void ahci_save_initial_config(struct pci_dev *pdev,
                                     struct ahci_host_priv *hpriv)
{
        void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
        u32 cap, port_map;
        int i;
        int mv;

        /* make sure AHCI mode is enabled before accessing CAP */
        ahci_enable_ahci(mmio);

        /* Values prefixed with saved_ are written back to host after
         * reset.  Values without are used for driver operation.
         */
        hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
        hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);

        /* some chips have errata preventing 64bit use */
        if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
                dev_printk(KERN_INFO, &pdev->dev,
                           "controller can't do 64bit DMA, forcing 32bit\n");
                cap &= ~HOST_CAP_64;
        }

        if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
                dev_printk(KERN_INFO, &pdev->dev,
                           "controller can't do NCQ, turning off CAP_NCQ\n");
                cap &= ~HOST_CAP_NCQ;
        }

        if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
                dev_printk(KERN_INFO, &pdev->dev,
                           "controller can do NCQ, turning on CAP_NCQ\n");
                cap |= HOST_CAP_NCQ;
        }

        if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
                dev_printk(KERN_INFO, &pdev->dev,
                           "controller can't do PMP, turning off CAP_PMP\n");
                cap &= ~HOST_CAP_PMP;
        }

        if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
            port_map != 1) {
                dev_printk(KERN_INFO, &pdev->dev,
                           "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
                           port_map, 1);
                port_map = 1;
        }

        /*
         * Temporary Marvell 6145 hack: PATA port presence
         * is asserted through the standard AHCI port
         * presence register, as bit 4 (counting from 0)
         */
        if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
                if (pdev->device == 0x6121)
                        mv = 0x3;
                else
                        mv = 0xf;
                dev_printk(KERN_ERR, &pdev->dev,
                           "MV_AHCI HACK: port_map %x -> %x\n",
                           port_map,
                           port_map & mv);
                port_map &= mv;
        }

        /* cross check port_map and cap.n_ports */
        if (port_map) {
                int map_ports = 0;

                for (i = 0; i < AHCI_MAX_PORTS; i++)
                        if (port_map & (1 << i))
                                map_ports++;

                /* If PI has more ports than n_ports, whine, clear
                 * port_map and let it be generated from n_ports.
                 */
                if (map_ports > ahci_nr_ports(cap)) {
                        dev_printk(KERN_WARNING, &pdev->dev,
                                   "implemented port map (0x%x) contains more "
                                   "ports than nr_ports (%u), using nr_ports\n",
                                   port_map, ahci_nr_ports(cap));
                        port_map = 0;
                }
        }

        /* fabricate port_map from cap.nr_ports */
        if (!port_map) {
                port_map = (1 << ahci_nr_ports(cap)) - 1;
                dev_printk(KERN_WARNING, &pdev->dev,
                           "forcing PORTS_IMPL to 0x%x\n", port_map);

                /* write the fixed up value to the PI register */
                hpriv->saved_port_map = port_map;
        }

        /* record values to use during operation */
        hpriv->cap = cap;
        hpriv->port_map = port_map;
}

/**
 *      ahci_restore_initial_config - Restore initial config
 *      @host: target ATA host
 *
 *      Restore initial config stored by ahci_save_initial_config().
 *
 *      LOCKING:
 *      None.
 */
static void ahci_restore_initial_config(struct ata_host *host)
{
        struct ahci_host_priv *hpriv = host->private_data;
        void __iomem *mmio = host->iomap[AHCI_PCI_BAR];

        writel(hpriv->saved_cap, mmio + HOST_CAP);
        writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
        (void) readl(mmio + HOST_PORTS_IMPL);   /* flush */
}

static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
{
        static const int offset[] = {
                [SCR_STATUS]            = PORT_SCR_STAT,
                [SCR_CONTROL]           = PORT_SCR_CTL,
                [SCR_ERROR]             = PORT_SCR_ERR,
                [SCR_ACTIVE]            = PORT_SCR_ACT,
                [SCR_NOTIFICATION]      = PORT_SCR_NTF,
        };
        struct ahci_host_priv *hpriv = ap->host->private_data;

        if (sc_reg < ARRAY_SIZE(offset) &&
            (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
                return offset[sc_reg];
        return 0;
}

static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
        void __iomem *port_mmio = ahci_port_base(ap);
        int offset = ahci_scr_offset(ap, sc_reg);

        if (offset) {
                *val = readl(port_mmio + offset);
                return 0;
        }
        return -EINVAL;
}

static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
        void __iomem *port_mmio = ahci_port_base(ap);
        int offset = ahci_scr_offset(ap, sc_reg);

        if (offset) {
                writel(val, port_mmio + offset);
                return 0;
        }
        return -EINVAL;
}
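
/*
 * PxCMD.ST starts the port's command-list DMA engine; the HBA reflects the
 * running state in PxCMD.CR (PORT_CMD_LIST_ON), which ahci_stop_engine()
 * below polls when shutting the engine down.
 */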
static void ahci_start_engine(struct ata_port *ap)
{
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 tmp;

        /* start DMA */
        tmp = readl(port_mmio + PORT_CMD);
        tmp |= PORT_CMD_START;
        writel(tmp, port_mmio + PORT_CMD);
        readl(port_mmio + PORT_CMD); /* flush */
}

static int ahci_stop_engine(struct ata_port *ap)
{
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 tmp;

        tmp = readl(port_mmio + PORT_CMD);

        /* check if the HBA is idle */
        if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
                return 0;

        /* setting HBA to idle */
        tmp &= ~PORT_CMD_START;
        writel(tmp, port_mmio + PORT_CMD);

        /* wait for engine to stop. This could be as long as 500 msec */
        tmp = ata_wait_register(port_mmio + PORT_CMD,
                                PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
        if (tmp & PORT_CMD_LIST_ON)
                return -EIO;

        return 0;
}
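
/*
 * Program the command-list and received-FIS base addresses.  The
 * (addr >> 16) >> 16 form yields the high dword without shifting a 32-bit
 * dma_addr_t by 32, which would be undefined behaviour.
 */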
static void ahci_start_fis_rx(struct ata_port *ap)
{
        void __iomem *port_mmio = ahci_port_base(ap);
        struct ahci_host_priv *hpriv = ap->host->private_data;
        struct ahci_port_priv *pp = ap->private_data;
        u32 tmp;

        /* set FIS registers */
        if (hpriv->cap & HOST_CAP_64)
                writel((pp->cmd_slot_dma >> 16) >> 16,
                       port_mmio + PORT_LST_ADDR_HI);
        writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

        if (hpriv->cap & HOST_CAP_64)
                writel((pp->rx_fis_dma >> 16) >> 16,
                       port_mmio + PORT_FIS_ADDR_HI);
        writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

        /* enable FIS reception */
        tmp = readl(port_mmio + PORT_CMD);
        tmp |= PORT_CMD_FIS_RX;
        writel(tmp, port_mmio + PORT_CMD);

        /* flush */
        readl(port_mmio + PORT_CMD);
}

static int ahci_stop_fis_rx(struct ata_port *ap)
{
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 tmp;

        /* disable FIS reception */
        tmp = readl(port_mmio + PORT_CMD);
        tmp &= ~PORT_CMD_FIS_RX;
        writel(tmp, port_mmio + PORT_CMD);

        /* wait for completion, spec says 500ms, give it 1000 */
        tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
                                PORT_CMD_FIS_ON, 10, 1000);
        if (tmp & PORT_CMD_FIS_ON)
                return -EBUSY;

        return 0;
}
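
/*
 * With staggered spin-up (CAP.SSS) the HBA leaves ports spun down until
 * software sets PxCMD.SUD, so spin-up is requested explicitly here before
 * forcing the interface into the active ICC state.
 */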
static void ahci_power_up(struct ata_port *ap)
{
        struct ahci_host_priv *hpriv = ap->host->private_data;
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 cmd;

        cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

        /* spin up device */
        if (hpriv->cap & HOST_CAP_SSS) {
                cmd |= PORT_CMD_SPIN_UP;
                writel(cmd, port_mmio + PORT_CMD);
        }

        /* wake up link */
        writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}

static void ahci_disable_alpm(struct ata_port *ap)
{
        struct ahci_host_priv *hpriv = ap->host->private_data;
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 cmd;
        struct ahci_port_priv *pp = ap->private_data;

        /* IPM bits should be disabled by libata-core */
        /* get the existing command bits */
        cmd = readl(port_mmio + PORT_CMD);

        /* disable ALPM and ASP */
        cmd &= ~PORT_CMD_ASP;
        cmd &= ~PORT_CMD_ALPE;

        /* force the interface back to active */
        cmd |= PORT_CMD_ICC_ACTIVE;

        /* write out new cmd value */
        writel(cmd, port_mmio + PORT_CMD);
        cmd = readl(port_mmio + PORT_CMD);

        /* wait 10ms to be sure we've come out of any low power state */
        msleep(10);

        /* clear out any PhyRdy stuff from interrupt status */
        writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);

        /* go ahead and clean out PhyRdy Change from Serror too */
        ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));

        /*
         * Clear flag to indicate that we should ignore all PhyRdy
         * state changes
         */
        hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;

        /*
         * Enable interrupts on Phy Ready.
         */
        pp->intr_mask |= PORT_IRQ_PHYRDY;
        writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

        /*
         * don't change the link pm policy - we can be called
         * just to turn off link pm temporarily
         */
}

static int ahci_enable_alpm(struct ata_port *ap,
        enum link_pm policy)
{
        struct ahci_host_priv *hpriv = ap->host->private_data;
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 cmd;
        struct ahci_port_priv *pp = ap->private_data;
        u32 asp;

        /* Make sure the host is capable of link power management */
        if (!(hpriv->cap & HOST_CAP_ALPM))
                return -EINVAL;

        switch (policy) {
        case MAX_PERFORMANCE:
        case NOT_AVAILABLE:
                /*
                 * if we came here with NOT_AVAILABLE,
                 * it just means this is the first time we
                 * have tried to enable - default to max performance,
                 * and let the user go to lower power modes on request.
                 */
                ahci_disable_alpm(ap);
                return 0;
        case MIN_POWER:
                /* configure HBA to enter SLUMBER */
                asp = PORT_CMD_ASP;
                break;
        case MEDIUM_POWER:
                /* configure HBA to enter PARTIAL */
                asp = 0;
                break;
        default:
                return -EINVAL;
        }

        /*
         * Disable interrupts on Phy Ready. This keeps us from
         * getting woken up due to spurious phy ready interrupts
         * TBD - Hot plug should be done via polling now, is
         * that even supported?
         */
        pp->intr_mask &= ~PORT_IRQ_PHYRDY;
        writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

        /*
         * Set a flag to indicate that we should ignore all PhyRdy
         * state changes since these can happen now whenever we
         * change link state
         */
        hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;

        /* get the existing command bits */
        cmd = readl(port_mmio + PORT_CMD);

        /*
         * Set ASP based on Policy
         */
        cmd |= asp;

        /*
         * Setting this bit will instruct the HBA to aggressively
         * enter a lower power link state when it's appropriate and
         * based on the value set above for ASP
         */
        cmd |= PORT_CMD_ALPE;

        /* write out new cmd value */
        writel(cmd, port_mmio + PORT_CMD);
        cmd = readl(port_mmio + PORT_CMD);

        /* IPM bits should be set by libata-core */
        return 0;
}

#ifdef CONFIG_PM
static void ahci_power_down(struct ata_port *ap)
{
        struct ahci_host_priv *hpriv = ap->host->private_data;
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 cmd, scontrol;

        if (!(hpriv->cap & HOST_CAP_SSS))
                return;

        /* put device into listen mode, first set PxSCTL.DET to 0 */
        scontrol = readl(port_mmio + PORT_SCR_CTL);
        scontrol &= ~0xf;
        writel(scontrol, port_mmio + PORT_SCR_CTL);

        /* then set PxCMD.SUD to 0 */
        cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
        cmd &= ~PORT_CMD_SPIN_UP;
        writel(cmd, port_mmio + PORT_CMD);
}
#endif

static void ahci_start_port(struct ata_port *ap)
{
        struct ahci_port_priv *pp = ap->private_data;
        struct ata_link *link;
        struct ahci_em_priv *emp;

        /* enable FIS reception */
        ahci_start_fis_rx(ap);

        /* enable DMA */
        ahci_start_engine(ap);

        /* turn on LEDs */
        if (ap->flags & ATA_FLAG_EM) {
                ata_port_for_each_link(link, ap) {
                        emp = &pp->em_priv[link->pmp];
                        ahci_transmit_led_message(ap, emp->led_state, 4);
                }
        }

        if (ap->flags & ATA_FLAG_SW_ACTIVITY)
                ata_port_for_each_link(link, ap)
                        ahci_init_sw_activity(link);
}

static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
        int rc;

        /* disable DMA */
        rc = ahci_stop_engine(ap);
        if (rc) {
                *emsg = "failed to stop engine";
                return rc;
        }

        /* disable FIS reception */
        rc = ahci_stop_fis_rx(ap);
        if (rc) {
                *emsg = "failed stop FIS RX";
                return rc;
        }

        return 0;
}

static int ahci_reset_controller(struct ata_host *host)
{
        struct pci_dev *pdev = to_pci_dev(host->dev);
        struct ahci_host_priv *hpriv = host->private_data;
        void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
        u32 tmp;

        /* we must be in AHCI mode, before using anything
         * AHCI-specific, such as HOST_RESET.
         */
        ahci_enable_ahci(mmio);

        /* global controller reset */
        if (!ahci_skip_host_reset) {
                tmp = readl(mmio + HOST_CTL);
                if ((tmp & HOST_RESET) == 0) {
                        writel(tmp | HOST_RESET, mmio + HOST_CTL);
                        readl(mmio + HOST_CTL); /* flush */
                }

                /*
                 * to perform host reset, OS should set HOST_RESET
                 * and poll until this bit is read to be "0".
                 * reset must complete within 1 second, or
                 * the hardware should be considered fried.
                 */
                tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
                                        HOST_RESET, 10, 1000);

                if (tmp & HOST_RESET) {
                        dev_printk(KERN_ERR, host->dev,
                                   "controller reset failed (0x%x)\n", tmp);
                        return -EIO;
                }

                /* turn on AHCI mode */
                ahci_enable_ahci(mmio);

                /* Some registers might be cleared on reset.  Restore
                 * initial values.
                 */
                ahci_restore_initial_config(host);
        } else
                dev_printk(KERN_INFO, host->dev,
                           "skipping global host reset\n");

        if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
                u16 tmp16;

                /* configure PCS */
                pci_read_config_word(pdev, 0x92, &tmp16);
                if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
                        tmp16 |= hpriv->port_map;
                        pci_write_config_word(pdev, 0x92, tmp16);
                }
        }

        return 0;
}
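
/*
 * Software-activity LED support: ahci_sw_activity() bumps a per-link
 * activity counter and arms a timer; ahci_sw_activity_blink() then toggles
 * the activity LED while the counter keeps changing and parks the LED in
 * its idle state once activity stops.
 */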
static void ahci_sw_activity(struct ata_link *link)
{
        struct ata_port *ap = link->ap;
        struct ahci_port_priv *pp = ap->private_data;
        struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

        if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
                return;

        emp->activity++;
        if (!timer_pending(&emp->timer))
                mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
}

static void ahci_sw_activity_blink(unsigned long arg)
{
        struct ata_link *link = (struct ata_link *)arg;
        struct ata_port *ap = link->ap;
        struct ahci_port_priv *pp = ap->private_data;
        struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
        unsigned long led_message = emp->led_state;
        u32 activity_led_state;

        led_message &= 0xffff0000;
        led_message |= ap->port_no | (link->pmp << 8);

        /* check to see if we've had activity.  If so,
         * toggle state of LED and reset timer.  If not,
         * turn LED to desired idle state.
         */
        if (emp->saved_activity != emp->activity) {
                emp->saved_activity = emp->activity;
                /* get the current LED state */
                activity_led_state = led_message & 0x00010000;

                if (activity_led_state)
                        activity_led_state = 0;
                else
                        activity_led_state = 1;

                /* clear old state */
                led_message &= 0xfff8ffff;

                /* toggle state */
                led_message |= (activity_led_state << 16);
                mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
        } else {
                /* switch to idle */
                led_message &= 0xfff8ffff;
                if (emp->blink_policy == BLINK_OFF)
                        led_message |= (1 << 16);
        }
        ahci_transmit_led_message(ap, led_message, 4);
}

static void ahci_init_sw_activity(struct ata_link *link)
{
        struct ata_port *ap = link->ap;
        struct ahci_port_priv *pp = ap->private_data;
        struct ahci_em_priv *emp = &pp->em_priv[link->pmp];

        /* init activity stats, setup timer */
        emp->saved_activity = emp->activity = 0;
        setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);

        /* check our blink policy and set flag for link if it's enabled */
        if (emp->blink_policy)
                link->flags |= ATA_LFLAG_SW_ACTIVITY;
}

static int ahci_reset_em(struct ata_host *host)
{
        void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
        u32 em_ctl;

        em_ctl = readl(mmio + HOST_EM_CTL);
        if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
                return -EINVAL;

        writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
        return 0;
}
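
/*
 * LED messages are two dwords written to the enclosure-management transmit
 * buffer (located via HOST_EM_LOC): a header carrying the message size and
 * a body encoding the port number, the PMP slot (bits 8-15 of @state) and
 * the LED state bits; setting EM_CTL.TM then tells the HBA to transmit it.
 */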
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
                                         ssize_t size)
{
        struct ahci_host_priv *hpriv = ap->host->private_data;
        struct ahci_port_priv *pp = ap->private_data;
        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
        u32 em_ctl;
        u32 message[] = {0, 0};
        unsigned long flags;
        int pmp;
        struct ahci_em_priv *emp;

        /* get the slot number from the message */
        pmp = (state & 0x0000ff00) >> 8;
        if (pmp < MAX_SLOTS)
                emp = &pp->em_priv[pmp];
        else
                return -EINVAL;

        spin_lock_irqsave(ap->lock, flags);

        /*
         * if we are still busy transmitting a previous message,
         * do not allow
         */
        em_ctl = readl(mmio + HOST_EM_CTL);
        if (em_ctl & EM_CTL_TM) {
                spin_unlock_irqrestore(ap->lock, flags);
                return -EINVAL;
        }

        /*
         * create message header - this is all zero except for
         * the message size, which is 4 bytes.
         */
        message[0] |= (4 << 8);

        /* ignore 0:4 of byte zero, fill in port info yourself */
        message[1] = ((state & 0xfffffff0) | ap->port_no);

        /* write message to EM_LOC */
        writel(message[0], mmio + hpriv->em_loc);
        writel(message[1], mmio + hpriv->em_loc+4);

        /* save off new led state for port/slot */
        emp->led_state = message[1];

        /*
         * tell hardware to transmit the message
         */
        writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);

        spin_unlock_irqrestore(ap->lock, flags);
        return size;
}

static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
{
        struct ahci_port_priv *pp = ap->private_data;
        struct ata_link *link;
        struct ahci_em_priv *emp;
        int rc = 0;

        ata_port_for_each_link(link, ap) {
                emp = &pp->em_priv[link->pmp];
                rc += sprintf(buf, "%lx\n", emp->led_state);
        }
        return rc;
}

static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
                              size_t size)
{
        int state;
        int pmp;
        struct ahci_port_priv *pp = ap->private_data;
        struct ahci_em_priv *emp;

        state = simple_strtoul(buf, NULL, 0);

        /* get the slot number from the message */
        pmp = (state & 0x0000ff00) >> 8;
        if (pmp < MAX_SLOTS)
                emp = &pp->em_priv[pmp];
        else
                return -EINVAL;

        /* mask off the activity bits if we are in sw_activity
         * mode, user should turn off sw_activity before setting
         * activity led through em_message
         */
        if (emp->blink_policy)
                state &= 0xfff8ffff;

        return ahci_transmit_led_message(ap, state, size);
}
  1184. static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
  1185. {
  1186. struct ata_link *link = dev->link;
  1187. struct ata_port *ap = link->ap;
  1188. struct ahci_port_priv *pp = ap->private_data;
  1189. struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
  1190. u32 port_led_state = emp->led_state;
  1191. /* save the desired Activity LED behavior */
  1192. if (val == OFF) {
  1193. /* clear LFLAG */
  1194. link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
  1195. /* set the LED to OFF */
  1196. port_led_state &= 0xfff80000;
  1197. port_led_state |= (ap->port_no | (link->pmp << 8));
  1198. ahci_transmit_led_message(ap, port_led_state, 4);
  1199. } else {
  1200. link->flags |= ATA_LFLAG_SW_ACTIVITY;
  1201. if (val == BLINK_OFF) {
  1202. /* set LED to ON for idle */
  1203. port_led_state &= 0xfff80000;
  1204. port_led_state |= (ap->port_no | (link->pmp << 8));
  1205. port_led_state |= 0x00010000; /* check this */
  1206. ahci_transmit_led_message(ap, port_led_state, 4);
  1207. }
  1208. }
  1209. emp->blink_policy = val;
  1210. return 0;
  1211. }
  1212. static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
  1213. {
  1214. struct ata_link *link = dev->link;
  1215. struct ata_port *ap = link->ap;
  1216. struct ahci_port_priv *pp = ap->private_data;
  1217. struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
  1218. /* display the saved value of activity behavior for this
  1219. * disk.
  1220. */
  1221. return sprintf(buf, "%d\n", emp->blink_policy);
  1222. }
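
/*
 * Bring a single port into a known state before use: make sure its DMA
 * engines are stopped, then clear any stale SError bits and pending
 * port/host interrupt status for that port.
 */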
static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
			   int port_no, void __iomem *mmio,
			   void __iomem *port_mmio)
{
	const char *emsg = NULL;
	int rc;
	u32 tmp;

	/* make sure port is not active */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		dev_printk(KERN_WARNING, &pdev->dev,
			   "%s (%d)\n", emsg, rc);

	/* clear SError */
	tmp = readl(port_mmio + PORT_SCR_ERR);
	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
	writel(tmp, port_mmio + PORT_SCR_ERR);

	/* clear port IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
	if (tmp)
		writel(tmp, port_mmio + PORT_IRQ_STAT);

	writel(1 << port_no, mmio + HOST_IRQ_STAT);
}
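
/*
 * Controller-wide initialization: quiesce the PATA companion port on
 * Marvell parts (AHCI_HFLAG_MV_PATA), run ahci_port_init() on every
 * implemented port, and finally enable global interrupt delivery via
 * HOST_CTL.
 */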
static void ahci_init_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	int i;
	void __iomem *port_mmio;
	u32 tmp;
	int mv;

	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
		if (pdev->device == 0x6121)
			mv = 2;
		else
			mv = 4;
		port_mmio = __ahci_port_base(host, mv);

		writel(0, port_mmio + PORT_IRQ_MASK);

		/* clear port IRQ */
		tmp = readl(port_mmio + PORT_IRQ_STAT);
		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
		if (tmp)
			writel(tmp, port_mmio + PORT_IRQ_STAT);
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		port_mmio = ahci_port_base(ap);
		if (ata_port_is_dummy(ap))
			continue;

		ahci_port_init(pdev, ap, i, mmio, port_mmio);
	}

	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
}

static void ahci_dev_config(struct ata_device *dev)
{
	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;

	if (hpriv->flags & AHCI_HFLAG_SECT255) {
		dev->max_sectors = 255;
		ata_dev_printk(dev, KERN_INFO,
			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
	}
}

static unsigned int ahci_dev_classify(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_taskfile tf;
	u32 tmp;

	tmp = readl(port_mmio + PORT_SIG);
	tf.lbah = (tmp >> 24) & 0xff;
	tf.lbam = (tmp >> 16) & 0xff;
	tf.lbal = (tmp >> 8) & 0xff;
	tf.nsect = (tmp) & 0xff;

	return ata_dev_classify(&tf);
}

static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts)
{
	dma_addr_t cmd_tbl_dma;

	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;

	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
	pp->cmd_slot[tag].status = 0;
	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}
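
/*
 * ahci_kick_engine() recovers a port whose task file is stuck BSY/DRQ
 * (or unconditionally when force_restart is set): stop the command
 * engine, issue a Command List Override if the controller advertises
 * HOST_CAP_CLO, then restart the engine.
 */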
static int ahci_kick_engine(struct ata_port *ap, int force_restart)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 tmp;
	int busy, rc;

	/* do we need to kick the port? */
	busy = status & (ATA_BUSY | ATA_DRQ);
	if (!busy && !force_restart)
		return 0;

	/* stop engine */
	rc = ahci_stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO? */
	if (!busy) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	rc = 0;
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	ahci_start_engine(ap);
	return rc;
}
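
/*
 * Issue a single command FIS through slot 0 and, when a timeout is
 * given, poll PORT_CMD_ISSUE until the hardware clears the bit; a
 * command that stays pending gets the engine kicked and -EBUSY
 * returned.
 */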
static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned long timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
					1, timeout_msec);
		if (tmp & 0x1) {
			ahci_kick_engine(ap, 1);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}
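
/*
 * Common SRST implementation (AHCI-1.1 10.4.1): kick the engine so
 * slot 0 is usable, send the SRST-set and SRST-clear H2D Register
 * FISes, then wait for the link to report ready and classify the
 * attached device from its signature.
 */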
static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
			     int pmp, unsigned long deadline,
			     int (*check_ready)(struct ata_link *link))
{
	struct ata_port *ap = link->ap;
	const char *reason = NULL;
	unsigned long now, msecs;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_kick_engine(ap, 1);
	if (rc && rc != -EOPNOTSUPP)
		ata_link_printk(link, KERN_WARNING,
				"failed to reset engine (errno=%d)\n", rc);

	ata_tf_init(link->device, &tf);

	/* issue the first H2D Register FIS */
	msecs = 0;
	now = jiffies;
	if (time_after(deadline, now))
		msecs = jiffies_to_msecs(deadline - now);

	tf.ctl |= ATA_SRST;
	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	msleep(1);

	/* issue the second H2D Register FIS */
	tf.ctl &= ~ATA_SRST;
	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);

	/* wait for link to become ready */
	rc = ata_wait_after_reset(link, deadline, check_ready);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		reason = "device not ready";
		goto fail;
	}
	*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail:
	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
	return rc;
}

static int ahci_check_ready(struct ata_link *link)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;

	return ata_check_ready(status);
}

static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	int pmp = sata_srst_pmp(link);

	DPRINTK("ENTER\n");

	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}

static int ahci_sb600_check_ready(struct ata_link *link)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);

	/*
	 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
	 * which can save timeout delay.
	 */
	if (irq_status & PORT_IRQ_BAD_PMP)
		return -EIO;

	return ata_check_ready(status);
}

static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	int pmp = sata_srst_pmp(link);
	int rc;
	u32 irq_sts;

	DPRINTK("ENTER\n");

	rc = ahci_do_softreset(link, class, pmp, deadline,
			       ahci_sb600_check_ready);

	/*
	 * Soft reset fails on some ATI chips with IPMS set when PMP
	 * is enabled but SATA HDD/ODD is connected to SATA port,
	 * do soft reset again to port 0.
	 */
	if (rc == -EIO) {
		irq_sts = readl(port_mmio + PORT_IRQ_STAT);
		if (irq_sts & PORT_IRQ_BAD_PMP) {
			ata_link_printk(link, KERN_WARNING,
					"failed due to HW bug, retry pmp=0\n");
			rc = ahci_do_softreset(link, class, 0, deadline,
					       ahci_check_ready);
		}
	}

	return rc;
}

static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}

static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline, &online, NULL);

	ahci_start_engine(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);

	/* vt8251 doesn't clear BSY on signature FIS reception,
	 * request follow-up softreset.
	 */
	return online ? -EAGAIN : rc;
}

static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline, &online, NULL);

	ahci_start_engine(ap);

	/* The pseudo configuration device on SIMG4726 attached to
	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
	 * hardreset if no device is attached to the first downstream
	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
	 * work around this, wait for !BSY only briefly.  If BSY isn't
	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
	 *
	 * Wait for two seconds.  Devices attached to downstream port
	 * which can't process the following IDENTIFY after this will
	 * have to be reset again.  For most cases, this should
	 * suffice while making probing snappish enough.
	 */
	if (online) {
		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
					  ahci_check_ready);
		if (rc)
			ahci_kick_engine(ap, 0);
	}
	return rc;
}

static void ahci_postreset(struct ata_link *link, unsigned int *class)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 new_tmp, tmp;

	ata_std_postreset(link, class);

	/* Make sure port's ATAPI bit is set appropriately */
	new_tmp = tmp = readl(port_mmio + PORT_CMD);
	if (*class == ATA_DEV_ATAPI)
		new_tmp |= PORT_CMD_ATAPI;
	else
		new_tmp &= ~PORT_CMD_ATAPI;
	if (new_tmp != tmp) {
		writel(new_tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD); /* flush */
	}
}
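
/*
 * ahci_fill_sg() converts the queued command's scatterlist into the
 * entries that follow the command FIS in the command table; each entry
 * carries a 64-bit buffer address (split into two 32-bit words) and
 * the byte count minus one.
 */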
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
	struct scatterlist *sg;
	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	unsigned int si;

	VPRINTK("ENTER\n");

	/*
	 * Next, the S/G list.
	 */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
	}

	return si;
}

static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}
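
/*
 * Port error interrupt handling: clear SError, translate the
 * PORT_IRQ_* error bits into libata error masks and EH actions on
 * either the active qc or the active link, then freeze or abort the
 * port so EH takes over.
 */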
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	u32 serror;

	/* determine active link */
	ata_port_for_each_link(link, ap)
		if (ata_link_active(link))
			break;
	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(ap, SCR_ERROR, &serror);
	ahci_scr_write(ap, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link.  There's no active qc on NCQ errors.  It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x",
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		host_ehi->err_mask |= AC_ERR_ATA_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
				  irq_stat & PORT_IRQ_CONNECT ?
				  "connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
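
/*
 * Per-port interrupt handler: acknowledge PORT_IRQ_STAT, hand fatal
 * conditions to ahci_error_intr(), pass asynchronous notification on
 * to libata, and complete finished commands based on PORT_SCR_ACT
 * (NCQ) or PORT_CMD_ISSUE (non-NCQ).
 */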
static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 status, qc_active;
	int rc;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	/* If we are getting PhyRdy, this is
	 * just a power state change, we should
	 * clear out this, plus the PhyRdy/Comm
	 * Wake bits from Serror
	 */
	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
	    (status & PORT_IRQ_PHYRDY)) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some controllers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 */
			const __le32 *f = pp->rx_fis + RX_FIS_SDB;
			u32 f0 = le32_to_cpu(f[0]);

			if (f0 & (1 << 15))
				sata_async_notification(ap);
		}
	}

	/* pp->active_link is valid iff any command is in flight */
	if (ap->qc_active && pp->active_link->sactive)
		qc_active = readl(port_mmio + PORT_SCR_ACT);
	else
		qc_active = readl(port_mmio + PORT_CMD_ISSUE);

	rc = ata_qc_complete_multiple(ap, qc_active);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}
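
/*
 * Top-level shared interrupt handler: read HOST_IRQ_STAT once, service
 * every implemented port whose bit is set, and only then write the
 * status back to acknowledge the (level-latched) host interrupt.
 */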
static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_masked;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = host->iomap[AHCI_PCI_BAR];

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	if (!irq_stat)
		return IRQ_NONE;

	irq_masked = irq_stat & hpriv->port_map;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_masked & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_port_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					   "interrupt on disabled port %u\n", i);
		}

		handled = 1;
	}

	/* HOST_IRQ_STAT behaves as level triggered latch meaning that
	 * it should be cleared after all the port events are cleared;
	 * otherwise, it will raise a spurious interrupt after each
	 * valid one.  Please read section 10.6.2 of ahci 1.1 for more
	 * information.
	 *
	 * Also, use the unmasked value to clear interrupt as spurious
	 * pending event on a dummy port might cause screaming IRQ.
	 */
	writel(irq_stat, mmio + HOST_IRQ_STAT);

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}

static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
	 */
	pp->active_link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);

	ahci_sw_activity(qc->dev->link);

	return 0;
}

static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ahci_port_priv *pp = qc->ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;

	ata_tf_from_fis(d2h_fis, &qc->result_tf);
	return true;
}

static void ahci_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);

	/* turn IRQ off */
	writel(0, port_mmio + PORT_IRQ_MASK);
}

static void ahci_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;
	struct ahci_port_priv *pp = ap->private_data;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	sata_pmp_error_handler(ap);
}

static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		ahci_kick_engine(ap, 1);
}

static void ahci_pmp_attach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd |= PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask |= PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_pmp_detach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd &= ~PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}

#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc == 0)
		ahci_power_down(ap);
	else {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_start_port(ap);
	}

	return rc;
}

static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 ctl;

	if (mesg.event & PM_EVENT_SLEEP) {
		/* AHCI spec rev1.1 section 8.3.3:
		 * Software must disable interrupts prior to requesting a
		 * transition of the HBA to D3 state.
		 */
		ctl = readl(mmio + HOST_CTL);
		ctl &= ~HOST_IRQ_EN;
		writel(ctl, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	return ata_pci_device_suspend(pdev, mesg);
}

static int ahci_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif
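
/*
 * Per-port allocation at attach time: one coherent DMA chunk holds, in
 * order, the 32-slot command list, the received-FIS area and a single
 * command table (FIS + ATAPI CDB + scatter/gather entries); the default
 * interrupt mask is saved and the port is brought up via
 * ahci_port_resume().
 */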
static int ahci_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += AHCI_RX_FIS_SZ;
	mem_dma += AHCI_RX_FIS_SZ;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}

static void ahci_port_stop(struct ata_port *ap)
{
	const char *emsg = NULL;
	int rc;

	/* de-initialize port */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
}
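
/*
 * Pick DMA masks for the controller: when the caller indicates 64-bit
 * support (using_dac, derived from HOST_CAP_64), try 64-bit streaming
 * and coherent masks, falling back to a 32-bit coherent mask;
 * otherwise configure plain 32-bit DMA.
 */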
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
	int rc;

	if (using_dac &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}

static void ahci_print_info(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 vers, cap, impl, speed;
	const char *speed_s;
	u16 cc;
	const char *scc_s;

	vers = readl(mmio + HOST_VERSION);
	cap = hpriv->cap;
	impl = hpriv->port_map;

	speed = (cap >> 20) & 0xf;
	if (speed == 1)
		speed_s = "1.5";
	else if (speed == 2)
		speed_s = "3";
	else
		speed_s = "?";

	pci_read_config_word(pdev, 0x0a, &cc);
	if (cc == PCI_CLASS_STORAGE_IDE)
		scc_s = "IDE";
	else if (cc == PCI_CLASS_STORAGE_SATA)
		scc_s = "SATA";
	else if (cc == PCI_CLASS_STORAGE_RAID)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
		   "AHCI %02x%02x.%02x%02x "
		   "%u slots %u ports %s Gbps 0x%x impl %s mode\n",
		   (vers >> 24) & 0xff,
		   (vers >> 16) & 0xff,
		   (vers >> 8) & 0xff,
		   vers & 0xff,
		   ((cap >> 8) & 0x1f) + 1,
		   (cap & 0x1f) + 1,
		   speed_s,
		   impl,
		   scc_s);

	dev_printk(KERN_INFO, &pdev->dev,
		   "flags: "
		   "%s%s%s%s%s%s%s"
		   "%s%s%s%s%s%s%s"
		   "%s\n",
		   cap & (1 << 31) ? "64bit " : "",
		   cap & (1 << 30) ? "ncq " : "",
		   cap & (1 << 29) ? "sntf " : "",
		   cap & (1 << 28) ? "ilck " : "",
		   cap & (1 << 27) ? "stag " : "",
		   cap & (1 << 26) ? "pm " : "",
		   cap & (1 << 25) ? "led " : "",
		   cap & (1 << 24) ? "clo " : "",
		   cap & (1 << 19) ? "nz " : "",
		   cap & (1 << 18) ? "only " : "",
		   cap & (1 << 17) ? "pmp " : "",
		   cap & (1 << 15) ? "pio " : "",
		   cap & (1 << 14) ? "slum " : "",
		   cap & (1 << 13) ? "part " : "",
		   cap & (1 << 6) ? "ems " : "");
}

/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
 * hardwired to on-board SIMG 4726.  The chipset is ICH8 and doesn't
 * support PMP, and the 4726 either directly exports the device
 * attached to the first downstream port or acts as a hardware storage
 * controller and emulates a single ATA device (can be RAID 0/1 or some
 * other configuration).
 *
 * When there's no device attached to the first downstream port of the
 * 4726, "Config Disk" appears, which is a pseudo ATA device to
 * configure the 4726.  However, ATA emulation of the device is very
 * lame.  It doesn't send signature D2H Reg FIS after the initial
 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
 *
 * The following function works around the problem by always using
 * hardreset on the port and not depending on receiving signature FIS
 * afterward.  If signature FIS isn't received soon, ATA class is
 * assumed without follow-up softreset.
 */
static void ahci_p5wdh_workaround(struct ata_host *host)
{
	static struct dmi_system_id sysids[] = {
		{
			.ident = "P5W DH Deluxe",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR,
					  "ASUSTEK COMPUTER INC"),
				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
			},
		},
		{ }
	};
	struct pci_dev *pdev = to_pci_dev(host->dev);

	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
	    dmi_check_system(sysids)) {
		struct ata_port *ap = host->ports[1];

		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
			   "Deluxe on-board SIMG4726 workaround\n");

		ap->ops = &ahci_p5wdh_ops;
		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
	}
}

static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_id = ent->driver_data;
	struct ata_port_info pi = ahci_port_info[board_id];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	int n_ports, i, rc;

	VPRINTK("ENTER\n");

	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* AHCI controllers often implement an SFF compatible interface.
	 * Grab all PCI BARs just in case.
	 */
	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;

	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
		u8 map;

		/* ICH6s share the same PCI ID for both piix and ahci
		 * modes.  Enabling ahci mode while MAP indicates
		 * combined mode is a bad idea.  Yield to ata_piix.
		 */
		pci_read_config_byte(pdev, ICH_MAP, &map);
		if (map & 0x3) {
			dev_printk(KERN_INFO, &pdev->dev, "controller is in "
				   "combined mode, can't enable AHCI mode\n");
			return -ENODEV;
		}
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->flags |= (unsigned long)pi.private_data;

	/* MCP65 revision A1 and A2 can't do MSI */
	if (board_id == board_ahci_mcp65 &&
	    (pdev->revision == 0xa1 || pdev->revision == 0xa2))
		hpriv->flags |= AHCI_HFLAG_NO_MSI;

	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	/* save initial config */
	ahci_save_initial_config(pdev, hpriv);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
		u8 messages;
		void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
		u32 em_loc = readl(mmio + HOST_EM_LOC);
		u32 em_ctl = readl(mmio + HOST_EM_CTL);

		messages = (em_ctl & 0x000f0000) >> 16;

		/* we only support LED message type right now */
		if ((messages & 0x01) && (ahci_em_messages == 1)) {
			/* store em_loc */
			hpriv->em_loc = ((em_loc >> 16) * 4);
			pi.flags |= ATA_FLAG_EM;
			if (!(em_ctl & EM_CTL_ALHD))
				pi.flags |= ATA_FLAG_SW_ACTIVITY;
		}
	}

	/* CAP.NP sometimes indicates the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);
	host->private_data = hpriv;

	if (pi.flags & ATA_FLAG_EM)
		ahci_reset_em(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
				   0x100 + ap->port_no * 0x80, "port");

		/* set initial link pm policy */
		ap->pm_policy = NOT_AVAILABLE;

		/* set enclosure management message type */
		if (ap->flags & ATA_FLAG_EM)
			ap->em_message_type = ahci_em_messages;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	/* apply workaround for ASUS P5W DH Deluxe mainboard */
	ahci_p5wdh_workaround(host);

	/* initialize adapter */
	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
	if (rc)
		return rc;

	rc = ahci_reset_controller(host);
	if (rc)
		return rc;

	ahci_init_controller(host);
	ahci_print_info(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
				 &ahci_sht);
}

static int __init ahci_init(void)
{
	return pci_register_driver(&ahci_pci_driver);
}

static void __exit ahci_exit(void)
{
	pci_unregister_driver(&ahci_pci_driver);
}


MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(ahci_init);
module_exit(ahci_exit);