ahci.c
/*
 * ahci.c - AHCI SATA support
 *
 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
 *		  Please ALWAYS copy linux-ide@vger.kernel.org
 *		  on emails.
 *
 * Copyright 2004-2005 Red Hat, Inc.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * AHCI hardware documentation:
 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"ahci"
#define DRV_VERSION	"3.0"

static int ahci_skip_host_reset;
module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");

static int ahci_enable_alpm(struct ata_port *ap, enum link_pm policy);
static void ahci_disable_alpm(struct ata_port *ap);

enum {
	AHCI_PCI_BAR = 5,
	AHCI_MAX_PORTS = 32,
	AHCI_MAX_SG = 168, /* hardware max is 64K */
	AHCI_DMA_BOUNDARY = 0xffffffff,
	AHCI_MAX_CMDS = 32,
	AHCI_CMD_SZ = 32,
	AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
	AHCI_RX_FIS_SZ = 256,
	AHCI_CMD_TBL_CDB = 0x40,
	AHCI_CMD_TBL_HDR_SZ = 0x80,
	AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
	AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
	AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
				AHCI_RX_FIS_SZ,

	AHCI_IRQ_ON_SG = (1 << 31),
	AHCI_CMD_ATAPI = (1 << 5),
	AHCI_CMD_WRITE = (1 << 6),
	AHCI_CMD_PREFETCH = (1 << 7),
	AHCI_CMD_RESET = (1 << 8),
	AHCI_CMD_CLR_BUSY = (1 << 10),

	RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
	RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
	RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */

	board_ahci = 0,
	board_ahci_vt8251 = 1,
	board_ahci_ign_iferr = 2,
	board_ahci_sb600 = 3,
	board_ahci_mv = 4,
	board_ahci_sb700 = 5,

	/* global controller registers */
	HOST_CAP = 0x00, /* host capabilities */
	HOST_CTL = 0x04, /* global host control */
	HOST_IRQ_STAT = 0x08, /* interrupt status */
	HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
	HOST_VERSION = 0x10, /* AHCI spec. version compliancy */

	/* HOST_CTL bits */
	HOST_RESET = (1 << 0), /* reset controller; self-clear */
	HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
	HOST_AHCI_EN = (1 << 31), /* AHCI enabled */

	/* HOST_CAP bits */
	HOST_CAP_SSC = (1 << 14), /* Slumber capable */
	HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
	HOST_CAP_CLO = (1 << 24), /* Command List Override support */
	HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
	HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
	HOST_CAP_SNTF = (1 << 29), /* SNotification register */
	HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
	HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */

	/* registers for each SATA port */
	PORT_LST_ADDR = 0x00, /* command list DMA addr */
	PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
	PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
	PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
	PORT_IRQ_STAT = 0x10, /* interrupt status */
	PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
	PORT_CMD = 0x18, /* port command */
	PORT_TFDATA = 0x20, /* taskfile data */
	PORT_SIG = 0x24, /* device TF signature */
	PORT_CMD_ISSUE = 0x38, /* command issue */
	PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
	PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
	PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
	PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
	PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */

	/* PORT_IRQ_{STAT,MASK} bits */
	PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
	PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
	PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
	PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
	PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
	PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
	PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
	PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */

	PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
	PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
	PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
	PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
	PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
	PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
	PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
	PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
	PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */

	PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
			  PORT_IRQ_IF_ERR |
			  PORT_IRQ_CONNECT |
			  PORT_IRQ_PHYRDY |
			  PORT_IRQ_UNK_FIS |
			  PORT_IRQ_BAD_PMP,
	PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
			 PORT_IRQ_TF_ERR |
			 PORT_IRQ_HBUS_DATA_ERR,
	DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
		       PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
		       PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,

	/* PORT_CMD bits */
	PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
	PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
	PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
	PORT_CMD_PMP = (1 << 17), /* PMP attached */
	PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
	PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
	PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
	PORT_CMD_CLO = (1 << 3), /* Command list override */
	PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
	PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
	PORT_CMD_START = (1 << 0), /* Enable port DMA engine */

	PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
	PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
	PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
	PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */

	/* hpriv->flags bits */
	AHCI_HFLAG_NO_NCQ = (1 << 0),
	AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
	AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
	AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
	AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
	AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
	AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
	AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
	AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */

	/* ap->flags bits */
	AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			   ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
			   ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
			   ATA_FLAG_IPM,

	ICH_MAP = 0x90, /* ICH MAP register */
};
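
/*
 * One entry in the per-port command list: this mirrors the 32-byte
 * command header laid out by the AHCI spec (AHCI_CMD_SZ).
 */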
struct ahci_cmd_hdr {
	__le32 opts;
	__le32 status;
	__le32 tbl_addr;
	__le32 tbl_addr_hi;
	__le32 reserved[4];
};

struct ahci_sg {
	__le32 addr;
	__le32 addr_hi;
	__le32 reserved;
	__le32 flags_size;
};

struct ahci_host_priv {
	unsigned int flags; /* AHCI_HFLAG_* */
	u32 cap; /* cap to use */
	u32 port_map; /* port map to use */
	u32 saved_cap; /* saved initial cap */
	u32 saved_port_map; /* saved initial port_map */
};

struct ahci_port_priv {
	struct ata_link *active_link;
	struct ahci_cmd_hdr *cmd_slot;
	dma_addr_t cmd_slot_dma;
	void *cmd_tbl;
	dma_addr_t cmd_tbl_dma;
	void *rx_fis;
	dma_addr_t rx_fis_dma;
	/* for NCQ spurious interrupt analysis */
	unsigned int ncq_saw_d2h:1;
	unsigned int ncq_saw_dmas:1;
	unsigned int ncq_saw_sdb:1;
	u32 intr_mask; /* interrupts to enable */
};

static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_pmp_attach(struct ata_port *ap);
static void ahci_pmp_detach(struct ata_port *ap);
static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline);
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
				 unsigned long deadline);
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline);
static void ahci_postreset(struct ata_link *link, unsigned int *class);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_resume(struct ata_port *ap);
static void ahci_dev_config(struct ata_device *dev);
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts);
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
#endif

static struct device_attribute *ahci_shost_attrs[] = {
	&dev_attr_link_power_management_policy,
	NULL
};

static struct scsi_host_template ahci_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = AHCI_MAX_CMDS - 1,
	.sg_tablesize = AHCI_MAX_SG,
	.dma_boundary = AHCI_DMA_BOUNDARY,
	.shost_attrs = ahci_shost_attrs,
};

static struct ata_port_operations ahci_ops = {
	.inherits = &sata_pmp_port_ops,

	.qc_defer = sata_pmp_qc_defer_cmd_switch,
	.qc_prep = ahci_qc_prep,
	.qc_issue = ahci_qc_issue,
	.qc_fill_rtf = ahci_qc_fill_rtf,

	.freeze = ahci_freeze,
	.thaw = ahci_thaw,
	.softreset = ahci_softreset,
	.hardreset = ahci_hardreset,
	.postreset = ahci_postreset,
	.pmp_softreset = ahci_softreset,
	.error_handler = ahci_error_handler,
	.post_internal_cmd = ahci_post_internal_cmd,
	.dev_config = ahci_dev_config,

	.scr_read = ahci_scr_read,
	.scr_write = ahci_scr_write,
	.pmp_attach = ahci_pmp_attach,
	.pmp_detach = ahci_pmp_detach,

	.enable_pm = ahci_enable_alpm,
	.disable_pm = ahci_disable_alpm,
#ifdef CONFIG_PM
	.port_suspend = ahci_port_suspend,
	.port_resume = ahci_port_resume,
#endif

	.port_start = ahci_port_start,
	.port_stop = ahci_port_stop,
};

static struct ata_port_operations ahci_vt8251_ops = {
	.inherits = &ahci_ops,
	.hardreset = ahci_vt8251_hardreset,
};

static struct ata_port_operations ahci_p5wdh_ops = {
	.inherits = &ahci_ops,
	.hardreset = ahci_p5wdh_hardreset,
};

#define AHCI_HFLAGS(flags) .private_data = (void *)(flags)

static const struct ata_port_info ahci_port_info[] = {
	/* board_ahci */
	{
		.flags = AHCI_FLAG_COMMON,
		.pio_mask = 0x1f, /* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_ops,
	},
	/* board_ahci_vt8251 */
	{
		AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
		.flags = AHCI_FLAG_COMMON,
		.pio_mask = 0x1f, /* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_vt8251_ops,
	},
	/* board_ahci_ign_iferr */
	{
		AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
		.flags = AHCI_FLAG_COMMON,
		.pio_mask = 0x1f, /* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_ops,
	},
	/* board_ahci_sb600 */
	{
		AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
			     AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
			     AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
		.flags = AHCI_FLAG_COMMON,
		.pio_mask = 0x1f, /* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_ops,
	},
	/* board_ahci_mv */
	{
		AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
			     AHCI_HFLAG_MV_PATA),
		.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
		.pio_mask = 0x1f, /* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_ops,
	},
	/* board_ahci_sb700 */
	{
		AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
			     AHCI_HFLAG_NO_PMP),
		.flags = AHCI_FLAG_COMMON,
		.pio_mask = 0x1f, /* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &ahci_ops,
	},
};

static const struct pci_device_id ahci_pci_tbl[] = {
	/* Intel */
	{ PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
	{ PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
	{ PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
	{ PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
	{ PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
	{ PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
	{ PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
	{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
	{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
	{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
	{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
	{ PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
	{ PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
	{ PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
	{ PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
	{ PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
	{ PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */

	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },

	/* ATI */
	{ PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
	{ PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
	{ PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */

	/* VIA */
	{ PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */

	/* NVIDIA */
	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci }, /* MCP65 */
	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
	{ PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bd0), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bd1), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bd2), board_ahci }, /* MCP7B */
	{ PCI_VDEVICE(NVIDIA, 0x0bd3), board_ahci }, /* MCP7B */

	/* SiS */
	{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
	{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
	{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */

	/* Marvell */
	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
	{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */

	/* Generic, PCI class code for AHCI */
	{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },

	{ } /* terminate list */
};

static struct pci_driver ahci_pci_driver = {
	.name = DRV_NAME,
	.id_table = ahci_pci_tbl,
	.probe = ahci_init_one,
	.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend = ahci_pci_device_suspend,
	.resume = ahci_pci_device_resume,
#endif
};

static inline int ahci_nr_ports(u32 cap)
{
	return (cap & 0x1f) + 1;
}
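
/*
 * Each implemented port has its own bank of registers starting at
 * offset 0x100 from the AHCI MMIO base (PCI BAR 5), spaced 0x80 bytes
 * apart; that is what the address arithmetic below computes.
 */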
static inline void __iomem *__ahci_port_base(struct ata_host *host,
					     unsigned int port_no)
{
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];

	return mmio + 0x100 + (port_no * 0x80);
}

static inline void __iomem *ahci_port_base(struct ata_port *ap)
{
	return __ahci_port_base(ap->host, ap->port_no);
}

static void ahci_enable_ahci(void __iomem *mmio)
{
	int i;
	u32 tmp;

	/* turn on AHCI_EN */
	tmp = readl(mmio + HOST_CTL);
	if (tmp & HOST_AHCI_EN)
		return;

	/* Some controllers need AHCI_EN to be written multiple times.
	 * Try a few times before giving up.
	 */
	for (i = 0; i < 5; i++) {
		tmp |= HOST_AHCI_EN;
		writel(tmp, mmio + HOST_CTL);
		tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
		if (tmp & HOST_AHCI_EN)
			return;
		msleep(10);
	}

	WARN_ON(1);
}

/**
 * ahci_save_initial_config - Save and fixup initial config values
 * @pdev: target PCI device
 * @hpriv: host private area to store config values
 *
 * Some registers containing configuration info might be setup by
 * BIOS and might be cleared on reset. This function saves the
 * initial values of those registers into @hpriv such that they
 * can be restored after controller reset.
 *
 * If inconsistent, config values are fixed up by this function.
 *
 * LOCKING:
 * None.
 */
static void ahci_save_initial_config(struct pci_dev *pdev,
				     struct ahci_host_priv *hpriv)
{
	void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
	u32 cap, port_map;
	int i;
	int mv;

	/* make sure AHCI mode is enabled before accessing CAP */
	ahci_enable_ahci(mmio);

	/* Values prefixed with saved_ are written back to host after
	 * reset. Values without are used for driver operation.
	 */
	hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
	hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);

	/* some chips have errata preventing 64bit use */
	if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can't do 64bit DMA, forcing 32bit\n");
		cap &= ~HOST_CAP_64;
	}

	if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can't do NCQ, turning off CAP_NCQ\n");
		cap &= ~HOST_CAP_NCQ;
	}

	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
		dev_printk(KERN_INFO, &pdev->dev,
			   "controller can't do PMP, turning off CAP_PMP\n");
		cap &= ~HOST_CAP_PMP;
	}

	/*
	 * Temporary Marvell 6145 hack: PATA port presence
	 * is asserted through the standard AHCI port
	 * presence register, as bit 4 (counting from 0)
	 */
	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
		if (pdev->device == 0x6121)
			mv = 0x3;
		else
			mv = 0xf;
		dev_printk(KERN_ERR, &pdev->dev,
			   "MV_AHCI HACK: port_map %x -> %x\n",
			   port_map, port_map & mv);
		port_map &= mv;
	}

	/* cross check port_map and cap.n_ports */
	if (port_map) {
		int map_ports = 0;

		for (i = 0; i < AHCI_MAX_PORTS; i++)
			if (port_map & (1 << i))
				map_ports++;

		/* If PI has more ports than n_ports, whine, clear
		 * port_map and let it be generated from n_ports.
		 */
		if (map_ports > ahci_nr_ports(cap)) {
			dev_printk(KERN_WARNING, &pdev->dev,
				   "implemented port map (0x%x) contains more "
				   "ports than nr_ports (%u), using nr_ports\n",
				   port_map, ahci_nr_ports(cap));
			port_map = 0;
		}
	}

	/* fabricate port_map from cap.nr_ports */
	if (!port_map) {
		port_map = (1 << ahci_nr_ports(cap)) - 1;
		dev_printk(KERN_WARNING, &pdev->dev,
			   "forcing PORTS_IMPL to 0x%x\n", port_map);

		/* write the fixed up value to the PI register */
		hpriv->saved_port_map = port_map;
	}

	/* record values to use during operation */
	hpriv->cap = cap;
	hpriv->port_map = port_map;
}

/**
 * ahci_restore_initial_config - Restore initial config
 * @host: target ATA host
 *
 * Restore initial config stored by ahci_save_initial_config().
 *
 * LOCKING:
 * None.
 */
static void ahci_restore_initial_config(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];

	writel(hpriv->saved_cap, mmio + HOST_CAP);
	writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
	(void) readl(mmio + HOST_PORTS_IMPL); /* flush */
}

static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
{
	static const int offset[] = {
		[SCR_STATUS] = PORT_SCR_STAT,
		[SCR_CONTROL] = PORT_SCR_CTL,
		[SCR_ERROR] = PORT_SCR_ERR,
		[SCR_ACTIVE] = PORT_SCR_ACT,
		[SCR_NOTIFICATION] = PORT_SCR_NTF,
	};
	struct ahci_host_priv *hpriv = ap->host->private_data;

	if (sc_reg < ARRAY_SIZE(offset) &&
	    (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
		return offset[sc_reg];
	return 0;
}

static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	int offset = ahci_scr_offset(ap, sc_reg);

	if (offset) {
		*val = readl(port_mmio + offset);
		return 0;
	}
	return -EINVAL;
}

static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	int offset = ahci_scr_offset(ap, sc_reg);

	if (offset) {
		writel(val, port_mmio + offset);
		return 0;
	}
	return -EINVAL;
}

static void ahci_start_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* start DMA */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);
	readl(port_mmio + PORT_CMD); /* flush */
}

static int ahci_stop_engine(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	tmp = readl(port_mmio + PORT_CMD);

	/* check if the HBA is idle */
	if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
		return 0;

	/* setting HBA to idle */
	tmp &= ~PORT_CMD_START;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for engine to stop. This could be as long as 500 msec */
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
	if (tmp & PORT_CMD_LIST_ON)
		return -EIO;

	return 0;
}
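
/*
 * Program the command list and received-FIS buffer addresses before
 * turning on FIS reception.  The high dword is written only when the
 * HBA advertises 64-bit addressing (HOST_CAP_64); the (x >> 16) >> 16
 * split keeps the shift valid when dma_addr_t is only 32 bits wide.
 */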
static void ahci_start_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	u32 tmp;

	/* set FIS registers */
	if (hpriv->cap & HOST_CAP_64)
		writel((pp->cmd_slot_dma >> 16) >> 16,
		       port_mmio + PORT_LST_ADDR_HI);
	writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);

	if (hpriv->cap & HOST_CAP_64)
		writel((pp->rx_fis_dma >> 16) >> 16,
		       port_mmio + PORT_FIS_ADDR_HI);
	writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);

	/* enable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* flush */
	readl(port_mmio + PORT_CMD);
}

static int ahci_stop_fis_rx(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;

	/* disable FIS reception */
	tmp = readl(port_mmio + PORT_CMD);
	tmp &= ~PORT_CMD_FIS_RX;
	writel(tmp, port_mmio + PORT_CMD);

	/* wait for completion, spec says 500ms, give it 1000 */
	tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
				PORT_CMD_FIS_ON, 10, 1000);
	if (tmp & PORT_CMD_FIS_ON)
		return -EBUSY;

	return 0;
}

static void ahci_power_up(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;

	/* spin up device */
	if (hpriv->cap & HOST_CAP_SSS) {
		cmd |= PORT_CMD_SPIN_UP;
		writel(cmd, port_mmio + PORT_CMD);
	}

	/* wake up link */
	writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}

static void ahci_disable_alpm(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;

	/* IPM bits should be disabled by libata-core */
	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/* disable ALPM and ASP */
	cmd &= ~PORT_CMD_ASP;
	cmd &= ~PORT_CMD_ALPE;

	/* force the interface back to active */
	cmd |= PORT_CMD_ICC_ACTIVE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* wait 10ms to be sure we've come out of any low power state */
	msleep(10);

	/* clear out any PhyRdy stuff from interrupt status */
	writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);

	/* go ahead and clean out PhyRdy Change from Serror too */
	ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));

	/*
	 * Clear flag to indicate that we should ignore all PhyRdy
	 * state changes
	 */
	hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;

	/*
	 * Enable interrupts on Phy Ready.
	 */
	pp->intr_mask |= PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * don't change the link pm policy - we can be called
	 * just to turn off link pm temporarily
	 */
}

static int ahci_enable_alpm(struct ata_port *ap, enum link_pm policy)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd;
	struct ahci_port_priv *pp = ap->private_data;
	u32 asp;

	/* Make sure the host is capable of link power management */
	if (!(hpriv->cap & HOST_CAP_ALPM))
		return -EINVAL;

	switch (policy) {
	case MAX_PERFORMANCE:
	case NOT_AVAILABLE:
		/*
		 * if we came here with NOT_AVAILABLE,
		 * it just means this is the first time we
		 * have tried to enable - default to max performance,
		 * and let the user go to lower power modes on request.
		 */
		ahci_disable_alpm(ap);
		return 0;
	case MIN_POWER:
		/* configure HBA to enter SLUMBER */
		asp = PORT_CMD_ASP;
		break;
	case MEDIUM_POWER:
		/* configure HBA to enter PARTIAL */
		asp = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Disable interrupts on Phy Ready. This keeps us from
	 * getting woken up due to spurious phy ready interrupts
	 * TBD - Hot plug should be done via polling now, is
	 * that even supported?
	 */
	pp->intr_mask &= ~PORT_IRQ_PHYRDY;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);

	/*
	 * Set a flag to indicate that we should ignore all PhyRdy
	 * state changes since these can happen now whenever we
	 * change link state
	 */
	hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;

	/* get the existing command bits */
	cmd = readl(port_mmio + PORT_CMD);

	/*
	 * Set ASP based on Policy
	 */
	cmd |= asp;

	/*
	 * Setting this bit will instruct the HBA to aggressively
	 * enter a lower power link state when it's appropriate and
	 * based on the value set above for ASP
	 */
	cmd |= PORT_CMD_ALPE;

	/* write out new cmd value */
	writel(cmd, port_mmio + PORT_CMD);
	cmd = readl(port_mmio + PORT_CMD);

	/* IPM bits should be set by libata-core */
	return 0;
}

#ifdef CONFIG_PM
static void ahci_power_down(struct ata_port *ap)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 cmd, scontrol;

	if (!(hpriv->cap & HOST_CAP_SSS))
		return;

	/* put device into listen mode, first set PxSCTL.DET to 0 */
	scontrol = readl(port_mmio + PORT_SCR_CTL);
	scontrol &= ~0xf;
	writel(scontrol, port_mmio + PORT_SCR_CTL);

	/* then set PxCMD.SUD to 0 */
	cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
	cmd &= ~PORT_CMD_SPIN_UP;
	writel(cmd, port_mmio + PORT_CMD);
}
#endif

static void ahci_start_port(struct ata_port *ap)
{
	/* enable FIS reception */
	ahci_start_fis_rx(ap);

	/* enable DMA */
	ahci_start_engine(ap);
}

static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
{
	int rc;

	/* disable DMA */
	rc = ahci_stop_engine(ap);
	if (rc) {
		*emsg = "failed to stop engine";
		return rc;
	}

	/* disable FIS reception */
	rc = ahci_stop_fis_rx(ap);
	if (rc) {
		*emsg = "failed stop FIS RX";
		return rc;
	}

	return 0;
}
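
/*
 * Global HBA reset.  HOST_RESET is self-clearing; the controller gets
 * one second to finish before it is treated as dead.  On Intel parts
 * the implemented-port mask is also OR'd into the PCS register at PCI
 * config offset 0x92 afterwards.
 */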
static int ahci_reset_controller(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct ahci_host_priv *hpriv = host->private_data;
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 tmp;

	/* we must be in AHCI mode, before using anything
	 * AHCI-specific, such as HOST_RESET.
	 */
	ahci_enable_ahci(mmio);

	/* global controller reset */
	if (!ahci_skip_host_reset) {
		tmp = readl(mmio + HOST_CTL);
		if ((tmp & HOST_RESET) == 0) {
			writel(tmp | HOST_RESET, mmio + HOST_CTL);
			readl(mmio + HOST_CTL); /* flush */
		}

		/* reset must complete within 1 second, or
		 * the hardware should be considered fried.
		 */
		ssleep(1);

		tmp = readl(mmio + HOST_CTL);
		if (tmp & HOST_RESET) {
			dev_printk(KERN_ERR, host->dev,
				   "controller reset failed (0x%x)\n", tmp);
			return -EIO;
		}

		/* turn on AHCI mode */
		ahci_enable_ahci(mmio);

		/* Some registers might be cleared on reset. Restore
		 * initial values.
		 */
		ahci_restore_initial_config(host);
	} else
		dev_printk(KERN_INFO, host->dev,
			   "skipping global host reset\n");

	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
		u16 tmp16;

		/* configure PCS */
		pci_read_config_word(pdev, 0x92, &tmp16);
		if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
			tmp16 |= hpriv->port_map;
			pci_write_config_word(pdev, 0x92, tmp16);
		}
	}

	return 0;
}

static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
			   int port_no, void __iomem *mmio,
			   void __iomem *port_mmio)
{
	const char *emsg = NULL;
	int rc;
	u32 tmp;

	/* make sure port is not active */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		dev_printk(KERN_WARNING, &pdev->dev,
			   "%s (%d)\n", emsg, rc);

	/* clear SError */
	tmp = readl(port_mmio + PORT_SCR_ERR);
	VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
	writel(tmp, port_mmio + PORT_SCR_ERR);

	/* clear port IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
	if (tmp)
		writel(tmp, port_mmio + PORT_IRQ_STAT);

	writel(1 << port_no, mmio + HOST_IRQ_STAT);
}

static void ahci_init_controller(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	int i;
	void __iomem *port_mmio;
	u32 tmp;
	int mv;

	if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
		if (pdev->device == 0x6121)
			mv = 2;
		else
			mv = 4;
		port_mmio = __ahci_port_base(host, mv);

		writel(0, port_mmio + PORT_IRQ_MASK);

		/* clear port IRQ */
		tmp = readl(port_mmio + PORT_IRQ_STAT);
		VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
		if (tmp)
			writel(tmp, port_mmio + PORT_IRQ_STAT);
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		port_mmio = ahci_port_base(ap);
		if (ata_port_is_dummy(ap))
			continue;

		ahci_port_init(pdev, ap, i, mmio, port_mmio);
	}

	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
	writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
	tmp = readl(mmio + HOST_CTL);
	VPRINTK("HOST_CTL 0x%x\n", tmp);
}

static void ahci_dev_config(struct ata_device *dev)
{
	struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;

	if (hpriv->flags & AHCI_HFLAG_SECT255) {
		dev->max_sectors = 255;
		ata_dev_printk(dev, KERN_INFO,
			       "SB600 AHCI: limiting to 255 sectors per cmd\n");
	}
}

static unsigned int ahci_dev_classify(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_taskfile tf;
	u32 tmp;

	tmp = readl(port_mmio + PORT_SIG);
	tf.lbah = (tmp >> 24) & 0xff;
	tf.lbam = (tmp >> 16) & 0xff;
	tf.lbal = (tmp >> 8) & 0xff;
	tf.nsect = (tmp) & 0xff;

	return ata_dev_classify(&tf);
}
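
/*
 * Fill one 32-byte command header in the port's command list: opts
 * packs the FIS length (in dwords), command flags, PMP number and PRD
 * entry count, while tbl_addr/tbl_addr_hi point at the command table
 * reserved for this tag.
 */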
static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
			       u32 opts)
{
	dma_addr_t cmd_tbl_dma;

	cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;

	pp->cmd_slot[tag].opts = cpu_to_le32(opts);
	pp->cmd_slot[tag].status = 0;
	pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
	pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
}
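
/*
 * Kick a stuck port: stop the DMA engine and, if the device still
 * reports BSY or DRQ, issue a Command List Override (CLO) when the
 * HBA supports it, then restart the engine.
 */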
static int ahci_kick_engine(struct ata_port *ap, int force_restart)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_host_priv *hpriv = ap->host->private_data;
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
	u32 tmp;
	int busy, rc;

	/* do we need to kick the port? */
	busy = status & (ATA_BUSY | ATA_DRQ);
	if (!busy && !force_restart)
		return 0;

	/* stop engine */
	rc = ahci_stop_engine(ap);
	if (rc)
		goto out_restart;

	/* need to do CLO? */
	if (!busy) {
		rc = 0;
		goto out_restart;
	}

	if (!(hpriv->cap & HOST_CAP_CLO)) {
		rc = -EOPNOTSUPP;
		goto out_restart;
	}

	/* perform CLO */
	tmp = readl(port_mmio + PORT_CMD);
	tmp |= PORT_CMD_CLO;
	writel(tmp, port_mmio + PORT_CMD);

	rc = 0;
	tmp = ata_wait_register(port_mmio + PORT_CMD,
				PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
	if (tmp & PORT_CMD_CLO)
		rc = -EIO;

	/* restart engine */
 out_restart:
	ahci_start_engine(ap);
	return rc;
}

static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
				struct ata_taskfile *tf, int is_cmd, u16 flags,
				unsigned long timeout_msec)
{
	const u32 cmd_fis_len = 5; /* five dwords */
	struct ahci_port_priv *pp = ap->private_data;
	void __iomem *port_mmio = ahci_port_base(ap);
	u8 *fis = pp->cmd_tbl;
	u32 tmp;

	/* prep the command */
	ata_tf_to_fis(tf, pmp, is_cmd, fis);
	ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));

	/* issue & wait */
	writel(1, port_mmio + PORT_CMD_ISSUE);

	if (timeout_msec) {
		tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
					1, timeout_msec);
		if (tmp & 0x1) {
			ahci_kick_engine(ap, 1);
			return -EBUSY;
		}
	} else
		readl(port_mmio + PORT_CMD_ISSUE); /* flush */

	return 0;
}

static int ahci_check_ready(struct ata_link *link)
{
	void __iomem *port_mmio = ahci_port_base(link->ap);
	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;

	return ata_check_ready(status);
}
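
/*
 * Software reset per AHCI 1.1 section 10.4.1: send a Register FIS
 * with SRST set, pause briefly, send a second FIS with SRST cleared,
 * then wait for the device to report ready and classify it from the
 * signature.
 */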
static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	int pmp = sata_srst_pmp(link);
	const char *reason = NULL;
	unsigned long now, msecs;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	/* prepare for SRST (AHCI-1.1 10.4.1) */
	rc = ahci_kick_engine(ap, 1);
	if (rc && rc != -EOPNOTSUPP)
		ata_link_printk(link, KERN_WARNING,
				"failed to reset engine (errno=%d)\n", rc);

	ata_tf_init(link->device, &tf);

	/* issue the first H2D Register FIS */
	msecs = 0;
	now = jiffies;
	if (time_after(deadline, now))
		msecs = jiffies_to_msecs(deadline - now);

	tf.ctl |= ATA_SRST;
	if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
				 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
		rc = -EIO;
		reason = "1st FIS failed";
		goto fail;
	}

	/* spec says at least 5us, but be generous and sleep for 1ms */
	msleep(1);

	/* issue the second H2D Register FIS */
	tf.ctl &= ~ATA_SRST;
	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);

	/* wait for link to become ready */
	rc = ata_wait_after_reset(link, deadline, ahci_check_ready);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		reason = "device not ready";
		goto fail;
	}
	*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;

 fail:
	ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
	return rc;
}

static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}

static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline, &online, NULL);

	ahci_start_engine(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);

	/* vt8251 doesn't clear BSY on signature FIS reception,
	 * request follow-up softreset.
	 */
	return online ? -EAGAIN : rc;
}

static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline, &online, NULL);

	ahci_start_engine(ap);

	/* The pseudo configuration device on SIMG4726 attached to
	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
	 * hardreset if no device is attached to the first downstream
	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
	 * work around this, wait for !BSY only briefly.  If BSY isn't
	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
	 *
	 * Wait for two seconds.  Devices attached to downstream port
	 * which can't process the following IDENTIFY after this will
	 * have to be reset again.  For most cases, this should
	 * suffice while making probing snappish enough.
	 */
	if (online) {
		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
					  ahci_check_ready);
		if (rc)
			ahci_kick_engine(ap, 0);
	}
	return rc;
}
  1222. static void ahci_postreset(struct ata_link *link, unsigned int *class)
  1223. {
  1224. struct ata_port *ap = link->ap;
  1225. void __iomem *port_mmio = ahci_port_base(ap);
  1226. u32 new_tmp, tmp;
  1227. ata_std_postreset(link, class);
  1228. /* Make sure port's ATAPI bit is set appropriately */
  1229. new_tmp = tmp = readl(port_mmio + PORT_CMD);
  1230. if (*class == ATA_DEV_ATAPI)
  1231. new_tmp |= PORT_CMD_ATAPI;
  1232. else
  1233. new_tmp &= ~PORT_CMD_ATAPI;
  1234. if (new_tmp != tmp) {
  1235. writel(new_tmp, port_mmio + PORT_CMD);
  1236. readl(port_mmio + PORT_CMD); /* flush */
  1237. }
  1238. }
  1239. static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
  1240. {
  1241. struct scatterlist *sg;
  1242. struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
  1243. unsigned int si;
  1244. VPRINTK("ENTER\n");
  1245. /*
  1246. * Next, the S/G list.
  1247. */
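        /*
         * Each ahci_sg entry is one PRD: the DMA address is split into
         * low and high 32-bit halves (the double 16-bit shift stays valid
         * when dma_addr_t is only 32 bits wide) and flags_size carries
         * the byte count minus one.
         */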
        for_each_sg(qc->sg, sg, qc->n_elem, si) {
                dma_addr_t addr = sg_dma_address(sg);
                u32 sg_len = sg_dma_len(sg);

                ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
                ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
                ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
        }

        return si;
}

static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct ahci_port_priv *pp = ap->private_data;
        int is_atapi = ata_is_atapi(qc->tf.protocol);
        void *cmd_tbl;
        u32 opts;
        const u32 cmd_fis_len = 5; /* five dwords */
        unsigned int n_elem;

        /*
         * Fill in command table information.  First, the header,
         * a SATA Register - Host to Device command FIS.
         */
        cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

        ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
        if (is_atapi) {
                memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
                memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
        }

        n_elem = 0;
        if (qc->flags & ATA_QCFLAG_DMAMAP)
                n_elem = ahci_fill_sg(qc, cmd_tbl);

        /*
         * Fill in command slot information.
         */
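        /*
         * opts becomes DW0 of the AHCI command header: bits 4:0 carry the
         * command FIS length in dwords, bit 5 the ATAPI flag, bit 6 the
         * write direction, bit 7 prefetch, bits 15:12 the PMP port number
         * and bits 31:16 the PRD entry count.
         */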
        opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
        if (qc->tf.flags & ATA_TFLAG_WRITE)
                opts |= AHCI_CMD_WRITE;
        if (is_atapi)
                opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

        ahci_fill_cmd_slot(pp, qc->tag, opts);
}

static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
        struct ahci_host_priv *hpriv = ap->host->private_data;
        struct ahci_port_priv *pp = ap->private_data;
        struct ata_eh_info *host_ehi = &ap->link.eh_info;
        struct ata_link *link = NULL;
        struct ata_queued_cmd *active_qc;
        struct ata_eh_info *active_ehi;
        u32 serror;

        /* determine active link */
        ata_port_for_each_link(link, ap)
                if (ata_link_active(link))
                        break;
        if (!link)
                link = &ap->link;

        active_qc = ata_qc_from_tag(ap, link->active_tag);
        active_ehi = &link->eh_info;

        /* record irq stat */
        ata_ehi_clear_desc(host_ehi);
        ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

        /* AHCI needs SError cleared; otherwise, it might lock up */
        ahci_scr_read(ap, SCR_ERROR, &serror);
        ahci_scr_write(ap, SCR_ERROR, serror);
        host_ehi->serror |= serror;

        /* some controllers set IRQ_IF_ERR on device errors, ignore it */
        if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
                irq_stat &= ~PORT_IRQ_IF_ERR;

        if (irq_stat & PORT_IRQ_TF_ERR) {
                /* If qc is active, charge it; otherwise, the active
                 * link.  There's no active qc on NCQ errors.  It will
                 * be determined by EH by reading log page 10h.
                 */
                if (active_qc)
                        active_qc->err_mask |= AC_ERR_DEV;
                else
                        active_ehi->err_mask |= AC_ERR_DEV;

                if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
                        host_ehi->serror &= ~SERR_INTERNAL;
        }

        if (irq_stat & PORT_IRQ_UNK_FIS) {
                u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

                active_ehi->err_mask |= AC_ERR_HSM;
                active_ehi->action |= ATA_EH_RESET;
                ata_ehi_push_desc(active_ehi,
                                  "unknown FIS %08x %08x %08x %08x",
                                  unk[0], unk[1], unk[2], unk[3]);
        }

        if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
                active_ehi->err_mask |= AC_ERR_HSM;
                active_ehi->action |= ATA_EH_RESET;
                ata_ehi_push_desc(active_ehi, "incorrect PMP");
        }

        if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
                host_ehi->err_mask |= AC_ERR_HOST_BUS;
                host_ehi->action |= ATA_EH_RESET;
                ata_ehi_push_desc(host_ehi, "host bus error");
        }

        if (irq_stat & PORT_IRQ_IF_ERR) {
                host_ehi->err_mask |= AC_ERR_ATA_BUS;
                host_ehi->action |= ATA_EH_RESET;
                ata_ehi_push_desc(host_ehi, "interface fatal error");
        }

        if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
                ata_ehi_hotplugged(host_ehi);
                ata_ehi_push_desc(host_ehi, "%s",
                                  irq_stat & PORT_IRQ_CONNECT ?
                                  "connection status changed" : "PHY RDY changed");
        }

        /* okay, let's hand over to EH */
        if (irq_stat & PORT_IRQ_FREEZE)
                ata_port_freeze(ap);
        else
                ata_port_abort(ap);
}

static void ahci_port_intr(struct ata_port *ap)
{
        void __iomem *port_mmio = ahci_port_base(ap);
        struct ata_eh_info *ehi = &ap->link.eh_info;
        struct ahci_port_priv *pp = ap->private_data;
        struct ahci_host_priv *hpriv = ap->host->private_data;
        int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
        u32 status, qc_active;
        int rc;

        status = readl(port_mmio + PORT_IRQ_STAT);
        writel(status, port_mmio + PORT_IRQ_STAT);

        /* ignore BAD_PMP while resetting */
        if (unlikely(resetting))
                status &= ~PORT_IRQ_BAD_PMP;

        /* If we are getting PhyRdy, this is only a power state change;
         * clear the bit here, and also clear the PhyRdy/Comm Wake bits
         * from SError.
         */
        if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
            (status & PORT_IRQ_PHYRDY)) {
                status &= ~PORT_IRQ_PHYRDY;
                ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
        }

        if (unlikely(status & PORT_IRQ_ERROR)) {
                ahci_error_intr(ap, status);
                return;
        }

        if (status & PORT_IRQ_SDB_FIS) {
                /* If SNotification is available, leave notification
                 * handling to sata_async_notification().  If not,
                 * emulate it by snooping SDB FIS RX area.
                 *
                 * Snooping FIS RX area is probably cheaper than
                 * poking SNotification but some controllers which
                 * implement SNotification, ICH9 for example, don't
                 * store AN SDB FIS into receive area.
                 */
                if (hpriv->cap & HOST_CAP_SNTF)
                        sata_async_notification(ap);
                else {
                        /* If the 'N' bit in word 0 of the FIS is set,
                         * we just received asynchronous notification.
                         * Tell libata about it.
                         */
                        const __le32 *f = pp->rx_fis + RX_FIS_SDB;
                        u32 f0 = le32_to_cpu(f[0]);

                        if (f0 & (1 << 15))
                                sata_async_notification(ap);
                }
        }

        /* pp->active_link is valid iff any command is in flight */
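        /* For NCQ, outstanding tags are tracked in PORT_SCR_ACT; for
         * non-NCQ commands the issued slot is tracked in PORT_CMD_ISSUE.
         * Either way, ata_qc_complete_multiple() completes every command
         * whose bit has cleared from the qc_active mask read below.
         */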
        if (ap->qc_active && pp->active_link->sactive)
                qc_active = readl(port_mmio + PORT_SCR_ACT);
        else
                qc_active = readl(port_mmio + PORT_CMD_ISSUE);

        rc = ata_qc_complete_multiple(ap, qc_active);

        /* while resetting, invalid completions are expected */
        if (unlikely(rc < 0 && !resetting)) {
                ehi->err_mask |= AC_ERR_HSM;
                ehi->action |= ATA_EH_RESET;
                ata_port_freeze(ap);
        }
}
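
/*
 * Top-level interrupt handler.  HOST_IRQ_STAT carries one pending bit
 * per port; each pending port is serviced by ahci_port_intr() and the
 * handled bits are then acknowledged back to HOST_IRQ_STAT in a single
 * write.
 */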
static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        struct ahci_host_priv *hpriv;
        unsigned int i, handled = 0;
        void __iomem *mmio;
        u32 irq_stat, irq_ack = 0;

        VPRINTK("ENTER\n");

        hpriv = host->private_data;
        mmio = host->iomap[AHCI_PCI_BAR];

        /* sigh.  0xffffffff is a valid return from h/w */
        irq_stat = readl(mmio + HOST_IRQ_STAT);
        irq_stat &= hpriv->port_map;
        if (!irq_stat)
                return IRQ_NONE;

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap;

                if (!(irq_stat & (1 << i)))
                        continue;

                ap = host->ports[i];
                if (ap) {
                        ahci_port_intr(ap);
                        VPRINTK("port %u\n", i);
                } else {
                        VPRINTK("port %u (no irq)\n", i);
                        if (ata_ratelimit())
                                dev_printk(KERN_WARNING, host->dev,
                                           "interrupt on disabled port %u\n", i);
                }

                irq_ack |= (1 << i);
        }

        if (irq_ack) {
                writel(irq_ack, mmio + HOST_IRQ_STAT);
                handled = 1;
        }

        spin_unlock(&host->lock);

        VPRINTK("EXIT\n");

        return IRQ_RETVAL(handled);
}

static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *port_mmio = ahci_port_base(ap);
        struct ahci_port_priv *pp = ap->private_data;

        /* Keep track of the currently active link.  It will be used
         * in completion path to determine whether NCQ phase is in
         * progress.
         */
        pp->active_link = qc->dev->link;
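
        /* For an NCQ command the tag bit is set in PORT_SCR_ACT before
         * the same bit is written to PORT_CMD_ISSUE; the read-back below
         * flushes the posted MMIO writes.
         */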
        if (qc->tf.protocol == ATA_PROT_NCQ)
                writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
        writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
        readl(port_mmio + PORT_CMD_ISSUE); /* flush */

        return 0;
}

static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
        struct ahci_port_priv *pp = qc->ap->private_data;
        u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;

        ata_tf_from_fis(d2h_fis, &qc->result_tf);
        return true;
}

static void ahci_freeze(struct ata_port *ap)
{
        void __iomem *port_mmio = ahci_port_base(ap);

        /* turn IRQ off */
        writel(0, port_mmio + PORT_IRQ_MASK);
}

static void ahci_thaw(struct ata_port *ap)
{
        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
        void __iomem *port_mmio = ahci_port_base(ap);
        u32 tmp;
        struct ahci_port_priv *pp = ap->private_data;

        /* clear IRQ */
        tmp = readl(port_mmio + PORT_IRQ_STAT);
        writel(tmp, port_mmio + PORT_IRQ_STAT);
        writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);

        /* turn IRQ back on */
        writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_error_handler(struct ata_port *ap)
{
        if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
                /* restart engine */
                ahci_stop_engine(ap);
                ahci_start_engine(ap);
        }

        sata_pmp_error_handler(ap);
}

static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;

        /* make DMA engine forget about the failed command */
        if (qc->flags & ATA_QCFLAG_FAILED)
                ahci_kick_engine(ap, 1);
}

static void ahci_pmp_attach(struct ata_port *ap)
{
        void __iomem *port_mmio = ahci_port_base(ap);
        struct ahci_port_priv *pp = ap->private_data;
        u32 cmd;

        cmd = readl(port_mmio + PORT_CMD);
        cmd |= PORT_CMD_PMP;
        writel(cmd, port_mmio + PORT_CMD);

        pp->intr_mask |= PORT_IRQ_BAD_PMP;
        writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_pmp_detach(struct ata_port *ap)
{
        void __iomem *port_mmio = ahci_port_base(ap);
        struct ahci_port_priv *pp = ap->private_data;
        u32 cmd;

        cmd = readl(port_mmio + PORT_CMD);
        cmd &= ~PORT_CMD_PMP;
        writel(cmd, port_mmio + PORT_CMD);

        pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
        writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static int ahci_port_resume(struct ata_port *ap)
{
        ahci_power_up(ap);
        ahci_start_port(ap);

        if (sata_pmp_attached(ap))
                ahci_pmp_attach(ap);
        else
                ahci_pmp_detach(ap);

        return 0;
}

#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        const char *emsg = NULL;
        int rc;

        rc = ahci_deinit_port(ap, &emsg);
        if (rc == 0)
                ahci_power_down(ap);
        else {
                ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
                ahci_start_port(ap);
        }

        return rc;
}

static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
        u32 ctl;

        if (mesg.event & PM_EVENT_SLEEP) {
                /* AHCI spec rev1.1 section 8.3.3:
                 * Software must disable interrupts prior to requesting a
                 * transition of the HBA to D3 state.
                 */
                ctl = readl(mmio + HOST_CTL);
                ctl &= ~HOST_IRQ_EN;
                writel(ctl, mmio + HOST_CTL);
                readl(mmio + HOST_CTL); /* flush */
        }

        return ata_pci_device_suspend(pdev, mesg);
}

static int ahci_pci_device_resume(struct pci_dev *pdev)
{
        struct ata_host *host = dev_get_drvdata(&pdev->dev);
        int rc;

        rc = ata_pci_device_do_resume(pdev);
        if (rc)
                return rc;

        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
                rc = ahci_reset_controller(host);
                if (rc)
                        return rc;

                ahci_init_controller(host);
        }

        ata_host_resume(host);

        return 0;
}
#endif

static int ahci_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct ahci_port_priv *pp;
        void *mem;
        dma_addr_t mem_dma;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
                                  GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory: 32-slot command list,
         * one 32-byte command header per slot
         */
        pp->cmd_slot = mem;
        pp->cmd_slot_dma = mem_dma;

        mem += AHCI_CMD_SLOT_SZ;
        mem_dma += AHCI_CMD_SLOT_SZ;

        /*
         * Second item: Received-FIS area
         */
        pp->rx_fis = mem;
        pp->rx_fis_dma = mem_dma;

        mem += AHCI_RX_FIS_SZ;
        mem_dma += AHCI_RX_FIS_SZ;

        /*
         * Third item: command tables, one per command slot, each holding
         * the command FIS, the ATAPI CDB and the scatter-gather table
         */
        pp->cmd_tbl = mem;
        pp->cmd_tbl_dma = mem_dma;

        /*
         * Save off initial list of interrupts to be enabled.
         * This could be changed later
         */
        pp->intr_mask = DEF_PORT_IRQ;

        ap->private_data = pp;

        /* engage engines, captain */
        return ahci_port_resume(ap);
}

static void ahci_port_stop(struct ata_port *ap)
{
        const char *emsg = NULL;
        int rc;

        /* de-initialize port */
        rc = ahci_deinit_port(ap, &emsg);
        if (rc)
                ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
}

static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
        int rc;
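
        /* Try 64-bit streaming DMA first when using_dac is set (CAP.S64A
         * in the caller); if the 64-bit consistent mask is refused, fall
         * back to a 32-bit one.  Otherwise use 32-bit masks throughout.
         */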
        if (using_dac &&
            !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (rc) {
                        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (rc) {
                                dev_printk(KERN_ERR, &pdev->dev,
                                           "64-bit DMA enable failed\n");
                                return rc;
                        }
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit DMA enable failed\n");
                        return rc;
                }
                rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit consistent DMA enable failed\n");
                        return rc;
                }
        }
        return 0;
}

static void ahci_print_info(struct ata_host *host)
{
        struct ahci_host_priv *hpriv = host->private_data;
        struct pci_dev *pdev = to_pci_dev(host->dev);
        void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
        u32 vers, cap, impl, speed;
        const char *speed_s;
        u16 cc;
        const char *scc_s;

        vers = readl(mmio + HOST_VERSION);
        cap = hpriv->cap;
        impl = hpriv->port_map;
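
        /* CAP.ISS (bits 23:20) encodes the highest supported interface
         * speed: 1 = 1.5 Gbps, 2 = 3 Gbps.
         */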
        speed = (cap >> 20) & 0xf;
        if (speed == 1)
                speed_s = "1.5";
        else if (speed == 2)
                speed_s = "3";
        else
                speed_s = "?";

        pci_read_config_word(pdev, 0x0a, &cc);
        if (cc == PCI_CLASS_STORAGE_IDE)
                scc_s = "IDE";
        else if (cc == PCI_CLASS_STORAGE_SATA)
                scc_s = "SATA";
        else if (cc == PCI_CLASS_STORAGE_RAID)
                scc_s = "RAID";
        else
                scc_s = "unknown";

        dev_printk(KERN_INFO, &pdev->dev,
                   "AHCI %02x%02x.%02x%02x "
                   "%u slots %u ports %s Gbps 0x%x impl %s mode\n",
                   (vers >> 24) & 0xff,
                   (vers >> 16) & 0xff,
                   (vers >> 8) & 0xff,
                   vers & 0xff,
                   ((cap >> 8) & 0x1f) + 1,
                   (cap & 0x1f) + 1,
                   speed_s,
                   impl,
                   scc_s);

        dev_printk(KERN_INFO, &pdev->dev,
                   "flags: "
                   "%s%s%s%s%s%s%s"
                   "%s%s%s%s%s%s%s\n",
                   cap & (1 << 31) ? "64bit " : "",
                   cap & (1 << 30) ? "ncq " : "",
                   cap & (1 << 29) ? "sntf " : "",
                   cap & (1 << 28) ? "ilck " : "",
                   cap & (1 << 27) ? "stag " : "",
                   cap & (1 << 26) ? "pm " : "",
                   cap & (1 << 25) ? "led " : "",
                   cap & (1 << 24) ? "clo " : "",
                   cap & (1 << 19) ? "nz " : "",
                   cap & (1 << 18) ? "only " : "",
                   cap & (1 << 17) ? "pmp " : "",
                   cap & (1 << 15) ? "pio " : "",
                   cap & (1 << 14) ? "slum " : "",
                   cap & (1 << 13) ? "part " : "");
}

/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
 * hardwired to the on-board SIMG 4726.  The chipset is ICH8 and doesn't
 * support PMP.  The 4726 either directly exports the device attached to
 * the first downstream port or acts as a hardware storage controller
 * and emulates a single ATA device (can be RAID 0/1 or some other
 * configuration).
 *
 * When there's no device attached to the first downstream port of the
 * 4726, "Config Disk" appears, which is a pseudo ATA device used to
 * configure the 4726.  However, ATA emulation of the device is very
 * lame.  It doesn't send a signature D2H Reg FIS after the initial
 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
 *
 * The following function works around the problem by always using
 * hardreset on the port and not depending on receiving a signature FIS
 * afterward.  If a signature FIS isn't received soon, ATA class is
 * assumed without a follow-up softreset.
 */
static void ahci_p5wdh_workaround(struct ata_host *host)
{
        static struct dmi_system_id sysids[] = {
                {
                        .ident = "P5W DH Deluxe",
                        .matches = {
                                DMI_MATCH(DMI_SYS_VENDOR,
                                          "ASUSTEK COMPUTER INC"),
                                DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
                        },
                },
                { }
        };
        struct pci_dev *pdev = to_pci_dev(host->dev);

        if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
            dmi_check_system(sysids)) {
                struct ata_port *ap = host->ports[1];

                dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
                           "Deluxe on-board SIMG4726 workaround\n");

                ap->ops = &ahci_p5wdh_ops;
                ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
        }
}

static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version;
        struct ata_port_info pi = ahci_port_info[ent->driver_data];
        const struct ata_port_info *ppi[] = { &pi, NULL };
        struct device *dev = &pdev->dev;
        struct ahci_host_priv *hpriv;
        struct ata_host *host;
        int n_ports, i, rc;

        VPRINTK("ENTER\n");

        WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

        /* acquire resources */
        rc = pcim_enable_device(pdev);
        if (rc)
                return rc;

        /* AHCI controllers often implement SFF compatible interface.
         * Grab all PCI BARs just in case.
         */
        rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
        if (rc == -EBUSY)
                pcim_pin_device(pdev);
        if (rc)
                return rc;

        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
            (pdev->device == 0x2652 || pdev->device == 0x2653)) {
                u8 map;

                /* ICH6s share the same PCI ID for both piix and ahci
                 * modes.  Enabling ahci mode while MAP indicates
                 * combined mode is a bad idea.  Yield to ata_piix.
                 */
                pci_read_config_byte(pdev, ICH_MAP, &map);
                if (map & 0x3) {
                        dev_printk(KERN_INFO, &pdev->dev, "controller is in "
                                   "combined mode, can't enable AHCI mode\n");
                        return -ENODEV;
                }
        }

        hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv)
                return -ENOMEM;
        hpriv->flags |= (unsigned long)pi.private_data;

        if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
                pci_intx(pdev, 1);

        /* save initial config */
        ahci_save_initial_config(pdev, hpriv);

        /* prepare host */
        if (hpriv->cap & HOST_CAP_NCQ)
                pi.flags |= ATA_FLAG_NCQ;

        if (hpriv->cap & HOST_CAP_PMP)
                pi.flags |= ATA_FLAG_PMP;

        /* CAP.NP sometimes indicates the index of the last enabled
         * port, at other times, that of the last possible port, so
         * determining the maximum port number requires looking at
         * both CAP.NP and port_map.
         */
        n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
        if (!host)
                return -ENOMEM;
        host->iomap = pcim_iomap_table(pdev);
        host->private_data = hpriv;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];

                ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
                ata_port_pbar_desc(ap, AHCI_PCI_BAR,
                                   0x100 + ap->port_no * 0x80, "port");

                /* set initial link pm policy */
                ap->pm_policy = NOT_AVAILABLE;

                /* disabled/not-implemented port */
                if (!(hpriv->port_map & (1 << i)))
                        ap->ops = &ata_dummy_port_ops;
        }

        /* apply workaround for ASUS P5W DH Deluxe mainboard */
        ahci_p5wdh_workaround(host);

        /* initialize adapter */
        rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
        if (rc)
                return rc;

        rc = ahci_reset_controller(host);
        if (rc)
                return rc;

        ahci_init_controller(host);
        ahci_print_info(host);

        pci_set_master(pdev);
        return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
                                 &ahci_sht);
}

static int __init ahci_init(void)
{
        return pci_register_driver(&ahci_pci_driver);
}

static void __exit ahci_exit(void)
{
        pci_unregister_driver(&ahci_pci_driver);
}

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(ahci_init);
module_exit(ahci_exit);