ahci.c

  1. /*
  2. * ahci.c - AHCI SATA support
  3. *
  4. * Maintained by: Jeff Garzik <jgarzik@pobox.com>
  5. * Please ALWAYS copy linux-ide@vger.kernel.org
  6. * on emails.
  7. *
  8. * Copyright 2004-2005 Red Hat, Inc.
  9. *
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2, or (at your option)
  14. * any later version.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; see the file COPYING. If not, write to
  23. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  24. *
  25. *
  26. * libata documentation is available via 'make {ps|pdf}docs',
  27. * as Documentation/DocBook/libata.*
  28. *
  29. * AHCI hardware documentation:
  30. * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
  31. * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
  32. *
  33. */
  34. #include <linux/kernel.h>
  35. #include <linux/module.h>
  36. #include <linux/pci.h>
  37. #include <linux/init.h>
  38. #include <linux/blkdev.h>
  39. #include <linux/delay.h>
  40. #include <linux/interrupt.h>
  41. #include <linux/dma-mapping.h>
  42. #include <linux/device.h>
  43. #include <linux/dmi.h>
  44. #include <scsi/scsi_host.h>
  45. #include <scsi/scsi_cmnd.h>
  46. #include <linux/libata.h>
  47. #define DRV_NAME "ahci"
  48. #define DRV_VERSION "3.0"
  49. static int ahci_skip_host_reset;
  50. module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
  51. MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
  52. static int ahci_enable_alpm(struct ata_port *ap,
  53. enum link_pm policy);
  54. static void ahci_disable_alpm(struct ata_port *ap);
  55. enum {
  56. AHCI_PCI_BAR = 5,
  57. AHCI_MAX_PORTS = 32,
  58. AHCI_MAX_SG = 168, /* hardware max is 64K */
  59. AHCI_DMA_BOUNDARY = 0xffffffff,
  60. AHCI_MAX_CMDS = 32,
  61. AHCI_CMD_SZ = 32,
  62. AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
  63. AHCI_RX_FIS_SZ = 256,
  64. AHCI_CMD_TBL_CDB = 0x40,
  65. AHCI_CMD_TBL_HDR_SZ = 0x80,
  66. AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
  67. AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
  68. AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
  69. AHCI_RX_FIS_SZ,
  70. AHCI_IRQ_ON_SG = (1 << 31),
  71. AHCI_CMD_ATAPI = (1 << 5),
  72. AHCI_CMD_WRITE = (1 << 6),
  73. AHCI_CMD_PREFETCH = (1 << 7),
  74. AHCI_CMD_RESET = (1 << 8),
  75. AHCI_CMD_CLR_BUSY = (1 << 10),
  76. RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
  77. RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
  78. RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
  79. board_ahci = 0,
  80. board_ahci_vt8251 = 1,
  81. board_ahci_ign_iferr = 2,
  82. board_ahci_sb600 = 3,
  83. board_ahci_mv = 4,
  84. board_ahci_sb700 = 5,
  85. /* global controller registers */
  86. HOST_CAP = 0x00, /* host capabilities */
  87. HOST_CTL = 0x04, /* global host control */
  88. HOST_IRQ_STAT = 0x08, /* interrupt status */
  89. HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
  90. HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
  91. /* HOST_CTL bits */
  92. HOST_RESET = (1 << 0), /* reset controller; self-clear */
  93. HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
  94. HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
  95. /* HOST_CAP bits */
  96. HOST_CAP_SSC = (1 << 14), /* Slumber capable */
  97. HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
  98. HOST_CAP_CLO = (1 << 24), /* Command List Override support */
  99. HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
  100. HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
  101. HOST_CAP_SNTF = (1 << 29), /* SNotification register */
  102. HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
  103. HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
  104. /* registers for each SATA port */
  105. PORT_LST_ADDR = 0x00, /* command list DMA addr */
  106. PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
  107. PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
  108. PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
  109. PORT_IRQ_STAT = 0x10, /* interrupt status */
  110. PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
  111. PORT_CMD = 0x18, /* port command */
  112. PORT_TFDATA = 0x20, /* taskfile data */
  113. PORT_SIG = 0x24, /* device TF signature */
  114. PORT_CMD_ISSUE = 0x38, /* command issue */
  115. PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
  116. PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
  117. PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
  118. PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
  119. PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
  120. /* PORT_IRQ_{STAT,MASK} bits */
  121. PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
  122. PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
  123. PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
  124. PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
  125. PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
  126. PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
  127. PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
  128. PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
  129. PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
  130. PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
  131. PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
  132. PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
  133. PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
  134. PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
  135. PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
  136. PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
  137. PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
  138. PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
  139. PORT_IRQ_IF_ERR |
  140. PORT_IRQ_CONNECT |
  141. PORT_IRQ_PHYRDY |
  142. PORT_IRQ_UNK_FIS |
  143. PORT_IRQ_BAD_PMP,
  144. PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
  145. PORT_IRQ_TF_ERR |
  146. PORT_IRQ_HBUS_DATA_ERR,
  147. DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
  148. PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
  149. PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
  150. /* PORT_CMD bits */
  151. PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
  152. PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
  153. PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
  154. PORT_CMD_PMP = (1 << 17), /* PMP attached */
  155. PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
  156. PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
  157. PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
  158. PORT_CMD_CLO = (1 << 3), /* Command list override */
  159. PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
  160. PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
  161. PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
  162. PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
  163. PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
  164. PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
  165. PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
  166. /* hpriv->flags bits */
  167. AHCI_HFLAG_NO_NCQ = (1 << 0),
  168. AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
  169. AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
  170. AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
  171. AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
  172. AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
  173. AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
  174. AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
  175. AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
  176. /* ap->flags bits */
  177. AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
  178. ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
  179. ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
  180. ATA_FLAG_IPM,
  181. ICH_MAP = 0x90, /* ICH MAP register */
  182. };
  183. struct ahci_cmd_hdr {
  184. __le32 opts;
  185. __le32 status;
  186. __le32 tbl_addr;
  187. __le32 tbl_addr_hi;
  188. __le32 reserved[4];
  189. };
  190. struct ahci_sg {
  191. __le32 addr;
  192. __le32 addr_hi;
  193. __le32 reserved;
  194. __le32 flags_size;
  195. };
  196. struct ahci_host_priv {
  197. unsigned int flags; /* AHCI_HFLAG_* */
  198. u32 cap; /* cap to use */
  199. u32 port_map; /* port map to use */
  200. u32 saved_cap; /* saved initial cap */
  201. u32 saved_port_map; /* saved initial port_map */
  202. };
  203. struct ahci_port_priv {
  204. struct ata_link *active_link;
  205. struct ahci_cmd_hdr *cmd_slot;
  206. dma_addr_t cmd_slot_dma;
  207. void *cmd_tbl;
  208. dma_addr_t cmd_tbl_dma;
  209. void *rx_fis;
  210. dma_addr_t rx_fis_dma;
  211. /* for NCQ spurious interrupt analysis */
  212. unsigned int ncq_saw_d2h:1;
  213. unsigned int ncq_saw_dmas:1;
  214. unsigned int ncq_saw_sdb:1;
  215. u32 intr_mask; /* interrupts to enable */
  216. };
  217. static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
  218. static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
  219. static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
  220. static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
  221. static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
  222. static int ahci_port_start(struct ata_port *ap);
  223. static void ahci_port_stop(struct ata_port *ap);
  224. static void ahci_qc_prep(struct ata_queued_cmd *qc);
  225. static void ahci_freeze(struct ata_port *ap);
  226. static void ahci_thaw(struct ata_port *ap);
  227. static void ahci_pmp_attach(struct ata_port *ap);
  228. static void ahci_pmp_detach(struct ata_port *ap);
  229. static int ahci_softreset(struct ata_link *link, unsigned int *class,
  230. unsigned long deadline);
  231. static int ahci_hardreset(struct ata_link *link, unsigned int *class,
  232. unsigned long deadline);
  233. static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
  234. unsigned long deadline);
  235. static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
  236. unsigned long deadline);
  237. static void ahci_postreset(struct ata_link *link, unsigned int *class);
  238. static void ahci_error_handler(struct ata_port *ap);
  239. static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
  240. static int ahci_port_resume(struct ata_port *ap);
  241. static void ahci_dev_config(struct ata_device *dev);
  242. static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
  243. static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
  244. u32 opts);
  245. #ifdef CONFIG_PM
  246. static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
  247. static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
  248. static int ahci_pci_device_resume(struct pci_dev *pdev);
  249. #endif
  250. static struct device_attribute *ahci_shost_attrs[] = {
  251. &dev_attr_link_power_management_policy,
  252. NULL
  253. };
  254. static struct scsi_host_template ahci_sht = {
  255. ATA_NCQ_SHT(DRV_NAME),
  256. .can_queue = AHCI_MAX_CMDS - 1,
  257. .sg_tablesize = AHCI_MAX_SG,
  258. .dma_boundary = AHCI_DMA_BOUNDARY,
  259. .shost_attrs = ahci_shost_attrs,
  260. };
  261. static struct ata_port_operations ahci_ops = {
  262. .inherits = &sata_pmp_port_ops,
  263. .qc_defer = sata_pmp_qc_defer_cmd_switch,
  264. .qc_prep = ahci_qc_prep,
  265. .qc_issue = ahci_qc_issue,
  266. .qc_fill_rtf = ahci_qc_fill_rtf,
  267. .freeze = ahci_freeze,
  268. .thaw = ahci_thaw,
  269. .softreset = ahci_softreset,
  270. .hardreset = ahci_hardreset,
  271. .postreset = ahci_postreset,
  272. .pmp_softreset = ahci_softreset,
  273. .error_handler = ahci_error_handler,
  274. .post_internal_cmd = ahci_post_internal_cmd,
  275. .dev_config = ahci_dev_config,
  276. .scr_read = ahci_scr_read,
  277. .scr_write = ahci_scr_write,
  278. .pmp_attach = ahci_pmp_attach,
  279. .pmp_detach = ahci_pmp_detach,
  280. .enable_pm = ahci_enable_alpm,
  281. .disable_pm = ahci_disable_alpm,
  282. #ifdef CONFIG_PM
  283. .port_suspend = ahci_port_suspend,
  284. .port_resume = ahci_port_resume,
  285. #endif
  286. .port_start = ahci_port_start,
  287. .port_stop = ahci_port_stop,
  288. };
  289. static struct ata_port_operations ahci_vt8251_ops = {
  290. .inherits = &ahci_ops,
  291. .hardreset = ahci_vt8251_hardreset,
  292. };
  293. static struct ata_port_operations ahci_p5wdh_ops = {
  294. .inherits = &ahci_ops,
  295. .hardreset = ahci_p5wdh_hardreset,
  296. };
  297. #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
  298. static const struct ata_port_info ahci_port_info[] = {
  299. /* board_ahci */
  300. {
  301. .flags = AHCI_FLAG_COMMON,
  302. .pio_mask = 0x1f, /* pio0-4 */
  303. .udma_mask = ATA_UDMA6,
  304. .port_ops = &ahci_ops,
  305. },
  306. /* board_ahci_vt8251 */
  307. {
  308. AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
  309. .flags = AHCI_FLAG_COMMON,
  310. .pio_mask = 0x1f, /* pio0-4 */
  311. .udma_mask = ATA_UDMA6,
  312. .port_ops = &ahci_vt8251_ops,
  313. },
  314. /* board_ahci_ign_iferr */
  315. {
  316. AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
  317. .flags = AHCI_FLAG_COMMON,
  318. .pio_mask = 0x1f, /* pio0-4 */
  319. .udma_mask = ATA_UDMA6,
  320. .port_ops = &ahci_ops,
  321. },
  322. /* board_ahci_sb600 */
  323. {
  324. AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
  325. AHCI_HFLAG_32BIT_ONLY |
  326. AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
  327. .flags = AHCI_FLAG_COMMON,
  328. .pio_mask = 0x1f, /* pio0-4 */
  329. .udma_mask = ATA_UDMA6,
  330. .port_ops = &ahci_ops,
  331. },
  332. /* board_ahci_mv */
  333. {
  334. AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
  335. AHCI_HFLAG_MV_PATA),
  336. .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
  337. ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
  338. .pio_mask = 0x1f, /* pio0-4 */
  339. .udma_mask = ATA_UDMA6,
  340. .port_ops = &ahci_ops,
  341. },
  342. /* board_ahci_sb700 */
  343. {
  344. AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
  345. AHCI_HFLAG_NO_PMP),
  346. .flags = AHCI_FLAG_COMMON,
  347. .pio_mask = 0x1f, /* pio0-4 */
  348. .udma_mask = ATA_UDMA6,
  349. .port_ops = &ahci_ops,
  350. },
  351. };
  352. static const struct pci_device_id ahci_pci_tbl[] = {
  353. /* Intel */
  354. { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
  355. { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
  356. { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
  357. { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
  358. { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
  359. { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
  360. { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
  361. { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
  362. { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
  363. { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
  364. { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
  365. { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
  366. { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
  367. { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
  368. { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
  369. { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
  370. { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
  371. { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
  372. { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
  373. { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
  374. { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
  375. { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
  376. { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
  377. { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
  378. { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
  379. { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
  380. { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
  381. { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
  382. { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
  383. { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
  384. { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
  385. /* JMicron 360/1/3/5/6, match class to avoid IDE function */
  386. { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
  387. PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
  388. /* ATI */
  389. { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
  390. { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
  391. { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
  392. { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
  393. { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
  394. { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
  395. { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
  396. /* VIA */
  397. { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
  398. { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
  399. /* NVIDIA */
  400. { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */
  401. { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */
  402. { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */
  403. { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */
  404. { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci }, /* MCP65 */
  405. { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci }, /* MCP65 */
  406. { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci }, /* MCP65 */
  407. { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci }, /* MCP65 */
  408. { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
  409. { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
  410. { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
  411. { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
  412. { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
  413. { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
  414. { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
  415. { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
  416. { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
  417. { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
  418. { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
  419. { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
  420. { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
  421. { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
  422. { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
  423. { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
  424. { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
  425. { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
  426. { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
  427. { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
  428. { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
  429. { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
  430. { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
  431. { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
  432. { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
  433. { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
  434. { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
  435. { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
  436. { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
  437. { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
  438. { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
  439. { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
  440. { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
  441. { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
  442. { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
  443. { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
  444. { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
  445. { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
  446. { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
  447. { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
  448. { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
  449. { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
  450. { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
  451. { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
  452. { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
  453. { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
  454. { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
  455. { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
  456. { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
  457. { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
  458. { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
  459. { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
  460. { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
  461. { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
  462. { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
  463. { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
  464. { PCI_VDEVICE(NVIDIA, 0x0bd0), board_ahci }, /* MCP7B */
  465. { PCI_VDEVICE(NVIDIA, 0x0bd1), board_ahci }, /* MCP7B */
  466. { PCI_VDEVICE(NVIDIA, 0x0bd2), board_ahci }, /* MCP7B */
  467. { PCI_VDEVICE(NVIDIA, 0x0bd3), board_ahci }, /* MCP7B */
  468. /* SiS */
  469. { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
  470. { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
  471. { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
  472. /* Marvell */
  473. { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
  474. { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
  475. /* Generic, PCI class code for AHCI */
  476. { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
  477. PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
  478. { } /* terminate list */
  479. };
  480. static struct pci_driver ahci_pci_driver = {
  481. .name = DRV_NAME,
  482. .id_table = ahci_pci_tbl,
  483. .probe = ahci_init_one,
  484. .remove = ata_pci_remove_one,
  485. #ifdef CONFIG_PM
  486. .suspend = ahci_pci_device_suspend,
  487. .resume = ahci_pci_device_resume,
  488. #endif
  489. };
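/* Note: CAP.NP (bits 4:0) is a zero-based count of ports, hence the +1 below. */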
  490. static inline int ahci_nr_ports(u32 cap)
  491. {
  492. return (cap & 0x1f) + 1;
  493. }
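/* Per-port register banks start at ABAR offset 0x100 and are 0x80 bytes apart. */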
  494. static inline void __iomem *__ahci_port_base(struct ata_host *host,
  495. unsigned int port_no)
  496. {
  497. void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
  498. return mmio + 0x100 + (port_no * 0x80);
  499. }
  500. static inline void __iomem *ahci_port_base(struct ata_port *ap)
  501. {
  502. return __ahci_port_base(ap->host, ap->port_no);
  503. }
  504. static void ahci_enable_ahci(void __iomem *mmio)
  505. {
  506. int i;
  507. u32 tmp;
  508. /* turn on AHCI_EN */
  509. tmp = readl(mmio + HOST_CTL);
  510. if (tmp & HOST_AHCI_EN)
  511. return;
  512. /* Some controllers need AHCI_EN to be written multiple times.
  513. * Try a few times before giving up.
  514. */
  515. for (i = 0; i < 5; i++) {
  516. tmp |= HOST_AHCI_EN;
  517. writel(tmp, mmio + HOST_CTL);
  518. tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
  519. if (tmp & HOST_AHCI_EN)
  520. return;
  521. msleep(10);
  522. }
  523. WARN_ON(1);
  524. }
  525. /**
  526. * ahci_save_initial_config - Save and fixup initial config values
  527. * @pdev: target PCI device
  528. * @hpriv: host private area to store config values
  529. *
  530. * Some registers containing configuration info might be setup by
  531. * BIOS and might be cleared on reset. This function saves the
  532. * initial values of those registers into @hpriv such that they
  533. * can be restored after controller reset.
  534. *
  535. * If inconsistent, config values are fixed up by this function.
  536. *
  537. * LOCKING:
  538. * None.
  539. */
  540. static void ahci_save_initial_config(struct pci_dev *pdev,
  541. struct ahci_host_priv *hpriv)
  542. {
  543. void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
  544. u32 cap, port_map;
  545. int i;
  546. int mv;
  547. /* make sure AHCI mode is enabled before accessing CAP */
  548. ahci_enable_ahci(mmio);
  549. /* Values prefixed with saved_ are written back to host after
  550. * reset. Values without are used for driver operation.
  551. */
  552. hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
  553. hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
  554. /* some chips have errata preventing 64bit use */
  555. if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
  556. dev_printk(KERN_INFO, &pdev->dev,
  557. "controller can't do 64bit DMA, forcing 32bit\n");
  558. cap &= ~HOST_CAP_64;
  559. }
  560. if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
  561. dev_printk(KERN_INFO, &pdev->dev,
  562. "controller can't do NCQ, turning off CAP_NCQ\n");
  563. cap &= ~HOST_CAP_NCQ;
  564. }
  565. if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
  566. dev_printk(KERN_INFO, &pdev->dev,
  567. "controller can't do PMP, turning off CAP_PMP\n");
  568. cap &= ~HOST_CAP_PMP;
  569. }
  570. /*
  571. * Temporary Marvell 6145 hack: PATA port presence
  572. * is asserted through the standard AHCI port
  573. * presence register, as bit 4 (counting from 0)
  574. */
  575. if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
  576. if (pdev->device == 0x6121)
  577. mv = 0x3;
  578. else
  579. mv = 0xf;
  580. dev_printk(KERN_ERR, &pdev->dev,
  581. "MV_AHCI HACK: port_map %x -> %x\n",
  582. port_map,
  583. port_map & mv);
  584. port_map &= mv;
  585. }
  586. /* cross check port_map and cap.n_ports */
  587. if (port_map) {
  588. int map_ports = 0;
  589. for (i = 0; i < AHCI_MAX_PORTS; i++)
  590. if (port_map & (1 << i))
  591. map_ports++;
  592. /* If PI has more ports than n_ports, whine, clear
  593. * port_map and let it be generated from n_ports.
  594. */
  595. if (map_ports > ahci_nr_ports(cap)) {
  596. dev_printk(KERN_WARNING, &pdev->dev,
  597. "implemented port map (0x%x) contains more "
  598. "ports than nr_ports (%u), using nr_ports\n",
  599. port_map, ahci_nr_ports(cap));
  600. port_map = 0;
  601. }
  602. }
  603. /* fabricate port_map from cap.nr_ports */
  604. if (!port_map) {
  605. port_map = (1 << ahci_nr_ports(cap)) - 1;
  606. dev_printk(KERN_WARNING, &pdev->dev,
  607. "forcing PORTS_IMPL to 0x%x\n", port_map);
  608. /* write the fixed up value to the PI register */
  609. hpriv->saved_port_map = port_map;
  610. }
  611. /* record values to use during operation */
  612. hpriv->cap = cap;
  613. hpriv->port_map = port_map;
  614. }
  615. /**
  616. * ahci_restore_initial_config - Restore initial config
  617. * @host: target ATA host
  618. *
  619. * Restore initial config stored by ahci_save_initial_config().
  620. *
  621. * LOCKING:
  622. * None.
  623. */
  624. static void ahci_restore_initial_config(struct ata_host *host)
  625. {
  626. struct ahci_host_priv *hpriv = host->private_data;
  627. void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
  628. writel(hpriv->saved_cap, mmio + HOST_CAP);
  629. writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
  630. (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
  631. }
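/* Map an SCR index to its PxS* register offset; returning 0 (not a valid SCR
 * offset) signals that the register is unavailable, e.g. SNotification on a
 * controller without CAP.SNTF.
 */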
  632. static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
  633. {
  634. static const int offset[] = {
  635. [SCR_STATUS] = PORT_SCR_STAT,
  636. [SCR_CONTROL] = PORT_SCR_CTL,
  637. [SCR_ERROR] = PORT_SCR_ERR,
  638. [SCR_ACTIVE] = PORT_SCR_ACT,
  639. [SCR_NOTIFICATION] = PORT_SCR_NTF,
  640. };
  641. struct ahci_host_priv *hpriv = ap->host->private_data;
  642. if (sc_reg < ARRAY_SIZE(offset) &&
  643. (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
  644. return offset[sc_reg];
  645. return 0;
  646. }
  647. static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
  648. {
  649. void __iomem *port_mmio = ahci_port_base(ap);
  650. int offset = ahci_scr_offset(ap, sc_reg);
  651. if (offset) {
  652. *val = readl(port_mmio + offset);
  653. return 0;
  654. }
  655. return -EINVAL;
  656. }
  657. static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
  658. {
  659. void __iomem *port_mmio = ahci_port_base(ap);
  660. int offset = ahci_scr_offset(ap, sc_reg);
  661. if (offset) {
  662. writel(val, port_mmio + offset);
  663. return 0;
  664. }
  665. return -EINVAL;
  666. }
  667. static void ahci_start_engine(struct ata_port *ap)
  668. {
  669. void __iomem *port_mmio = ahci_port_base(ap);
  670. u32 tmp;
  671. /* start DMA */
  672. tmp = readl(port_mmio + PORT_CMD);
  673. tmp |= PORT_CMD_START;
  674. writel(tmp, port_mmio + PORT_CMD);
  675. readl(port_mmio + PORT_CMD); /* flush */
  676. }
  677. static int ahci_stop_engine(struct ata_port *ap)
  678. {
  679. void __iomem *port_mmio = ahci_port_base(ap);
  680. u32 tmp;
  681. tmp = readl(port_mmio + PORT_CMD);
  682. /* check if the HBA is idle */
  683. if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
  684. return 0;
  685. /* setting HBA to idle */
  686. tmp &= ~PORT_CMD_START;
  687. writel(tmp, port_mmio + PORT_CMD);
  688. /* wait for engine to stop. This could be as long as 500 msec */
  689. tmp = ata_wait_register(port_mmio + PORT_CMD,
  690. PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
  691. if (tmp & PORT_CMD_LIST_ON)
  692. return -EIO;
  693. return 0;
  694. }
  695. static void ahci_start_fis_rx(struct ata_port *ap)
  696. {
  697. void __iomem *port_mmio = ahci_port_base(ap);
  698. struct ahci_host_priv *hpriv = ap->host->private_data;
  699. struct ahci_port_priv *pp = ap->private_data;
  700. u32 tmp;
  701. /* set FIS registers */
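/* The (x >> 16) >> 16 form extracts the high dword without using a 32-bit
 * shift, which would be undefined when dma_addr_t is only 32 bits wide.
 */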
  702. if (hpriv->cap & HOST_CAP_64)
  703. writel((pp->cmd_slot_dma >> 16) >> 16,
  704. port_mmio + PORT_LST_ADDR_HI);
  705. writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
  706. if (hpriv->cap & HOST_CAP_64)
  707. writel((pp->rx_fis_dma >> 16) >> 16,
  708. port_mmio + PORT_FIS_ADDR_HI);
  709. writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
  710. /* enable FIS reception */
  711. tmp = readl(port_mmio + PORT_CMD);
  712. tmp |= PORT_CMD_FIS_RX;
  713. writel(tmp, port_mmio + PORT_CMD);
  714. /* flush */
  715. readl(port_mmio + PORT_CMD);
  716. }
  717. static int ahci_stop_fis_rx(struct ata_port *ap)
  718. {
  719. void __iomem *port_mmio = ahci_port_base(ap);
  720. u32 tmp;
  721. /* disable FIS reception */
  722. tmp = readl(port_mmio + PORT_CMD);
  723. tmp &= ~PORT_CMD_FIS_RX;
  724. writel(tmp, port_mmio + PORT_CMD);
  725. /* wait for completion, spec says 500ms, give it 1000 */
  726. tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
  727. PORT_CMD_FIS_ON, 10, 1000);
  728. if (tmp & PORT_CMD_FIS_ON)
  729. return -EBUSY;
  730. return 0;
  731. }
  732. static void ahci_power_up(struct ata_port *ap)
  733. {
  734. struct ahci_host_priv *hpriv = ap->host->private_data;
  735. void __iomem *port_mmio = ahci_port_base(ap);
  736. u32 cmd;
  737. cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
  738. /* spin up device */
  739. if (hpriv->cap & HOST_CAP_SSS) {
  740. cmd |= PORT_CMD_SPIN_UP;
  741. writel(cmd, port_mmio + PORT_CMD);
  742. }
  743. /* wake up link */
  744. writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
  745. }
  746. static void ahci_disable_alpm(struct ata_port *ap)
  747. {
  748. struct ahci_host_priv *hpriv = ap->host->private_data;
  749. void __iomem *port_mmio = ahci_port_base(ap);
  750. u32 cmd;
  751. struct ahci_port_priv *pp = ap->private_data;
  752. /* IPM bits should be disabled by libata-core */
  753. /* get the existing command bits */
  754. cmd = readl(port_mmio + PORT_CMD);
  755. /* disable ALPM and ASP */
  756. cmd &= ~PORT_CMD_ASP;
  757. cmd &= ~PORT_CMD_ALPE;
  758. /* force the interface back to active */
  759. cmd |= PORT_CMD_ICC_ACTIVE;
  760. /* write out new cmd value */
  761. writel(cmd, port_mmio + PORT_CMD);
  762. cmd = readl(port_mmio + PORT_CMD);
  763. /* wait 10ms to be sure we've come out of any low power state */
  764. msleep(10);
  765. /* clear out any PhyRdy stuff from interrupt status */
  766. writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
  767. /* go ahead and clean out PhyRdy Change from Serror too */
  768. ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
  769. /*
  770. * Clear flag to indicate that we should ignore all PhyRdy
  771. * state changes
  772. */
  773. hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
  774. /*
  775. * Enable interrupts on Phy Ready.
  776. */
  777. pp->intr_mask |= PORT_IRQ_PHYRDY;
  778. writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
  779. /*
  780. * don't change the link pm policy - we can be called
  781. * just to turn off link pm temporarily
  782. */
  783. }
  784. static int ahci_enable_alpm(struct ata_port *ap,
  785. enum link_pm policy)
  786. {
  787. struct ahci_host_priv *hpriv = ap->host->private_data;
  788. void __iomem *port_mmio = ahci_port_base(ap);
  789. u32 cmd;
  790. struct ahci_port_priv *pp = ap->private_data;
  791. u32 asp;
  792. /* Make sure the host is capable of link power management */
  793. if (!(hpriv->cap & HOST_CAP_ALPM))
  794. return -EINVAL;
  795. switch (policy) {
  796. case MAX_PERFORMANCE:
  797. case NOT_AVAILABLE:
  798. /*
  799. * if we came here with NOT_AVAILABLE,
  800. * it just means this is the first time we
  801. * have tried to enable - default to max performance,
  802. * and let the user go to lower power modes on request.
  803. */
  804. ahci_disable_alpm(ap);
  805. return 0;
  806. case MIN_POWER:
  807. /* configure HBA to enter SLUMBER */
  808. asp = PORT_CMD_ASP;
  809. break;
  810. case MEDIUM_POWER:
  811. /* configure HBA to enter PARTIAL */
  812. asp = 0;
  813. break;
  814. default:
  815. return -EINVAL;
  816. }
  817. /*
  818. * Disable interrupts on Phy Ready. This keeps us from
  819. * getting woken up due to spurious phy ready interrupts
  820. * TBD - Hot plug should be done via polling now, is
  821. * that even supported?
  822. */
  823. pp->intr_mask &= ~PORT_IRQ_PHYRDY;
  824. writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
  825. /*
  826. * Set a flag to indicate that we should ignore all PhyRdy
  827. * state changes since these can happen now whenever we
  828. * change link state
  829. */
  830. hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
  831. /* get the existing command bits */
  832. cmd = readl(port_mmio + PORT_CMD);
  833. /*
  834. * Set ASP based on Policy
  835. */
  836. cmd |= asp;
  837. /*
  838. * Setting this bit will instruct the HBA to aggressively
  839. * enter a lower power link state when it's appropriate and
  840. * based on the value set above for ASP
  841. */
  842. cmd |= PORT_CMD_ALPE;
  843. /* write out new cmd value */
  844. writel(cmd, port_mmio + PORT_CMD);
  845. cmd = readl(port_mmio + PORT_CMD);
  846. /* IPM bits should be set by libata-core */
  847. return 0;
  848. }
  849. #ifdef CONFIG_PM
  850. static void ahci_power_down(struct ata_port *ap)
  851. {
  852. struct ahci_host_priv *hpriv = ap->host->private_data;
  853. void __iomem *port_mmio = ahci_port_base(ap);
  854. u32 cmd, scontrol;
  855. if (!(hpriv->cap & HOST_CAP_SSS))
  856. return;
  857. /* put device into listen mode, first set PxSCTL.DET to 0 */
  858. scontrol = readl(port_mmio + PORT_SCR_CTL);
  859. scontrol &= ~0xf;
  860. writel(scontrol, port_mmio + PORT_SCR_CTL);
  861. /* then set PxCMD.SUD to 0 */
  862. cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
  863. cmd &= ~PORT_CMD_SPIN_UP;
  864. writel(cmd, port_mmio + PORT_CMD);
  865. }
  866. #endif
  867. static void ahci_start_port(struct ata_port *ap)
  868. {
  869. /* enable FIS reception */
  870. ahci_start_fis_rx(ap);
  871. /* enable DMA */
  872. ahci_start_engine(ap);
  873. }
  874. static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
  875. {
  876. int rc;
  877. /* disable DMA */
  878. rc = ahci_stop_engine(ap);
  879. if (rc) {
  880. *emsg = "failed to stop engine";
  881. return rc;
  882. }
  883. /* disable FIS reception */
  884. rc = ahci_stop_fis_rx(ap);
  885. if (rc) {
  886. *emsg = "failed stop FIS RX";
  887. return rc;
  888. }
  889. return 0;
  890. }
  891. static int ahci_reset_controller(struct ata_host *host)
  892. {
  893. struct pci_dev *pdev = to_pci_dev(host->dev);
  894. struct ahci_host_priv *hpriv = host->private_data;
  895. void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
  896. u32 tmp;
  897. /* we must be in AHCI mode, before using anything
  898. * AHCI-specific, such as HOST_RESET.
  899. */
  900. ahci_enable_ahci(mmio);
  901. /* global controller reset */
  902. if (!ahci_skip_host_reset) {
  903. tmp = readl(mmio + HOST_CTL);
  904. if ((tmp & HOST_RESET) == 0) {
  905. writel(tmp | HOST_RESET, mmio + HOST_CTL);
  906. readl(mmio + HOST_CTL); /* flush */
  907. }
  908. /* reset must complete within 1 second, or
  909. * the hardware should be considered fried.
  910. */
  911. ssleep(1);
  912. tmp = readl(mmio + HOST_CTL);
  913. if (tmp & HOST_RESET) {
  914. dev_printk(KERN_ERR, host->dev,
  915. "controller reset failed (0x%x)\n", tmp);
  916. return -EIO;
  917. }
  918. /* turn on AHCI mode */
  919. ahci_enable_ahci(mmio);
  920. /* Some registers might be cleared on reset. Restore
  921. * initial values.
  922. */
  923. ahci_restore_initial_config(host);
  924. } else
  925. dev_printk(KERN_INFO, host->dev,
  926. "skipping global host reset\n");
  927. if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
  928. u16 tmp16;
  929. /* configure PCS */
  930. pci_read_config_word(pdev, 0x92, &tmp16);
  931. if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
  932. tmp16 |= hpriv->port_map;
  933. pci_write_config_word(pdev, 0x92, tmp16);
  934. }
  935. }
  936. return 0;
  937. }
  938. static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
  939. int port_no, void __iomem *mmio,
  940. void __iomem *port_mmio)
  941. {
  942. const char *emsg = NULL;
  943. int rc;
  944. u32 tmp;
  945. /* make sure port is not active */
  946. rc = ahci_deinit_port(ap, &emsg);
  947. if (rc)
  948. dev_printk(KERN_WARNING, &pdev->dev,
  949. "%s (%d)\n", emsg, rc);
  950. /* clear SError */
  951. tmp = readl(port_mmio + PORT_SCR_ERR);
  952. VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
  953. writel(tmp, port_mmio + PORT_SCR_ERR);
  954. /* clear port IRQ */
  955. tmp = readl(port_mmio + PORT_IRQ_STAT);
  956. VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
  957. if (tmp)
  958. writel(tmp, port_mmio + PORT_IRQ_STAT);
  959. writel(1 << port_no, mmio + HOST_IRQ_STAT);
  960. }
  961. static void ahci_init_controller(struct ata_host *host)
  962. {
  963. struct ahci_host_priv *hpriv = host->private_data;
  964. struct pci_dev *pdev = to_pci_dev(host->dev);
  965. void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
  966. int i;
  967. void __iomem *port_mmio;
  968. u32 tmp;
  969. int mv;
  970. if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
  971. if (pdev->device == 0x6121)
  972. mv = 2;
  973. else
  974. mv = 4;
  975. port_mmio = __ahci_port_base(host, mv);
  976. writel(0, port_mmio + PORT_IRQ_MASK);
  977. /* clear port IRQ */
  978. tmp = readl(port_mmio + PORT_IRQ_STAT);
  979. VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
  980. if (tmp)
  981. writel(tmp, port_mmio + PORT_IRQ_STAT);
  982. }
  983. for (i = 0; i < host->n_ports; i++) {
  984. struct ata_port *ap = host->ports[i];
  985. port_mmio = ahci_port_base(ap);
  986. if (ata_port_is_dummy(ap))
  987. continue;
  988. ahci_port_init(pdev, ap, i, mmio, port_mmio);
  989. }
  990. tmp = readl(mmio + HOST_CTL);
  991. VPRINTK("HOST_CTL 0x%x\n", tmp);
  992. writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
  993. tmp = readl(mmio + HOST_CTL);
  994. VPRINTK("HOST_CTL 0x%x\n", tmp);
  995. }
  996. static void ahci_dev_config(struct ata_device *dev)
  997. {
  998. struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
  999. if (hpriv->flags & AHCI_HFLAG_SECT255) {
  1000. dev->max_sectors = 255;
  1001. ata_dev_printk(dev, KERN_INFO,
  1002. "SB600 AHCI: limiting to 255 sectors per cmd\n");
  1003. }
  1004. }
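/* PxSIG holds the initial D2H signature FIS fields (LBA high/mid/low, sector
 * count); ata_dev_classify() uses them to tell ATA, ATAPI and PMP devices apart.
 */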
  1005. static unsigned int ahci_dev_classify(struct ata_port *ap)
  1006. {
  1007. void __iomem *port_mmio = ahci_port_base(ap);
  1008. struct ata_taskfile tf;
  1009. u32 tmp;
  1010. tmp = readl(port_mmio + PORT_SIG);
  1011. tf.lbah = (tmp >> 24) & 0xff;
  1012. tf.lbam = (tmp >> 16) & 0xff;
  1013. tf.lbal = (tmp >> 8) & 0xff;
  1014. tf.nsect = (tmp) & 0xff;
  1015. return ata_dev_classify(&tf);
  1016. }
  1017. static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
  1018. u32 opts)
  1019. {
  1020. dma_addr_t cmd_tbl_dma;
  1021. cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
  1022. pp->cmd_slot[tag].opts = cpu_to_le32(opts);
  1023. pp->cmd_slot[tag].status = 0;
  1024. pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
  1025. pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
  1026. }
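/* Clear a stuck BSY/DRQ condition: stop the engine, apply Command List
 * Override if the HBA advertises CAP.CLO, then restart the engine.
 */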
  1027. static int ahci_kick_engine(struct ata_port *ap, int force_restart)
  1028. {
  1029. void __iomem *port_mmio = ahci_port_base(ap);
  1030. struct ahci_host_priv *hpriv = ap->host->private_data;
  1031. u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
  1032. u32 tmp;
  1033. int busy, rc;
  1034. /* do we need to kick the port? */
  1035. busy = status & (ATA_BUSY | ATA_DRQ);
  1036. if (!busy && !force_restart)
  1037. return 0;
  1038. /* stop engine */
  1039. rc = ahci_stop_engine(ap);
  1040. if (rc)
  1041. goto out_restart;
  1042. /* need to do CLO? */
  1043. if (!busy) {
  1044. rc = 0;
  1045. goto out_restart;
  1046. }
  1047. if (!(hpriv->cap & HOST_CAP_CLO)) {
  1048. rc = -EOPNOTSUPP;
  1049. goto out_restart;
  1050. }
  1051. /* perform CLO */
  1052. tmp = readl(port_mmio + PORT_CMD);
  1053. tmp |= PORT_CMD_CLO;
  1054. writel(tmp, port_mmio + PORT_CMD);
  1055. rc = 0;
  1056. tmp = ata_wait_register(port_mmio + PORT_CMD,
  1057. PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
  1058. if (tmp & PORT_CMD_CLO)
  1059. rc = -EIO;
  1060. /* restart engine */
  1061. out_restart:
  1062. ahci_start_engine(ap);
  1063. return rc;
  1064. }
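/* Issue a single polled command through slot 0: the 5-dword (20-byte) H2D
 * Register FIS length goes into the command header's CFL field, and writing
 * bit 0 of PxCI starts slot 0.
 */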
  1065. static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
  1066. struct ata_taskfile *tf, int is_cmd, u16 flags,
  1067. unsigned long timeout_msec)
  1068. {
  1069. const u32 cmd_fis_len = 5; /* five dwords */
  1070. struct ahci_port_priv *pp = ap->private_data;
  1071. void __iomem *port_mmio = ahci_port_base(ap);
  1072. u8 *fis = pp->cmd_tbl;
  1073. u32 tmp;
  1074. /* prep the command */
  1075. ata_tf_to_fis(tf, pmp, is_cmd, fis);
  1076. ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
  1077. /* issue & wait */
  1078. writel(1, port_mmio + PORT_CMD_ISSUE);
  1079. if (timeout_msec) {
  1080. tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
  1081. 1, timeout_msec);
  1082. if (tmp & 0x1) {
  1083. ahci_kick_engine(ap, 1);
  1084. return -EBUSY;
  1085. }
  1086. } else
  1087. readl(port_mmio + PORT_CMD_ISSUE); /* flush */
  1088. return 0;
  1089. }
  1090. static int ahci_check_ready(struct ata_link *link)
  1091. {
  1092. void __iomem *port_mmio = ahci_port_base(link->ap);
  1093. u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
  1094. if (!(status & ATA_BUSY))
  1095. return 1;
  1096. return 0;
  1097. }
  1098. static int ahci_softreset(struct ata_link *link, unsigned int *class,
  1099. unsigned long deadline)
  1100. {
  1101. struct ata_port *ap = link->ap;
  1102. int pmp = sata_srst_pmp(link);
  1103. const char *reason = NULL;
  1104. unsigned long now, msecs;
  1105. struct ata_taskfile tf;
  1106. int rc;
  1107. DPRINTK("ENTER\n");
  1108. /* prepare for SRST (AHCI-1.1 10.4.1) */
  1109. rc = ahci_kick_engine(ap, 1);
  1110. if (rc && rc != -EOPNOTSUPP)
  1111. ata_link_printk(link, KERN_WARNING,
  1112. "failed to reset engine (errno=%d)\n", rc);
  1113. ata_tf_init(link->device, &tf);
  1114. /* issue the first D2H Register FIS */
  1115. msecs = 0;
  1116. now = jiffies;
  1117. if (time_after(deadline, now))
  1118. msecs = jiffies_to_msecs(deadline - now);
  1119. tf.ctl |= ATA_SRST;
  1120. if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
  1121. AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
  1122. rc = -EIO;
  1123. reason = "1st FIS failed";
  1124. goto fail;
  1125. }
  1126. /* spec says at least 5us, but be generous and sleep for 1ms */
  1127. msleep(1);
  1128. /* issue the second D2H Register FIS */
  1129. tf.ctl &= ~ATA_SRST;
  1130. ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
  1131. /* wait for link to become ready */
  1132. rc = ata_wait_after_reset(link, deadline, ahci_check_ready);
  1133. /* link occupied, -ENODEV too is an error */
  1134. if (rc) {
  1135. reason = "device not ready";
  1136. goto fail;
  1137. }
  1138. *class = ahci_dev_classify(ap);
  1139. DPRINTK("EXIT, class=%u\n", *class);
  1140. return 0;
  1141. fail:
  1142. ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
  1143. return rc;
  1144. }
  1145. static int ahci_hardreset(struct ata_link *link, unsigned int *class,
  1146. unsigned long deadline)
  1147. {
  1148. const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
  1149. struct ata_port *ap = link->ap;
  1150. struct ahci_port_priv *pp = ap->private_data;
  1151. u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
  1152. struct ata_taskfile tf;
  1153. bool online;
  1154. int rc;
  1155. DPRINTK("ENTER\n");
  1156. ahci_stop_engine(ap);
  1157. /* clear D2H reception area to properly wait for D2H FIS */
  1158. ata_tf_init(link->device, &tf);
  1159. tf.command = 0x80;
  1160. ata_tf_to_fis(&tf, 0, 0, d2h_fis);
  1161. rc = sata_link_hardreset(link, timing, deadline, &online,
  1162. ahci_check_ready);
  1163. ahci_start_engine(ap);
  1164. if (online)
  1165. *class = ahci_dev_classify(ap);
  1166. DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
  1167. return rc;
  1168. }
  1169. static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
  1170. unsigned long deadline)
  1171. {
  1172. struct ata_port *ap = link->ap;
  1173. bool online;
  1174. int rc;
  1175. DPRINTK("ENTER\n");
  1176. ahci_stop_engine(ap);
  1177. rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
  1178. deadline, &online, NULL);
  1179. ahci_start_engine(ap);
  1180. DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
  1181. /* vt8251 doesn't clear BSY on signature FIS reception,
  1182. * request follow-up softreset.
  1183. */
  1184. return online ? -EAGAIN : rc;
  1185. }
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline, &online, NULL);

	ahci_start_engine(ap);

	/* The pseudo configuration device on SIMG4726 attached to
	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
	 * hardreset if no device is attached to the first downstream
	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
	 * work around this, wait for !BSY only briefly.  If BSY isn't
	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
	 *
	 * Wait for two seconds.  Devices attached to downstream port
	 * which can't process the following IDENTIFY after this will
	 * have to be reset again.  For most cases, this should
	 * suffice while making probing snappish enough.
	 */
	if (online) {
		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
					  ahci_check_ready);
		if (rc)
			ahci_kick_engine(ap, 0);
	}
	return rc;
}
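
/*
 * Post-reset fixup: run the standard libata postreset, then make sure
 * the port's PORT_CMD_ATAPI bit matches the class of the attached
 * device.
 */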
static void ahci_postreset(struct ata_link *link, unsigned int *class)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 new_tmp, tmp;

	ata_std_postreset(link, class);

	/* Make sure port's ATAPI bit is set appropriately */
	new_tmp = tmp = readl(port_mmio + PORT_CMD);
	if (*class == ATA_DEV_ATAPI)
		new_tmp |= PORT_CMD_ATAPI;
	else
		new_tmp &= ~PORT_CMD_ATAPI;
	if (new_tmp != tmp) {
		writel(new_tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD); /* flush */
	}
}
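
/*
 * Build the scatter/gather table in the command table area that
 * follows the command FIS.  Returns the number of entries filled in.
 */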
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
	struct scatterlist *sg;
	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	unsigned int si;

	VPRINTK("ENTER\n");

	/*
	 * Next, the S/G list.
	 */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
	}

	return si;
}
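
/*
 * Prepare a queued command: build the H2D command FIS (and CDB for
 * ATAPI), fill the S/G table and set up the command slot header.
 */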
static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}
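
/*
 * Handle error interrupts: record SError and the raw IRQ status for
 * EH, attribute device errors to the active qc or link, and freeze or
 * abort the port depending on whether the error is fatal
 * (PORT_IRQ_FREEZE).
 */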
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	u32 serror;

	/* determine active link */
	ata_port_for_each_link(link, ap)
		if (ata_link_active(link))
			break;
	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(ap, SCR_ERROR, &serror);
	ahci_scr_write(ap, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link.  There's no active qc on NCQ errors.  It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x",
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		host_ehi->err_mask |= AC_ERR_ATA_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
				  irq_stat & PORT_IRQ_CONNECT ?
				  "connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */

	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
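
/*
 * Per-port interrupt handler: acknowledge PORT_IRQ_STAT, dispatch
 * error conditions to ahci_error_intr(), handle SDB-FIS based async
 * notification and complete finished commands.
 */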
static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 status, qc_active;
	int rc;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	/* If we are getting PhyRdy, this is
	 * just a power state change, we should
	 * clear out this, plus the PhyRdy/Comm
	 * Wake bits from Serror
	 */
	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
	    (status & PORT_IRQ_PHYRDY)) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some controllers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 */
			const __le32 *f = pp->rx_fis + RX_FIS_SDB;
			u32 f0 = le32_to_cpu(f[0]);

			if (f0 & (1 << 15))
				sata_async_notification(ap);
		}
	}

	/* pp->active_link is valid iff any command is in flight */
	if (ap->qc_active && pp->active_link->sactive)
		qc_active = readl(port_mmio + PORT_SCR_ACT);
	else
		qc_active = readl(port_mmio + PORT_CMD_ISSUE);

	rc = ata_qc_complete_multiple(ap, qc_active);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}
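
/*
 * Top-level interrupt handler: read HOST_IRQ_STAT, call the per-port
 * handler for each port with a pending interrupt and acknowledge the
 * handled bits.
 */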
static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_ack = 0;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = host->iomap[AHCI_PCI_BAR];

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	irq_stat &= hpriv->port_map;
	if (!irq_stat)
		return IRQ_NONE;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_stat & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_port_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					   "interrupt on disabled port %u\n", i);
		}

		irq_ack |= (1 << i);
	}

	if (irq_ack) {
		writel(irq_ack, mmio + HOST_IRQ_STAT);
		handled = 1;
	}

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
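
/*
 * Issue a prepared command.  For NCQ commands the corresponding bit
 * must be set in PORT_SCR_ACT before the command is started via
 * PORT_CMD_ISSUE.
 */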
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
	 */
	pp->active_link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
	readl(port_mmio + PORT_CMD_ISSUE); /* flush */

	return 0;
}
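
/*
 * Fill the result taskfile from the D2H Register FIS the controller
 * stored in the receive area.
 */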
static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ahci_port_priv *pp = qc->ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;

	ata_tf_from_fis(d2h_fis, &qc->result_tf);
	return true;
}
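
/*
 * Freeze/thaw for error handling: mask all port interrupts while
 * frozen, clear any pending status and restore the saved mask when
 * thawing.
 */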
static void ahci_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);

	/* turn IRQ off */
	writel(0, port_mmio + PORT_IRQ_MASK);
}

static void ahci_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;
	struct ahci_port_priv *pp = ap->private_data;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
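
/*
 * Error handler entry point: if the port isn't frozen, restart the
 * DMA engine, then let the PMP-aware libata error handler do the
 * rest.
 */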
static void ahci_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	sata_pmp_error_handler(ap);
}

static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		ahci_kick_engine(ap, 1);
}
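
/*
 * Enable/disable port multiplier support on the port by toggling
 * PORT_CMD_PMP and the Bad-PMP bit in the port's interrupt mask.
 */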
static void ahci_pmp_attach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd |= PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask |= PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_pmp_detach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd &= ~PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}

#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc == 0)
		ahci_power_down(ap);
	else {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_start_port(ap);
	}

	return rc;
}

static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 ctl;

	if (mesg.event & PM_EVENT_SLEEP) {
		/* AHCI spec rev1.1 section 8.3.3:
		 * Software must disable interrupts prior to requesting a
		 * transition of the HBA to D3 state.
		 */
		ctl = readl(mmio + HOST_CTL);
		ctl &= ~HOST_IRQ_EN;
		writel(ctl, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	return ata_pci_device_suspend(pdev, mesg);
}

static int ahci_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif

static int ahci_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += AHCI_RX_FIS_SZ;
	mem_dma += AHCI_RX_FIS_SZ;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}

static void ahci_port_stop(struct ata_port *ap)
{
	const char *emsg = NULL;
	int rc;

	/* de-initialize port */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
}
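
/*
 * Set the PCI DMA masks: use 64-bit addressing when the caller
 * indicates the controller supports it (HOST_CAP_64), otherwise fall
 * back to the 32-bit masks.
 */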
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
	int rc;

	if (using_dac &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}

static void ahci_print_info(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 vers, cap, impl, speed;
	const char *speed_s;
	u16 cc;
	const char *scc_s;

	vers = readl(mmio + HOST_VERSION);
	cap = hpriv->cap;
	impl = hpriv->port_map;

	speed = (cap >> 20) & 0xf;
	if (speed == 1)
		speed_s = "1.5";
	else if (speed == 2)
		speed_s = "3";
	else
		speed_s = "?";

	pci_read_config_word(pdev, 0x0a, &cc);
	if (cc == PCI_CLASS_STORAGE_IDE)
		scc_s = "IDE";
	else if (cc == PCI_CLASS_STORAGE_SATA)
		scc_s = "SATA";
	else if (cc == PCI_CLASS_STORAGE_RAID)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
		   "AHCI %02x%02x.%02x%02x "
		   "%u slots %u ports %s Gbps 0x%x impl %s mode\n",
		   (vers >> 24) & 0xff,
		   (vers >> 16) & 0xff,
		   (vers >> 8) & 0xff,
		   vers & 0xff,
		   ((cap >> 8) & 0x1f) + 1,
		   (cap & 0x1f) + 1,
		   speed_s,
		   impl,
		   scc_s);

	dev_printk(KERN_INFO, &pdev->dev,
		   "flags: "
		   "%s%s%s%s%s%s%s"
		   "%s%s%s%s%s%s%s\n",
		   cap & (1 << 31) ? "64bit " : "",
		   cap & (1 << 30) ? "ncq " : "",
		   cap & (1 << 29) ? "sntf " : "",
		   cap & (1 << 28) ? "ilck " : "",
		   cap & (1 << 27) ? "stag " : "",
		   cap & (1 << 26) ? "pm " : "",
		   cap & (1 << 25) ? "led " : "",
		   cap & (1 << 24) ? "clo " : "",
		   cap & (1 << 19) ? "nz " : "",
		   cap & (1 << 18) ? "only " : "",
		   cap & (1 << 17) ? "pmp " : "",
		   cap & (1 << 15) ? "pio " : "",
		   cap & (1 << 14) ? "slum " : "",
		   cap & (1 << 13) ? "part " : "");
}

/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
 * hardwired to on-board SIMG 4726.  The chipset is ICH8 and doesn't
 * support PMP, and the 4726 either directly exports the device
 * attached to the first downstream port or acts as a hardware storage
 * controller and emulates a single ATA device (can be RAID 0/1 or some
 * other configuration).
 *
 * When there's no device attached to the first downstream port of the
 * 4726, "Config Disk" appears, which is a pseudo ATA device to
 * configure the 4726.  However, ATA emulation of the device is very
 * lame.  It doesn't send a signature D2H Reg FIS after the initial
 * hardreset, chokes on SRST w/ PMP==0 and has a bunch of other issues.
 *
 * The following function works around the problem by always using
 * hardreset on the port and not depending on receiving signature FIS
 * afterward.  If signature FIS isn't received soon, ATA class is
 * assumed without follow-up softreset.
 */
static void ahci_p5wdh_workaround(struct ata_host *host)
{
	static struct dmi_system_id sysids[] = {
		{
			.ident = "P5W DH Deluxe",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR,
					  "ASUSTEK COMPUTER INC"),
				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
			},
		},
		{ }
	};
	struct pci_dev *pdev = to_pci_dev(host->dev);

	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
	    dmi_check_system(sysids)) {
		struct ata_port *ap = host->ports[1];

		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
			   "Deluxe on-board SIMG4726 workaround\n");

		ap->ops = &ahci_p5wdh_ops;
		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
	}
}
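
/*
 * PCI probe: acquire resources, read the controller configuration,
 * allocate the host and ports, apply board-specific workarounds,
 * reset and initialize the controller and register the interrupt
 * handler.
 */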
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_port_info pi = ahci_port_info[ent->driver_data];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	int n_ports, i, rc;

	VPRINTK("ENTER\n");

	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* AHCI controllers often implement SFF compatible interface.
	 * Grab all PCI BARs just in case.
	 */
	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;

	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
		u8 map;

		/* ICH6s share the same PCI ID for both piix and ahci
		 * modes.  Enabling ahci mode while MAP indicates
		 * combined mode is a bad idea.  Yield to ata_piix.
		 */
		pci_read_config_byte(pdev, ICH_MAP, &map);
		if (map & 0x3) {
			dev_printk(KERN_INFO, &pdev->dev, "controller is in "
				   "combined mode, can't enable AHCI mode\n");
			return -ENODEV;
		}
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->flags |= (unsigned long)pi.private_data;

	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	/* save initial config */
	ahci_save_initial_config(pdev, hpriv);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	/* CAP.NP sometimes indicates the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);
	host->private_data = hpriv;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
				   0x100 + ap->port_no * 0x80, "port");

		/* set initial link pm policy */
		ap->pm_policy = NOT_AVAILABLE;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	/* apply workaround for ASUS P5W DH Deluxe mainboard */
	ahci_p5wdh_workaround(host);

	/* initialize adapter */
	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
	if (rc)
		return rc;

	rc = ahci_reset_controller(host);
	if (rc)
		return rc;

	ahci_init_controller(host);
	ahci_print_info(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
				 &ahci_sht);
}

static int __init ahci_init(void)
{
	return pci_register_driver(&ahci_pci_driver);
}

static void __exit ahci_exit(void)
{
	pci_unregister_driver(&ahci_pci_driver);
}


MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(ahci_init);
module_exit(ahci_exit);