  1. /*
  2. * ahci.c - AHCI SATA support
  3. *
  4. * Maintained by: Jeff Garzik <jgarzik@pobox.com>
  5. * Please ALWAYS copy linux-ide@vger.kernel.org
  6. * on emails.
  7. *
  8. * Copyright 2004-2005 Red Hat, Inc.
  9. *
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2, or (at your option)
  14. * any later version.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; see the file COPYING. If not, write to
  23. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  24. *
  25. *
  26. * libata documentation is available via 'make {ps|pdf}docs',
  27. * as Documentation/DocBook/libata.*
  28. *
  29. * AHCI hardware documentation:
  30. * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
  31. * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
  32. *
  33. */
  34. #include <linux/kernel.h>
  35. #include <linux/module.h>
  36. #include <linux/pci.h>
  37. #include <linux/init.h>
  38. #include <linux/blkdev.h>
  39. #include <linux/delay.h>
  40. #include <linux/interrupt.h>
  41. #include <linux/dma-mapping.h>
  42. #include <linux/device.h>
  43. #include <linux/dmi.h>
  44. #include <scsi/scsi_host.h>
  45. #include <scsi/scsi_cmnd.h>
  46. #include <linux/libata.h>
  47. #define DRV_NAME "ahci"
  48. #define DRV_VERSION "3.0"
  49. static int ahci_skip_host_reset;
  50. module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
  51. MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
  52. static int ahci_enable_alpm(struct ata_port *ap,
  53. enum link_pm policy);
  54. static void ahci_disable_alpm(struct ata_port *ap);
  55. enum {
  56. AHCI_PCI_BAR = 5,
  57. AHCI_MAX_PORTS = 32,
  58. AHCI_MAX_SG = 168, /* hardware max is 64K */
  59. AHCI_DMA_BOUNDARY = 0xffffffff,
  60. AHCI_MAX_CMDS = 32,
  61. AHCI_CMD_SZ = 32,
  62. AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
  63. AHCI_RX_FIS_SZ = 256,
  64. AHCI_CMD_TBL_CDB = 0x40,
  65. AHCI_CMD_TBL_HDR_SZ = 0x80,
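/* each S/G entry counted below is one 16-byte PRD (struct ahci_sg) */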
  66. AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
  67. AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
  68. AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
  69. AHCI_RX_FIS_SZ,
  70. AHCI_IRQ_ON_SG = (1 << 31),
  71. AHCI_CMD_ATAPI = (1 << 5),
  72. AHCI_CMD_WRITE = (1 << 6),
  73. AHCI_CMD_PREFETCH = (1 << 7),
  74. AHCI_CMD_RESET = (1 << 8),
  75. AHCI_CMD_CLR_BUSY = (1 << 10),
  76. RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
  77. RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
  78. RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
  79. board_ahci = 0,
  80. board_ahci_vt8251 = 1,
  81. board_ahci_ign_iferr = 2,
  82. board_ahci_sb600 = 3,
  83. board_ahci_mv = 4,
  84. board_ahci_sb700 = 5,
  85. board_ahci_mcp65 = 6,
  86. board_ahci_nopmp = 7,
  87. /* global controller registers */
  88. HOST_CAP = 0x00, /* host capabilities */
  89. HOST_CTL = 0x04, /* global host control */
  90. HOST_IRQ_STAT = 0x08, /* interrupt status */
  91. HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
  92. HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
  93. /* HOST_CTL bits */
  94. HOST_RESET = (1 << 0), /* reset controller; self-clear */
  95. HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
  96. HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
  97. /* HOST_CAP bits */
  98. HOST_CAP_SSC = (1 << 14), /* Slumber capable */
  99. HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
  100. HOST_CAP_CLO = (1 << 24), /* Command List Override support */
  101. HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
  102. HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
  103. HOST_CAP_SNTF = (1 << 29), /* SNotification register */
  104. HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
  105. HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
  106. /* registers for each SATA port */
  107. PORT_LST_ADDR = 0x00, /* command list DMA addr */
  108. PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
  109. PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
  110. PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
  111. PORT_IRQ_STAT = 0x10, /* interrupt status */
  112. PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
  113. PORT_CMD = 0x18, /* port command */
  114. PORT_TFDATA = 0x20, /* taskfile data */
  115. PORT_SIG = 0x24, /* device TF signature */
  116. PORT_CMD_ISSUE = 0x38, /* command issue */
  117. PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
  118. PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
  119. PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
  120. PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
  121. PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
  122. /* PORT_IRQ_{STAT,MASK} bits */
  123. PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
  124. PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
  125. PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
  126. PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
  127. PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
  128. PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
  129. PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
  130. PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
  131. PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
  132. PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
  133. PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
  134. PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
  135. PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
  136. PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
  137. PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
  138. PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
  139. PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
  140. PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
  141. PORT_IRQ_IF_ERR |
  142. PORT_IRQ_CONNECT |
  143. PORT_IRQ_PHYRDY |
  144. PORT_IRQ_UNK_FIS |
  145. PORT_IRQ_BAD_PMP,
  146. PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
  147. PORT_IRQ_TF_ERR |
  148. PORT_IRQ_HBUS_DATA_ERR,
  149. DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
  150. PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
  151. PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
  152. /* PORT_CMD bits */
  153. PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
  154. PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
  155. PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
  156. PORT_CMD_PMP = (1 << 17), /* PMP attached */
  157. PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
  158. PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
  159. PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
  160. PORT_CMD_CLO = (1 << 3), /* Command list override */
  161. PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
  162. PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
  163. PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
  164. PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
  165. PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
  166. PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
  167. PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
  168. /* hpriv->flags bits */
  169. AHCI_HFLAG_NO_NCQ = (1 << 0),
  170. AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
  171. AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
  172. AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
  173. AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
  174. AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
  175. AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
  176. AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
  177. AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
  178. AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
  179. /* ap->flags bits */
  180. AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
  181. ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
  182. ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
  183. ATA_FLAG_IPM,
  184. ICH_MAP = 0x90, /* ICH MAP register */
  185. };
  186. struct ahci_cmd_hdr {
  187. __le32 opts;
  188. __le32 status;
  189. __le32 tbl_addr;
  190. __le32 tbl_addr_hi;
  191. __le32 reserved[4];
  192. };
  193. struct ahci_sg {
  194. __le32 addr;
  195. __le32 addr_hi;
  196. __le32 reserved;
  197. __le32 flags_size;
  198. };
  199. struct ahci_host_priv {
  200. unsigned int flags; /* AHCI_HFLAG_* */
  201. u32 cap; /* cap to use */
  202. u32 port_map; /* port map to use */
  203. u32 saved_cap; /* saved initial cap */
  204. u32 saved_port_map; /* saved initial port_map */
  205. };
  206. struct ahci_port_priv {
  207. struct ata_link *active_link;
  208. struct ahci_cmd_hdr *cmd_slot;
  209. dma_addr_t cmd_slot_dma;
  210. void *cmd_tbl;
  211. dma_addr_t cmd_tbl_dma;
  212. void *rx_fis;
  213. dma_addr_t rx_fis_dma;
  214. /* for NCQ spurious interrupt analysis */
  215. unsigned int ncq_saw_d2h:1;
  216. unsigned int ncq_saw_dmas:1;
  217. unsigned int ncq_saw_sdb:1;
  218. u32 intr_mask; /* interrupts to enable */
  219. };
  220. static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
  221. static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
  222. static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
  223. static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
  224. static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
  225. static int ahci_port_start(struct ata_port *ap);
  226. static void ahci_port_stop(struct ata_port *ap);
  227. static void ahci_qc_prep(struct ata_queued_cmd *qc);
  228. static void ahci_freeze(struct ata_port *ap);
  229. static void ahci_thaw(struct ata_port *ap);
  230. static void ahci_pmp_attach(struct ata_port *ap);
  231. static void ahci_pmp_detach(struct ata_port *ap);
  232. static int ahci_softreset(struct ata_link *link, unsigned int *class,
  233. unsigned long deadline);
  234. static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
  235. unsigned long deadline);
  236. static int ahci_hardreset(struct ata_link *link, unsigned int *class,
  237. unsigned long deadline);
  238. static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
  239. unsigned long deadline);
  240. static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
  241. unsigned long deadline);
  242. static void ahci_postreset(struct ata_link *link, unsigned int *class);
  243. static void ahci_error_handler(struct ata_port *ap);
  244. static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
  245. static int ahci_port_resume(struct ata_port *ap);
  246. static void ahci_dev_config(struct ata_device *dev);
  247. static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
  248. static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
  249. u32 opts);
  250. #ifdef CONFIG_PM
  251. static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
  252. static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
  253. static int ahci_pci_device_resume(struct pci_dev *pdev);
  254. #endif
  255. static struct device_attribute *ahci_shost_attrs[] = {
  256. &dev_attr_link_power_management_policy,
  257. NULL
  258. };
  259. static struct scsi_host_template ahci_sht = {
  260. ATA_NCQ_SHT(DRV_NAME),
  261. .can_queue = AHCI_MAX_CMDS - 1,
  262. .sg_tablesize = AHCI_MAX_SG,
  263. .dma_boundary = AHCI_DMA_BOUNDARY,
  264. .shost_attrs = ahci_shost_attrs,
  265. };
  266. static struct ata_port_operations ahci_ops = {
  267. .inherits = &sata_pmp_port_ops,
  268. .qc_defer = sata_pmp_qc_defer_cmd_switch,
  269. .qc_prep = ahci_qc_prep,
  270. .qc_issue = ahci_qc_issue,
  271. .qc_fill_rtf = ahci_qc_fill_rtf,
  272. .freeze = ahci_freeze,
  273. .thaw = ahci_thaw,
  274. .softreset = ahci_softreset,
  275. .hardreset = ahci_hardreset,
  276. .postreset = ahci_postreset,
  277. .pmp_softreset = ahci_softreset,
  278. .error_handler = ahci_error_handler,
  279. .post_internal_cmd = ahci_post_internal_cmd,
  280. .dev_config = ahci_dev_config,
  281. .scr_read = ahci_scr_read,
  282. .scr_write = ahci_scr_write,
  283. .pmp_attach = ahci_pmp_attach,
  284. .pmp_detach = ahci_pmp_detach,
  285. .enable_pm = ahci_enable_alpm,
  286. .disable_pm = ahci_disable_alpm,
  287. #ifdef CONFIG_PM
  288. .port_suspend = ahci_port_suspend,
  289. .port_resume = ahci_port_resume,
  290. #endif
  291. .port_start = ahci_port_start,
  292. .port_stop = ahci_port_stop,
  293. };
  294. static struct ata_port_operations ahci_vt8251_ops = {
  295. .inherits = &ahci_ops,
  296. .hardreset = ahci_vt8251_hardreset,
  297. };
  298. static struct ata_port_operations ahci_p5wdh_ops = {
  299. .inherits = &ahci_ops,
  300. .hardreset = ahci_p5wdh_hardreset,
  301. };
  302. static struct ata_port_operations ahci_sb600_ops = {
  303. .inherits = &ahci_ops,
  304. .softreset = ahci_sb600_softreset,
  305. .pmp_softreset = ahci_sb600_softreset,
  306. };
  307. #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
  308. static const struct ata_port_info ahci_port_info[] = {
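/* must stay in board_ahci_* enum order; ahci_pci_tbl uses those values as driver_data indices into this table */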
  309. /* board_ahci */
  310. {
  311. .flags = AHCI_FLAG_COMMON,
  312. .pio_mask = 0x1f, /* pio0-4 */
  313. .udma_mask = ATA_UDMA6,
  314. .port_ops = &ahci_ops,
  315. },
  316. /* board_ahci_vt8251 */
  317. {
  318. AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
  319. .flags = AHCI_FLAG_COMMON,
  320. .pio_mask = 0x1f, /* pio0-4 */
  321. .udma_mask = ATA_UDMA6,
  322. .port_ops = &ahci_vt8251_ops,
  323. },
  324. /* board_ahci_ign_iferr */
  325. {
  326. AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
  327. .flags = AHCI_FLAG_COMMON,
  328. .pio_mask = 0x1f, /* pio0-4 */
  329. .udma_mask = ATA_UDMA6,
  330. .port_ops = &ahci_ops,
  331. },
  332. /* board_ahci_sb600 */
  333. {
  334. AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
  335. AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
  336. AHCI_HFLAG_SECT255),
  337. .flags = AHCI_FLAG_COMMON,
  338. .pio_mask = 0x1f, /* pio0-4 */
  339. .udma_mask = ATA_UDMA6,
  340. .port_ops = &ahci_sb600_ops,
  341. },
  342. /* board_ahci_mv */
  343. {
  344. AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
  345. AHCI_HFLAG_MV_PATA),
  346. .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
  347. ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
  348. .pio_mask = 0x1f, /* pio0-4 */
  349. .udma_mask = ATA_UDMA6,
  350. .port_ops = &ahci_ops,
  351. },
  352. /* board_ahci_sb700 */
  353. {
  354. AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
  355. .flags = AHCI_FLAG_COMMON,
  356. .pio_mask = 0x1f, /* pio0-4 */
  357. .udma_mask = ATA_UDMA6,
  358. .port_ops = &ahci_sb600_ops,
  359. },
  360. /* board_ahci_mcp65 */
  361. {
  362. AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
  363. .flags = AHCI_FLAG_COMMON,
  364. .pio_mask = 0x1f, /* pio0-4 */
  365. .udma_mask = ATA_UDMA6,
  366. .port_ops = &ahci_ops,
  367. },
  368. /* board_ahci_nopmp */
  369. {
  370. AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
  371. .flags = AHCI_FLAG_COMMON,
  372. .pio_mask = 0x1f, /* pio0-4 */
  373. .udma_mask = ATA_UDMA6,
  374. .port_ops = &ahci_ops,
  375. },
  376. };
  377. static const struct pci_device_id ahci_pci_tbl[] = {
  378. /* Intel */
  379. { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
  380. { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
  381. { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
  382. { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
  383. { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
  384. { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
  385. { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
  386. { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
  387. { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
  388. { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
  389. { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
  390. { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
  391. { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
  392. { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
  393. { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
  394. { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
  395. { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
  396. { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
  397. { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
  398. { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
  399. { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
  400. { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
  401. { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
  402. { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
  403. { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
  404. { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
  405. { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
  406. { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
  407. { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
  408. { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
  409. { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
  410. /* JMicron 360/1/3/5/6, match class to avoid IDE function */
  411. { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
  412. PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
  413. /* ATI */
  414. { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
  415. { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
  416. { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
  417. { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
  418. { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
  419. { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
  420. { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
  421. /* VIA */
  422. { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
  423. { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
  424. /* NVIDIA */
  425. { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
  426. { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
  427. { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
  428. { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
  429. { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
  430. { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
  431. { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
  432. { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
  433. { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
  434. { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
  435. { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
  436. { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
  437. { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
  438. { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
  439. { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
  440. { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
  441. { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
  442. { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
  443. { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
  444. { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
  445. { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
  446. { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
  447. { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
  448. { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
  449. { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
  450. { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
  451. { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
  452. { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
  453. { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
  454. { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
  455. { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
  456. { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
  457. { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
  458. { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
  459. { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
  460. { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
  461. { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
  462. { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
  463. { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
  464. { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
  465. { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
  466. { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
  467. { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
  468. { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
  469. { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
  470. { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
  471. { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
  472. { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
  473. { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
  474. { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
  475. { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
  476. { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
  477. { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
  478. { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
  479. { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
  480. { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
  481. { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
  482. { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
  483. { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
  484. { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
  485. { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
  486. { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
  487. { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
  488. { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
  489. { PCI_VDEVICE(NVIDIA, 0x0bc4), board_ahci }, /* MCP7B */
  490. { PCI_VDEVICE(NVIDIA, 0x0bc5), board_ahci }, /* MCP7B */
  491. { PCI_VDEVICE(NVIDIA, 0x0bc6), board_ahci }, /* MCP7B */
  492. { PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci }, /* MCP7B */
  493. /* SiS */
  494. { PCI_VDEVICE(SI, 0x1184), board_ahci_nopmp }, /* SiS 966 */
  495. { PCI_VDEVICE(SI, 0x1185), board_ahci_nopmp }, /* SiS 968 */
  496. { PCI_VDEVICE(SI, 0x0186), board_ahci_nopmp }, /* SiS 968 */
  497. /* Marvell */
  498. { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
  499. { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
  500. /* Generic, PCI class code for AHCI */
  501. { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
  502. PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
  503. { } /* terminate list */
  504. };
  505. static struct pci_driver ahci_pci_driver = {
  506. .name = DRV_NAME,
  507. .id_table = ahci_pci_tbl,
  508. .probe = ahci_init_one,
  509. .remove = ata_pci_remove_one,
  510. #ifdef CONFIG_PM
  511. .suspend = ahci_pci_device_suspend,
  512. .resume = ahci_pci_device_resume,
  513. #endif
  514. };
  515. static inline int ahci_nr_ports(u32 cap)
  516. {
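/* CAP.NP (bits 4:0) is a zero-based count of supported ports */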
  517. return (cap & 0x1f) + 1;
  518. }
  519. static inline void __iomem *__ahci_port_base(struct ata_host *host,
  520. unsigned int port_no)
  521. {
  522. void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
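/* per-port register blocks start at offset 0x100 and are 0x80 bytes apart */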
  523. return mmio + 0x100 + (port_no * 0x80);
  524. }
  525. static inline void __iomem *ahci_port_base(struct ata_port *ap)
  526. {
  527. return __ahci_port_base(ap->host, ap->port_no);
  528. }
  529. static void ahci_enable_ahci(void __iomem *mmio)
  530. {
  531. int i;
  532. u32 tmp;
  533. /* turn on AHCI_EN */
  534. tmp = readl(mmio + HOST_CTL);
  535. if (tmp & HOST_AHCI_EN)
  536. return;
  537. /* Some controllers need AHCI_EN to be written multiple times.
  538. * Try a few times before giving up.
  539. */
  540. for (i = 0; i < 5; i++) {
  541. tmp |= HOST_AHCI_EN;
  542. writel(tmp, mmio + HOST_CTL);
  543. tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
  544. if (tmp & HOST_AHCI_EN)
  545. return;
  546. msleep(10);
  547. }
  548. WARN_ON(1);
  549. }
  550. /**
  551. * ahci_save_initial_config - Save and fixup initial config values
  552. * @pdev: target PCI device
  553. * @hpriv: host private area to store config values
  554. *
  555. * Some registers containing configuration info might be setup by
  556. * BIOS and might be cleared on reset. This function saves the
  557. * initial values of those registers into @hpriv such that they
  558. * can be restored after controller reset.
  559. *
  560. * If inconsistent, config values are fixed up by this function.
  561. *
  562. * LOCKING:
  563. * None.
  564. */
  565. static void ahci_save_initial_config(struct pci_dev *pdev,
  566. struct ahci_host_priv *hpriv)
  567. {
  568. void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
  569. u32 cap, port_map;
  570. int i;
  571. int mv;
  572. /* make sure AHCI mode is enabled before accessing CAP */
  573. ahci_enable_ahci(mmio);
  574. /* Values prefixed with saved_ are written back to host after
  575. * reset. Values without are used for driver operation.
  576. */
  577. hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
  578. hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
  579. /* some chips have errata preventing 64bit use */
  580. if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
  581. dev_printk(KERN_INFO, &pdev->dev,
  582. "controller can't do 64bit DMA, forcing 32bit\n");
  583. cap &= ~HOST_CAP_64;
  584. }
  585. if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
  586. dev_printk(KERN_INFO, &pdev->dev,
  587. "controller can't do NCQ, turning off CAP_NCQ\n");
  588. cap &= ~HOST_CAP_NCQ;
  589. }
  590. if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
  591. dev_printk(KERN_INFO, &pdev->dev,
  592. "controller can do NCQ, turning on CAP_NCQ\n");
  593. cap |= HOST_CAP_NCQ;
  594. }
  595. if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
  596. dev_printk(KERN_INFO, &pdev->dev,
  597. "controller can't do PMP, turning off CAP_PMP\n");
  598. cap &= ~HOST_CAP_PMP;
  599. }
  600. if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
  601. port_map != 1) {
  602. dev_printk(KERN_INFO, &pdev->dev,
  603. "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
  604. port_map, 1);
  605. port_map = 1;
  606. }
  607. /*
  608. * Temporary Marvell 6145 hack: PATA port presence
  609. * is asserted through the standard AHCI port
  610. * presence register, as bit 4 (counting from 0)
  611. */
  612. if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
  613. if (pdev->device == 0x6121)
  614. mv = 0x3;
  615. else
  616. mv = 0xf;
  617. dev_printk(KERN_ERR, &pdev->dev,
  618. "MV_AHCI HACK: port_map %x -> %x\n",
  619. port_map,
  620. port_map & mv);
  621. port_map &= mv;
  622. }
  623. /* cross check port_map and cap.n_ports */
  624. if (port_map) {
  625. int map_ports = 0;
  626. for (i = 0; i < AHCI_MAX_PORTS; i++)
  627. if (port_map & (1 << i))
  628. map_ports++;
  629. /* If PI has more ports than n_ports, whine, clear
  630. * port_map and let it be generated from n_ports.
  631. */
  632. if (map_ports > ahci_nr_ports(cap)) {
  633. dev_printk(KERN_WARNING, &pdev->dev,
  634. "implemented port map (0x%x) contains more "
  635. "ports than nr_ports (%u), using nr_ports\n",
  636. port_map, ahci_nr_ports(cap));
  637. port_map = 0;
  638. }
  639. }
  640. /* fabricate port_map from cap.nr_ports */
  641. if (!port_map) {
  642. port_map = (1 << ahci_nr_ports(cap)) - 1;
  643. dev_printk(KERN_WARNING, &pdev->dev,
  644. "forcing PORTS_IMPL to 0x%x\n", port_map);
  645. /* write the fixed up value to the PI register */
  646. hpriv->saved_port_map = port_map;
  647. }
  648. /* record values to use during operation */
  649. hpriv->cap = cap;
  650. hpriv->port_map = port_map;
  651. }
  652. /**
  653. * ahci_restore_initial_config - Restore initial config
  654. * @host: target ATA host
  655. *
  656. * Restore initial config stored by ahci_save_initial_config().
  657. *
  658. * LOCKING:
  659. * None.
  660. */
  661. static void ahci_restore_initial_config(struct ata_host *host)
  662. {
  663. struct ahci_host_priv *hpriv = host->private_data;
  664. void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
  665. writel(hpriv->saved_cap, mmio + HOST_CAP);
  666. writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
  667. (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
  668. }
  669. static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
  670. {
  671. static const int offset[] = {
  672. [SCR_STATUS] = PORT_SCR_STAT,
  673. [SCR_CONTROL] = PORT_SCR_CTL,
  674. [SCR_ERROR] = PORT_SCR_ERR,
  675. [SCR_ACTIVE] = PORT_SCR_ACT,
  676. [SCR_NOTIFICATION] = PORT_SCR_NTF,
  677. };
  678. struct ahci_host_priv *hpriv = ap->host->private_data;
  679. if (sc_reg < ARRAY_SIZE(offset) &&
  680. (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
  681. return offset[sc_reg];
  682. return 0;
  683. }
  684. static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
  685. {
  686. void __iomem *port_mmio = ahci_port_base(ap);
  687. int offset = ahci_scr_offset(ap, sc_reg);
  688. if (offset) {
  689. *val = readl(port_mmio + offset);
  690. return 0;
  691. }
  692. return -EINVAL;
  693. }
  694. static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
  695. {
  696. void __iomem *port_mmio = ahci_port_base(ap);
  697. int offset = ahci_scr_offset(ap, sc_reg);
  698. if (offset) {
  699. writel(val, port_mmio + offset);
  700. return 0;
  701. }
  702. return -EINVAL;
  703. }
  704. static void ahci_start_engine(struct ata_port *ap)
  705. {
  706. void __iomem *port_mmio = ahci_port_base(ap);
  707. u32 tmp;
  708. /* start DMA */
  709. tmp = readl(port_mmio + PORT_CMD);
  710. tmp |= PORT_CMD_START;
  711. writel(tmp, port_mmio + PORT_CMD);
  712. readl(port_mmio + PORT_CMD); /* flush */
  713. }
  714. static int ahci_stop_engine(struct ata_port *ap)
  715. {
  716. void __iomem *port_mmio = ahci_port_base(ap);
  717. u32 tmp;
  718. tmp = readl(port_mmio + PORT_CMD);
  719. /* check if the HBA is idle */
  720. if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
  721. return 0;
  722. /* setting HBA to idle */
  723. tmp &= ~PORT_CMD_START;
  724. writel(tmp, port_mmio + PORT_CMD);
  725. /* wait for engine to stop. This could be as long as 500 msec */
  726. tmp = ata_wait_register(port_mmio + PORT_CMD,
  727. PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
  728. if (tmp & PORT_CMD_LIST_ON)
  729. return -EIO;
  730. return 0;
  731. }
  732. static void ahci_start_fis_rx(struct ata_port *ap)
  733. {
  734. void __iomem *port_mmio = ahci_port_base(ap);
  735. struct ahci_host_priv *hpriv = ap->host->private_data;
  736. struct ahci_port_priv *pp = ap->private_data;
  737. u32 tmp;
  738. /* set FIS registers */
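/* the "(x >> 16) >> 16" form extracts the upper 32 bits without a single
 * shift by 32, which would be undefined when dma_addr_t is only 32 bits wide */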
  739. if (hpriv->cap & HOST_CAP_64)
  740. writel((pp->cmd_slot_dma >> 16) >> 16,
  741. port_mmio + PORT_LST_ADDR_HI);
  742. writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
  743. if (hpriv->cap & HOST_CAP_64)
  744. writel((pp->rx_fis_dma >> 16) >> 16,
  745. port_mmio + PORT_FIS_ADDR_HI);
  746. writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
  747. /* enable FIS reception */
  748. tmp = readl(port_mmio + PORT_CMD);
  749. tmp |= PORT_CMD_FIS_RX;
  750. writel(tmp, port_mmio + PORT_CMD);
  751. /* flush */
  752. readl(port_mmio + PORT_CMD);
  753. }
  754. static int ahci_stop_fis_rx(struct ata_port *ap)
  755. {
  756. void __iomem *port_mmio = ahci_port_base(ap);
  757. u32 tmp;
  758. /* disable FIS reception */
  759. tmp = readl(port_mmio + PORT_CMD);
  760. tmp &= ~PORT_CMD_FIS_RX;
  761. writel(tmp, port_mmio + PORT_CMD);
  762. /* wait for completion, spec says 500ms, give it 1000 */
  763. tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
  764. PORT_CMD_FIS_ON, 10, 1000);
  765. if (tmp & PORT_CMD_FIS_ON)
  766. return -EBUSY;
  767. return 0;
  768. }
  769. static void ahci_power_up(struct ata_port *ap)
  770. {
  771. struct ahci_host_priv *hpriv = ap->host->private_data;
  772. void __iomem *port_mmio = ahci_port_base(ap);
  773. u32 cmd;
  774. cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
  775. /* spin up device */
  776. if (hpriv->cap & HOST_CAP_SSS) {
  777. cmd |= PORT_CMD_SPIN_UP;
  778. writel(cmd, port_mmio + PORT_CMD);
  779. }
  780. /* wake up link */
  781. writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
  782. }
  783. static void ahci_disable_alpm(struct ata_port *ap)
  784. {
  785. struct ahci_host_priv *hpriv = ap->host->private_data;
  786. void __iomem *port_mmio = ahci_port_base(ap);
  787. u32 cmd;
  788. struct ahci_port_priv *pp = ap->private_data;
  789. /* IPM bits should be disabled by libata-core */
  790. /* get the existing command bits */
  791. cmd = readl(port_mmio + PORT_CMD);
  792. /* disable ALPM and ASP */
  793. cmd &= ~PORT_CMD_ASP;
  794. cmd &= ~PORT_CMD_ALPE;
  795. /* force the interface back to active */
  796. cmd |= PORT_CMD_ICC_ACTIVE;
  797. /* write out new cmd value */
  798. writel(cmd, port_mmio + PORT_CMD);
  799. cmd = readl(port_mmio + PORT_CMD);
  800. /* wait 10ms to be sure we've come out of any low power state */
  801. msleep(10);
  802. /* clear out any PhyRdy stuff from interrupt status */
  803. writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
  804. /* go ahead and clean out PhyRdy Change from SError too */
  805. ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
  806. /*
  807. * Clear flag to indicate that we should ignore all PhyRdy
  808. * state changes
  809. */
  810. hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
  811. /*
  812. * Enable interrupts on Phy Ready.
  813. */
  814. pp->intr_mask |= PORT_IRQ_PHYRDY;
  815. writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
  816. /*
  817. * don't change the link pm policy - we can be called
  818. * just to turn off link pm temporarily
  819. */
  820. }
  821. static int ahci_enable_alpm(struct ata_port *ap,
  822. enum link_pm policy)
  823. {
  824. struct ahci_host_priv *hpriv = ap->host->private_data;
  825. void __iomem *port_mmio = ahci_port_base(ap);
  826. u32 cmd;
  827. struct ahci_port_priv *pp = ap->private_data;
  828. u32 asp;
  829. /* Make sure the host is capable of link power management */
  830. if (!(hpriv->cap & HOST_CAP_ALPM))
  831. return -EINVAL;
  832. switch (policy) {
  833. case MAX_PERFORMANCE:
  834. case NOT_AVAILABLE:
  835. /*
  836. * if we came here with NOT_AVAILABLE,
  837. * it just means this is the first time we
  838. * have tried to enable - default to max performance,
  839. * and let the user go to lower power modes on request.
  840. */
  841. ahci_disable_alpm(ap);
  842. return 0;
  843. case MIN_POWER:
  844. /* configure HBA to enter SLUMBER */
  845. asp = PORT_CMD_ASP;
  846. break;
  847. case MEDIUM_POWER:
  848. /* configure HBA to enter PARTIAL */
  849. asp = 0;
  850. break;
  851. default:
  852. return -EINVAL;
  853. }
  854. /*
  855. * Disable interrupts on Phy Ready. This keeps us from
  856. * getting woken up due to spurious phy ready interrupts
  857. * TBD - Hot plug should be done via polling now, is
  858. * that even supported?
  859. */
  860. pp->intr_mask &= ~PORT_IRQ_PHYRDY;
  861. writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
  862. /*
  863. * Set a flag to indicate that we should ignore all PhyRdy
  864. * state changes since these can happen now whenever we
  865. * change link state
  866. */
  867. hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
  868. /* get the existing command bits */
  869. cmd = readl(port_mmio + PORT_CMD);
  870. /*
  871. * Set ASP based on Policy
  872. */
  873. cmd |= asp;
  874. /*
  875. * Setting this bit will instruct the HBA to aggressively
  876. * enter a lower power link state when it's appropriate and
  877. * based on the value set above for ASP
  878. */
  879. cmd |= PORT_CMD_ALPE;
  880. /* write out new cmd value */
  881. writel(cmd, port_mmio + PORT_CMD);
  882. cmd = readl(port_mmio + PORT_CMD);
  883. /* IPM bits should be set by libata-core */
  884. return 0;
  885. }
  886. #ifdef CONFIG_PM
  887. static void ahci_power_down(struct ata_port *ap)
  888. {
  889. struct ahci_host_priv *hpriv = ap->host->private_data;
  890. void __iomem *port_mmio = ahci_port_base(ap);
  891. u32 cmd, scontrol;
  892. if (!(hpriv->cap & HOST_CAP_SSS))
  893. return;
  894. /* put device into listen mode, first set PxSCTL.DET to 0 */
  895. scontrol = readl(port_mmio + PORT_SCR_CTL);
  896. scontrol &= ~0xf;
  897. writel(scontrol, port_mmio + PORT_SCR_CTL);
  898. /* then set PxCMD.SUD to 0 */
  899. cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
  900. cmd &= ~PORT_CMD_SPIN_UP;
  901. writel(cmd, port_mmio + PORT_CMD);
  902. }
  903. #endif
  904. static void ahci_start_port(struct ata_port *ap)
  905. {
  906. /* enable FIS reception */
  907. ahci_start_fis_rx(ap);
  908. /* enable DMA */
  909. ahci_start_engine(ap);
  910. }
  911. static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
  912. {
  913. int rc;
  914. /* disable DMA */
  915. rc = ahci_stop_engine(ap);
  916. if (rc) {
  917. *emsg = "failed to stop engine";
  918. return rc;
  919. }
  920. /* disable FIS reception */
  921. rc = ahci_stop_fis_rx(ap);
  922. if (rc) {
  923. *emsg = "failed stop FIS RX";
  924. return rc;
  925. }
  926. return 0;
  927. }
  928. static int ahci_reset_controller(struct ata_host *host)
  929. {
  930. struct pci_dev *pdev = to_pci_dev(host->dev);
  931. struct ahci_host_priv *hpriv = host->private_data;
  932. void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
  933. u32 tmp;
  934. /* we must be in AHCI mode, before using anything
  935. * AHCI-specific, such as HOST_RESET.
  936. */
  937. ahci_enable_ahci(mmio);
  938. /* global controller reset */
  939. if (!ahci_skip_host_reset) {
  940. tmp = readl(mmio + HOST_CTL);
  941. if ((tmp & HOST_RESET) == 0) {
  942. writel(tmp | HOST_RESET, mmio + HOST_CTL);
  943. readl(mmio + HOST_CTL); /* flush */
  944. }
  945. /* reset must complete within 1 second, or
  946. * the hardware should be considered fried.
  947. */
  948. ssleep(1);
  949. tmp = readl(mmio + HOST_CTL);
  950. if (tmp & HOST_RESET) {
  951. dev_printk(KERN_ERR, host->dev,
  952. "controller reset failed (0x%x)\n", tmp);
  953. return -EIO;
  954. }
  955. /* turn on AHCI mode */
  956. ahci_enable_ahci(mmio);
  957. /* Some registers might be cleared on reset. Restore
  958. * initial values.
  959. */
  960. ahci_restore_initial_config(host);
  961. } else
  962. dev_printk(KERN_INFO, host->dev,
  963. "skipping global host reset\n");
  964. if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
  965. u16 tmp16;
  966. /* configure PCS */
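/* the low bits of the Intel PCS register (config offset 0x92) gate the
 * individual SATA ports; OR in port_map so every implemented port is enabled */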
  967. pci_read_config_word(pdev, 0x92, &tmp16);
  968. if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
  969. tmp16 |= hpriv->port_map;
  970. pci_write_config_word(pdev, 0x92, tmp16);
  971. }
  972. }
  973. return 0;
  974. }
  975. static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
  976. int port_no, void __iomem *mmio,
  977. void __iomem *port_mmio)
  978. {
  979. const char *emsg = NULL;
  980. int rc;
  981. u32 tmp;
  982. /* make sure port is not active */
  983. rc = ahci_deinit_port(ap, &emsg);
  984. if (rc)
  985. dev_printk(KERN_WARNING, &pdev->dev,
  986. "%s (%d)\n", emsg, rc);
  987. /* clear SError */
  988. tmp = readl(port_mmio + PORT_SCR_ERR);
  989. VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
  990. writel(tmp, port_mmio + PORT_SCR_ERR);
  991. /* clear port IRQ */
  992. tmp = readl(port_mmio + PORT_IRQ_STAT);
  993. VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
  994. if (tmp)
  995. writel(tmp, port_mmio + PORT_IRQ_STAT);
  996. writel(1 << port_no, mmio + HOST_IRQ_STAT);
  997. }
  998. static void ahci_init_controller(struct ata_host *host)
  999. {
  1000. struct ahci_host_priv *hpriv = host->private_data;
  1001. struct pci_dev *pdev = to_pci_dev(host->dev);
  1002. void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
  1003. int i;
  1004. void __iomem *port_mmio;
  1005. u32 tmp;
  1006. int mv;
  1007. if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
  1008. if (pdev->device == 0x6121)
  1009. mv = 2;
  1010. else
  1011. mv = 4;
  1012. port_mmio = __ahci_port_base(host, mv);
  1013. writel(0, port_mmio + PORT_IRQ_MASK);
  1014. /* clear port IRQ */
  1015. tmp = readl(port_mmio + PORT_IRQ_STAT);
  1016. VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
  1017. if (tmp)
  1018. writel(tmp, port_mmio + PORT_IRQ_STAT);
  1019. }
  1020. for (i = 0; i < host->n_ports; i++) {
  1021. struct ata_port *ap = host->ports[i];
  1022. port_mmio = ahci_port_base(ap);
  1023. if (ata_port_is_dummy(ap))
  1024. continue;
  1025. ahci_port_init(pdev, ap, i, mmio, port_mmio);
  1026. }
  1027. tmp = readl(mmio + HOST_CTL);
  1028. VPRINTK("HOST_CTL 0x%x\n", tmp);
  1029. writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
  1030. tmp = readl(mmio + HOST_CTL);
  1031. VPRINTK("HOST_CTL 0x%x\n", tmp);
  1032. }
  1033. static void ahci_dev_config(struct ata_device *dev)
  1034. {
  1035. struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
  1036. if (hpriv->flags & AHCI_HFLAG_SECT255) {
  1037. dev->max_sectors = 255;
  1038. ata_dev_printk(dev, KERN_INFO,
  1039. "SB600 AHCI: limiting to 255 sectors per cmd\n");
  1040. }
  1041. }
  1042. static unsigned int ahci_dev_classify(struct ata_port *ap)
  1043. {
  1044. void __iomem *port_mmio = ahci_port_base(ap);
  1045. struct ata_taskfile tf;
  1046. u32 tmp;
  1047. tmp = readl(port_mmio + PORT_SIG);
  1048. tf.lbah = (tmp >> 24) & 0xff;
  1049. tf.lbam = (tmp >> 16) & 0xff;
  1050. tf.lbal = (tmp >> 8) & 0xff;
  1051. tf.nsect = (tmp) & 0xff;
  1052. return ata_dev_classify(&tf);
  1053. }
  1054. static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
  1055. u32 opts)
  1056. {
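/* opts becomes command-header DW0: bits 4:0 CFL (FIS length in dwords),
 * bit 5 ATAPI, bit 6 write, bits 15:12 PMP port, bits 31:16 PRDT length */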
  1057. dma_addr_t cmd_tbl_dma;
  1058. cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
  1059. pp->cmd_slot[tag].opts = cpu_to_le32(opts);
  1060. pp->cmd_slot[tag].status = 0;
  1061. pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
  1062. pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
  1063. }
  1064. static int ahci_kick_engine(struct ata_port *ap, int force_restart)
  1065. {
  1066. void __iomem *port_mmio = ahci_port_base(ap);
  1067. struct ahci_host_priv *hpriv = ap->host->private_data;
  1068. u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
  1069. u32 tmp;
  1070. int busy, rc;
  1071. /* do we need to kick the port? */
  1072. busy = status & (ATA_BUSY | ATA_DRQ);
  1073. if (!busy && !force_restart)
  1074. return 0;
  1075. /* stop engine */
  1076. rc = ahci_stop_engine(ap);
  1077. if (rc)
  1078. goto out_restart;
  1079. /* need to do CLO? */
  1080. if (!busy) {
  1081. rc = 0;
  1082. goto out_restart;
  1083. }
  1084. if (!(hpriv->cap & HOST_CAP_CLO)) {
  1085. rc = -EOPNOTSUPP;
  1086. goto out_restart;
  1087. }
  1088. /* perform CLO */
  1089. tmp = readl(port_mmio + PORT_CMD);
  1090. tmp |= PORT_CMD_CLO;
  1091. writel(tmp, port_mmio + PORT_CMD);
  1092. rc = 0;
  1093. tmp = ata_wait_register(port_mmio + PORT_CMD,
  1094. PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
  1095. if (tmp & PORT_CMD_CLO)
  1096. rc = -EIO;
  1097. /* restart engine */
  1098. out_restart:
  1099. ahci_start_engine(ap);
  1100. return rc;
  1101. }
  1102. static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
  1103. struct ata_taskfile *tf, int is_cmd, u16 flags,
  1104. unsigned long timeout_msec)
  1105. {
  1106. const u32 cmd_fis_len = 5; /* five dwords */
  1107. struct ahci_port_priv *pp = ap->private_data;
  1108. void __iomem *port_mmio = ahci_port_base(ap);
  1109. u8 *fis = pp->cmd_tbl;
  1110. u32 tmp;
  1111. /* prep the command */
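/* internal/polled commands always go into command slot 0 */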
  1112. ata_tf_to_fis(tf, pmp, is_cmd, fis);
  1113. ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
  1114. /* issue & wait */
  1115. writel(1, port_mmio + PORT_CMD_ISSUE);
  1116. if (timeout_msec) {
  1117. tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
  1118. 1, timeout_msec);
  1119. if (tmp & 0x1) {
  1120. ahci_kick_engine(ap, 1);
  1121. return -EBUSY;
  1122. }
  1123. } else
  1124. readl(port_mmio + PORT_CMD_ISSUE); /* flush */
  1125. return 0;
  1126. }
  1127. static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
  1128. int pmp, unsigned long deadline,
  1129. int (*check_ready)(struct ata_link *link))
  1130. {
  1131. struct ata_port *ap = link->ap;
  1132. const char *reason = NULL;
  1133. unsigned long now, msecs;
  1134. struct ata_taskfile tf;
  1135. int rc;
  1136. DPRINTK("ENTER\n");
  1137. /* prepare for SRST (AHCI-1.1 10.4.1) */
  1138. rc = ahci_kick_engine(ap, 1);
  1139. if (rc && rc != -EOPNOTSUPP)
  1140. ata_link_printk(link, KERN_WARNING,
  1141. "failed to reset engine (errno=%d)\n", rc);
  1142. ata_tf_init(link->device, &tf);
  1143. /* issue the first H2D Register FIS */
  1144. msecs = 0;
  1145. now = jiffies;
  1146. if (time_before(now, deadline))
  1147. msecs = jiffies_to_msecs(deadline - now);
  1148. tf.ctl |= ATA_SRST;
  1149. if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
  1150. AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
  1151. rc = -EIO;
  1152. reason = "1st FIS failed";
  1153. goto fail;
  1154. }
  1155. /* spec says at least 5us, but be generous and sleep for 1ms */
  1156. msleep(1);
  1157. /* issue the second H2D Register FIS */
  1158. tf.ctl &= ~ATA_SRST;
  1159. ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
  1160. /* wait for link to become ready */
  1161. rc = ata_wait_after_reset(link, deadline, check_ready);
  1162. /* the link is known to be occupied, so -ENODEV also counts as an error */
  1163. if (rc) {
  1164. reason = "device not ready";
  1165. goto fail;
  1166. }
  1167. *class = ahci_dev_classify(ap);
  1168. DPRINTK("EXIT, class=%u\n", *class);
  1169. return 0;
  1170. fail:
  1171. ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
  1172. return rc;
  1173. }
  1174. static int ahci_check_ready(struct ata_link *link)
  1175. {
  1176. void __iomem *port_mmio = ahci_port_base(link->ap);
  1177. u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
  1178. return ata_check_ready(status);
  1179. }
  1180. static int ahci_softreset(struct ata_link *link, unsigned int *class,
  1181. unsigned long deadline)
  1182. {
  1183. int pmp = sata_srst_pmp(link);
  1184. DPRINTK("ENTER\n");
  1185. return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
  1186. }
  1187. static int ahci_sb600_check_ready(struct ata_link *link)
  1188. {
  1189. void __iomem *port_mmio = ahci_port_base(link->ap);
  1190. u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
  1191. u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
  1192. /*
  1193. * If BAD PMP is reported (a HW bug on these chips), fail right away
  1194. * instead of waiting for TFDATA to clear; this saves the timeout delay.
  1195. */
  1196. if (irq_status & PORT_IRQ_BAD_PMP)
  1197. return -EIO;
  1198. return ata_check_ready(status);
  1199. }
  1200. static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
  1201. unsigned long deadline)
  1202. {
  1203. struct ata_port *ap = link->ap;
  1204. void __iomem *port_mmio = ahci_port_base(ap);
  1205. int pmp = sata_srst_pmp(link);
  1206. int rc;
  1207. u32 irq_sts;
  1208. DPRINTK("ENTER\n");
  1209. rc = ahci_do_softreset(link, class, pmp, deadline,
  1210. ahci_sb600_check_ready);
  1211. /*
  1212. * Soft reset fails on some ATI chips with IPMS set when PMP
  1213. * is enabled but SATA HDD/ODD is connected to SATA port,
  1214. * do soft reset again to port 0.
  1215. */
  1216. if (rc == -EIO) {
  1217. irq_sts = readl(port_mmio + PORT_IRQ_STAT);
  1218. if (irq_sts & PORT_IRQ_BAD_PMP) {
  1219. ata_link_printk(link, KERN_WARNING,
  1220. "failed due to HW bug, retry pmp=0\n");
  1221. rc = ahci_do_softreset(link, class, 0, deadline,
  1222. ahci_check_ready);
  1223. }
  1224. }
  1225. return rc;
  1226. }
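
/*
 * ahci_hardreset - standard AHCI hardreset.  Stops the DMA engine,
 * pre-fills the D2H reception area with BSY so the post-reset wait
 * really sees the signature FIS, performs a COMRESET through
 * sata_link_hardreset() and classifies the attached device if the
 * link came back online.
 */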
static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, timing, deadline, &online,
				 ahci_check_ready);

	ahci_start_engine(ap);

	if (online)
		*class = ahci_dev_classify(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}

static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	bool online;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline, &online, NULL);

	ahci_start_engine(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);

	/* vt8251 doesn't clear BSY on signature FIS reception,
	 * request follow-up softreset.
	 */
	return online ? -EAGAIN : rc;
}

static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	bool online;
	int rc;

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline, &online, NULL);

	ahci_start_engine(ap);

	/* The pseudo configuration device on SIMG4726 attached to
	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
	 * hardreset if no device is attached to the first downstream
	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
	 * work around this, wait for !BSY only briefly.  If BSY isn't
	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
	 *
	 * Wait for two seconds.  Devices attached to downstream port
	 * which can't process the following IDENTIFY after this will
	 * have to be reset again.  For most cases, this should
	 * suffice while making probing snappish enough.
	 */
	if (online) {
		rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
					  ahci_check_ready);
		if (rc)
			ahci_kick_engine(ap, 0);
	}

	return rc;
}

static void ahci_postreset(struct ata_link *link, unsigned int *class)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 new_tmp, tmp;

	ata_std_postreset(link, class);

	/* Make sure port's ATAPI bit is set appropriately */
	new_tmp = tmp = readl(port_mmio + PORT_CMD);
	if (*class == ATA_DEV_ATAPI)
		new_tmp |= PORT_CMD_ATAPI;
	else
		new_tmp &= ~PORT_CMD_ATAPI;
	if (new_tmp != tmp) {
		writel(new_tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD); /* flush */
	}
}
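
/*
 * ahci_fill_sg - translate the qc's scatterlist into the PRD entries
 * that follow the command FIS in the command table.  Returns the
 * number of S/G entries written.
 */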
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
	struct scatterlist *sg;
	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	unsigned int si;

	VPRINTK("ENTER\n");

	/*
	 * Next, the S/G list.
	 */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
	}

	return si;
}
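
/*
 * ahci_qc_prep - build the command table (H2D FIS, optional ATAPI CDB
 * and S/G list) for a queued command and describe it in the command
 * slot matching the command's tag.
 */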
static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}
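
/*
 * ahci_error_intr - decode an error interrupt: record SError and the
 * raw IRQ status for EH, charge device errors to the active qc or
 * link, then freeze or abort the port depending on severity.
 */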
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	u32 serror;

	/* determine active link */
	ata_port_for_each_link(link, ap)
		if (ata_link_active(link))
			break;
	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(ap, SCR_ERROR, &serror);
	ahci_scr_write(ap, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link.  There's no active qc on NCQ errors.  It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x",
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		host_ehi->err_mask |= AC_ERR_ATA_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
				  irq_stat & PORT_IRQ_CONNECT ?
				  "connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */
	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
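
/*
 * ahci_port_intr - per-port interrupt handler.  Acks PORT_IRQ_STAT,
 * hands fatal conditions to ahci_error_intr(), forwards asynchronous
 * notification, and completes finished commands based on PxSACT or
 * PxCI depending on whether an NCQ phase is in progress.
 */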
static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 status, qc_active;
	int rc;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	/* If we are getting PhyRdy, this is
	 * just a power state change, we should
	 * clear out this, plus the PhyRdy/Comm
	 * Wake bits from Serror
	 */
	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
	    (status & PORT_IRQ_PHYRDY)) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some controllers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 */
			const __le32 *f = pp->rx_fis + RX_FIS_SDB;
			u32 f0 = le32_to_cpu(f[0]);

			if (f0 & (1 << 15))
				sata_async_notification(ap);
		}
	}

	/* pp->active_link is valid iff any command is in flight */
	if (ap->qc_active && pp->active_link->sactive)
		qc_active = readl(port_mmio + PORT_SCR_ACT);
	else
		qc_active = readl(port_mmio + PORT_CMD_ISSUE);

	rc = ata_qc_complete_multiple(ap, qc_active);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}
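
/*
 * ahci_interrupt - top-level IRQ handler.  Reads HOST_IRQ_STAT, calls
 * ahci_port_intr() for every implemented port with a pending bit and
 * acks the handled bits in one write.
 */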
static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_ack = 0;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = host->iomap[AHCI_PCI_BAR];

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	irq_stat &= hpriv->port_map;
	if (!irq_stat)
		return IRQ_NONE;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_stat & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_port_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					   "interrupt on disabled port %u\n", i);
		}

		irq_ack |= (1 << i);
	}

	if (irq_ack) {
		writel(irq_ack, mmio + HOST_IRQ_STAT);
		handled = 1;
	}

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
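
/*
 * ahci_qc_issue - fire off a prepared command: remember the active
 * link for the completion path, set PxSACT for NCQ commands and ring
 * the doorbell by writing the tag's bit into PxCI.
 */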
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
	 */
	pp->active_link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */

	return 0;
}

static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct ahci_port_priv *pp = qc->ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;

	ata_tf_from_fis(d2h_fis, &qc->result_tf);
	return true;
}

static void ahci_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);

	/* turn IRQ off */
	writel(0, port_mmio + PORT_IRQ_MASK);
}

static void ahci_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;
	struct ahci_port_priv *pp = ap->private_data;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	sata_pmp_error_handler(ap);
}

static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		ahci_kick_engine(ap, 1);
}

static void ahci_pmp_attach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd |= PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask |= PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_pmp_detach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd &= ~PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}

#ifdef CONFIG_PM
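/*
 * ahci_port_suspend - deinit the port (stop engine and FIS RX) and
 * power it down; on failure restart the port so it stays usable.
 */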
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc == 0)
		ahci_power_down(ap);
	else {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_start_port(ap);
	}

	return rc;
}

static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 ctl;

	if (mesg.event & PM_EVENT_SLEEP) {
		/* AHCI spec rev1.1 section 8.3.3:
		 * Software must disable interrupts prior to requesting a
		 * transition of the HBA to D3 state.
		 */
		ctl = readl(mmio + HOST_CTL);
		ctl &= ~HOST_IRQ_EN;
		writel(ctl, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	return ata_pci_device_suspend(pdev, mesg);
}

static int ahci_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif
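
/*
 * ahci_port_start - allocate per-port private data and carve the
 * coherent DMA buffer into the command slot list, received-FIS area
 * and command table area, then bring the port up via ahci_port_resume().
 */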
static int ahci_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += AHCI_RX_FIS_SZ;
	mem_dma += AHCI_RX_FIS_SZ;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}

static void ahci_port_stop(struct ata_port *ap)
{
	const char *emsg = NULL;
	int rc;

	/* de-initialize port */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
}
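
/*
 * ahci_configure_dma_masks - use 64-bit DMA when the controller
 * advertises HOST_CAP_64 and the platform accepts the mask, otherwise
 * fall back to 32-bit streaming and consistent DMA masks.
 */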
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
	int rc;

	if (using_dac &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}
	return 0;
}
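
/*
 * ahci_print_info - log controller version, slot/port counts, link
 * speed, operating mode and the capability flags at probe time.
 */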
static void ahci_print_info(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 vers, cap, impl, speed;
	const char *speed_s;
	u16 cc;
	const char *scc_s;

	vers = readl(mmio + HOST_VERSION);
	cap = hpriv->cap;
	impl = hpriv->port_map;

	speed = (cap >> 20) & 0xf;
	if (speed == 1)
		speed_s = "1.5";
	else if (speed == 2)
		speed_s = "3";
	else
		speed_s = "?";

	pci_read_config_word(pdev, 0x0a, &cc);
	if (cc == PCI_CLASS_STORAGE_IDE)
		scc_s = "IDE";
	else if (cc == PCI_CLASS_STORAGE_SATA)
		scc_s = "SATA";
	else if (cc == PCI_CLASS_STORAGE_RAID)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
		   "AHCI %02x%02x.%02x%02x "
		   "%u slots %u ports %s Gbps 0x%x impl %s mode\n",
		   (vers >> 24) & 0xff,
		   (vers >> 16) & 0xff,
		   (vers >> 8) & 0xff,
		   vers & 0xff,
		   ((cap >> 8) & 0x1f) + 1,
		   (cap & 0x1f) + 1,
		   speed_s,
		   impl,
		   scc_s);

	dev_printk(KERN_INFO, &pdev->dev,
		   "flags: "
		   "%s%s%s%s%s%s%s"
		   "%s%s%s%s%s%s%s\n",
		   cap & (1 << 31) ? "64bit " : "",
		   cap & (1 << 30) ? "ncq " : "",
		   cap & (1 << 29) ? "sntf " : "",
		   cap & (1 << 28) ? "ilck " : "",
		   cap & (1 << 27) ? "stag " : "",
		   cap & (1 << 26) ? "pm " : "",
		   cap & (1 << 25) ? "led " : "",
		   cap & (1 << 24) ? "clo " : "",
		   cap & (1 << 19) ? "nz " : "",
		   cap & (1 << 18) ? "only " : "",
		   cap & (1 << 17) ? "pmp " : "",
		   cap & (1 << 15) ? "pio " : "",
		   cap & (1 << 14) ? "slum " : "",
		   cap & (1 << 13) ? "part " : "");
}

/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
 * hardwired to on-board SIMG 4726.  The chipset is ICH8 and doesn't
 * support PMP, and the 4726 either directly exports the device
 * attached to the first downstream port or acts as a hardware storage
 * controller and emulates a single ATA device (can be RAID 0/1 or some
 * other configuration).
 *
 * When there's no device attached to the first downstream port of the
 * 4726, "Config Disk" appears, which is a pseudo ATA device used to
 * configure the 4726.  However, the ATA emulation of the device is
 * very lame.  It doesn't send a signature D2H Reg FIS after the
 * initial hardreset, pukes on SRST w/ PMP==0 and has a bunch of other
 * issues.
 *
 * The following function works around the problem by always using
 * hardreset on the port and not depending on receiving the signature
 * FIS afterward.  If the signature FIS isn't received soon, ATA class
 * is assumed without a follow-up softreset.
 */
static void ahci_p5wdh_workaround(struct ata_host *host)
{
	static struct dmi_system_id sysids[] = {
		{
			.ident = "P5W DH Deluxe",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR,
					  "ASUSTEK COMPUTER INC"),
				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
			},
		},
		{ }
	};
	struct pci_dev *pdev = to_pci_dev(host->dev);

	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
	    dmi_check_system(sysids)) {
		struct ata_port *ap = host->ports[1];

		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
			   "Deluxe on-board SIMG4726 workaround\n");

		ap->ops = &ahci_p5wdh_ops;
		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
	}
}
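
/*
 * ahci_init_one - PCI probe: map the ABAR, read and fix up the initial
 * configuration, allocate the ATA host, apply board quirks, reset and
 * initialize the controller and register the interrupt handler.
 */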
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_id = ent->driver_data;
	struct ata_port_info pi = ahci_port_info[board_id];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	int n_ports, i, rc;

	VPRINTK("ENTER\n");

	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* AHCI controllers often implement SFF compatible interface.
	 * Grab all PCI BARs just in case.
	 */
	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;

	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
		u8 map;

		/* ICH6s share the same PCI ID for both piix and ahci
		 * modes.  Enabling ahci mode while MAP indicates
		 * combined mode is a bad idea.  Yield to ata_piix.
		 */
		pci_read_config_byte(pdev, ICH_MAP, &map);
		if (map & 0x3) {
			dev_printk(KERN_INFO, &pdev->dev, "controller is in "
				   "combined mode, can't enable AHCI mode\n");
			return -ENODEV;
		}
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->flags |= (unsigned long)pi.private_data;

	/* MCP65 revision A1 and A2 can't do MSI */
	if (board_id == board_ahci_mcp65 &&
	    (pdev->revision == 0xa1 || pdev->revision == 0xa2))
		hpriv->flags |= AHCI_HFLAG_NO_MSI;

	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	/* save initial config */
	ahci_save_initial_config(pdev, hpriv);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	/* CAP.NP sometimes indicate the index of the last enabled
	 * port, at other times, that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);
	host->private_data = hpriv;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
				   0x100 + ap->port_no * 0x80, "port");

		/* set initial link pm policy */
		ap->pm_policy = NOT_AVAILABLE;

		/* disabled/not-implemented port */
		if (!(hpriv->port_map & (1 << i)))
			ap->ops = &ata_dummy_port_ops;
	}

	/* apply workaround for ASUS P5W DH Deluxe mainboard */
	ahci_p5wdh_workaround(host);

	/* initialize adapter */
	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
	if (rc)
		return rc;

	rc = ahci_reset_controller(host);
	if (rc)
		return rc;

	ahci_init_controller(host);
	ahci_print_info(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
				 &ahci_sht);
}

static int __init ahci_init(void)
{
	return pci_register_driver(&ahci_pci_driver);
}

static void __exit ahci_exit(void)
{
	pci_unregister_driver(&ahci_pci_driver);
}


MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(ahci_init);
module_exit(ahci_exit);