  1. /*
  2. * ahci.c - AHCI SATA support
  3. *
  4. * Maintained by: Jeff Garzik <jgarzik@pobox.com>
  5. * Please ALWAYS copy linux-ide@vger.kernel.org
  6. * on emails.
  7. *
  8. * Copyright 2004-2005 Red Hat, Inc.
  9. *
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License as published by
  13. * the Free Software Foundation; either version 2, or (at your option)
  14. * any later version.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; see the file COPYING. If not, write to
  23. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
  24. *
  25. *
  26. * libata documentation is available via 'make {ps|pdf}docs',
  27. * as Documentation/DocBook/libata.*
  28. *
  29. * AHCI hardware documentation:
  30. * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
  31. * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
  32. *
  33. */
  34. #include <linux/kernel.h>
  35. #include <linux/module.h>
  36. #include <linux/pci.h>
  37. #include <linux/init.h>
  38. #include <linux/blkdev.h>
  39. #include <linux/delay.h>
  40. #include <linux/interrupt.h>
  41. #include <linux/dma-mapping.h>
  42. #include <linux/device.h>
  43. #include <linux/dmi.h>
  44. #include <scsi/scsi_host.h>
  45. #include <scsi/scsi_cmnd.h>
  46. #include <linux/libata.h>
  47. #define DRV_NAME "ahci"
  48. #define DRV_VERSION "3.0"
  49. static int ahci_skip_host_reset;
  50. module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
  51. MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
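/* Usage example: loading the driver with "modprobe ahci skip_host_reset=1"
 * leaves the HBA state set up by the BIOS untouched instead of performing
 * the global reset in ahci_reset_controller().
 */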
  52. static int ahci_enable_alpm(struct ata_port *ap,
  53. enum link_pm policy);
  54. static void ahci_disable_alpm(struct ata_port *ap);
  55. enum {
  56. AHCI_PCI_BAR = 5,
  57. AHCI_MAX_PORTS = 32,
  58. AHCI_MAX_SG = 168, /* hardware max is 64K */
  59. AHCI_DMA_BOUNDARY = 0xffffffff,
  60. AHCI_USE_CLUSTERING = 1,
  61. AHCI_MAX_CMDS = 32,
  62. AHCI_CMD_SZ = 32,
  63. AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
  64. AHCI_RX_FIS_SZ = 256,
  65. AHCI_CMD_TBL_CDB = 0x40,
  66. AHCI_CMD_TBL_HDR_SZ = 0x80,
  67. AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
  68. AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
  69. AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
  70. AHCI_RX_FIS_SZ,
  71. AHCI_IRQ_ON_SG = (1 << 31),
  72. AHCI_CMD_ATAPI = (1 << 5),
  73. AHCI_CMD_WRITE = (1 << 6),
  74. AHCI_CMD_PREFETCH = (1 << 7),
  75. AHCI_CMD_RESET = (1 << 8),
  76. AHCI_CMD_CLR_BUSY = (1 << 10),
  77. RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
  78. RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
  79. RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
  80. board_ahci = 0,
  81. board_ahci_vt8251 = 1,
  82. board_ahci_ign_iferr = 2,
  83. board_ahci_sb600 = 3,
  84. board_ahci_mv = 4,
  85. board_ahci_sb700 = 5,
  86. /* global controller registers */
  87. HOST_CAP = 0x00, /* host capabilities */
  88. HOST_CTL = 0x04, /* global host control */
  89. HOST_IRQ_STAT = 0x08, /* interrupt status */
  90. HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
  91. HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
  92. /* HOST_CTL bits */
  93. HOST_RESET = (1 << 0), /* reset controller; self-clear */
  94. HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
  95. HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
  96. /* HOST_CAP bits */
  97. HOST_CAP_SSC = (1 << 14), /* Slumber capable */
  98. HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
  99. HOST_CAP_CLO = (1 << 24), /* Command List Override support */
  100. HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
  101. HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
  102. HOST_CAP_SNTF = (1 << 29), /* SNotification register */
  103. HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
  104. HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
  105. /* registers for each SATA port */
  106. PORT_LST_ADDR = 0x00, /* command list DMA addr */
  107. PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
  108. PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
  109. PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
  110. PORT_IRQ_STAT = 0x10, /* interrupt status */
  111. PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
  112. PORT_CMD = 0x18, /* port command */
  113. PORT_TFDATA = 0x20, /* taskfile data */
  114. PORT_SIG = 0x24, /* device TF signature */
  115. PORT_CMD_ISSUE = 0x38, /* command issue */
  116. PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
  117. PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
  118. PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
  119. PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
  120. PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
  121. /* PORT_IRQ_{STAT,MASK} bits */
  122. PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
  123. PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
  124. PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
  125. PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
  126. PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
  127. PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
  128. PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
  129. PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
  130. PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
  131. PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
  132. PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
  133. PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
  134. PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
  135. PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
  136. PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
  137. PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
  138. PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
  139. PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
  140. PORT_IRQ_IF_ERR |
  141. PORT_IRQ_CONNECT |
  142. PORT_IRQ_PHYRDY |
  143. PORT_IRQ_UNK_FIS |
  144. PORT_IRQ_BAD_PMP,
  145. PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
  146. PORT_IRQ_TF_ERR |
  147. PORT_IRQ_HBUS_DATA_ERR,
  148. DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
  149. PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
  150. PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
  151. /* PORT_CMD bits */
  152. PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
  153. PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
  154. PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
  155. PORT_CMD_PMP = (1 << 17), /* PMP attached */
  156. PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
  157. PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
  158. PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
  159. PORT_CMD_CLO = (1 << 3), /* Command list override */
  160. PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
  161. PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
  162. PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
  163. PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
  164. PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
  165. PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
  166. PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
  167. /* hpriv->flags bits */
  168. AHCI_HFLAG_NO_NCQ = (1 << 0),
  169. AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
  170. AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
  171. AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
  172. AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
  173. AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
  174. AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
  175. AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
  176. AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
  177. /* ap->flags bits */
  178. AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
  179. ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
  180. ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
  181. ATA_FLAG_IPM,
  182. ICH_MAP = 0x90, /* ICH MAP register */
  183. };
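/*
 * In-memory layout consumed by the HBA: ahci_cmd_hdr is one 32-byte entry
 * of the per-port command list (one per tag), and ahci_sg is a single
 * 16-byte scatter/gather (PRD) entry inside a command table.
 */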
  184. struct ahci_cmd_hdr {
  185. __le32 opts;
  186. __le32 status;
  187. __le32 tbl_addr;
  188. __le32 tbl_addr_hi;
  189. __le32 reserved[4];
  190. };
  191. struct ahci_sg {
  192. __le32 addr;
  193. __le32 addr_hi;
  194. __le32 reserved;
  195. __le32 flags_size;
  196. };
  197. struct ahci_host_priv {
  198. unsigned int flags; /* AHCI_HFLAG_* */
  199. u32 cap; /* cap to use */
  200. u32 port_map; /* port map to use */
  201. u32 saved_cap; /* saved initial cap */
  202. u32 saved_port_map; /* saved initial port_map */
  203. };
  204. struct ahci_port_priv {
  205. struct ata_link *active_link;
  206. struct ahci_cmd_hdr *cmd_slot;
  207. dma_addr_t cmd_slot_dma;
  208. void *cmd_tbl;
  209. dma_addr_t cmd_tbl_dma;
  210. void *rx_fis;
  211. dma_addr_t rx_fis_dma;
  212. /* for NCQ spurious interrupt analysis */
  213. unsigned int ncq_saw_d2h:1;
  214. unsigned int ncq_saw_dmas:1;
  215. unsigned int ncq_saw_sdb:1;
  216. u32 intr_mask; /* interrupts to enable */
  217. };
  218. static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
  219. static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);
  220. static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
  221. static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
  222. static int ahci_port_start(struct ata_port *ap);
  223. static void ahci_port_stop(struct ata_port *ap);
  224. static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
  225. static void ahci_qc_prep(struct ata_queued_cmd *qc);
  226. static u8 ahci_check_status(struct ata_port *ap);
  227. static void ahci_freeze(struct ata_port *ap);
  228. static void ahci_thaw(struct ata_port *ap);
  229. static void ahci_pmp_attach(struct ata_port *ap);
  230. static void ahci_pmp_detach(struct ata_port *ap);
  231. static void ahci_error_handler(struct ata_port *ap);
  232. static void ahci_vt8251_error_handler(struct ata_port *ap);
  233. static void ahci_p5wdh_error_handler(struct ata_port *ap);
  234. static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
  235. static int ahci_port_resume(struct ata_port *ap);
  236. static void ahci_dev_config(struct ata_device *dev);
  237. static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
  238. static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
  239. u32 opts);
  240. #ifdef CONFIG_PM
  241. static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
  242. static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
  243. static int ahci_pci_device_resume(struct pci_dev *pdev);
  244. #endif
  245. static struct class_device_attribute *ahci_shost_attrs[] = {
  246. &class_device_attr_link_power_management_policy,
  247. NULL
  248. };
  249. static struct scsi_host_template ahci_sht = {
  250. .module = THIS_MODULE,
  251. .name = DRV_NAME,
  252. .ioctl = ata_scsi_ioctl,
  253. .queuecommand = ata_scsi_queuecmd,
  254. .change_queue_depth = ata_scsi_change_queue_depth,
  255. .can_queue = AHCI_MAX_CMDS - 1,
  256. .this_id = ATA_SHT_THIS_ID,
  257. .sg_tablesize = AHCI_MAX_SG,
  258. .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
  259. .emulated = ATA_SHT_EMULATED,
  260. .use_clustering = AHCI_USE_CLUSTERING,
  261. .proc_name = DRV_NAME,
  262. .dma_boundary = AHCI_DMA_BOUNDARY,
  263. .slave_configure = ata_scsi_slave_config,
  264. .slave_destroy = ata_scsi_slave_destroy,
  265. .bios_param = ata_std_bios_param,
  266. .shost_attrs = ahci_shost_attrs,
  267. };
  268. static const struct ata_port_operations ahci_ops = {
  269. .check_status = ahci_check_status,
  270. .check_altstatus = ahci_check_status,
  271. .dev_select = ata_noop_dev_select,
  272. .dev_config = ahci_dev_config,
  273. .tf_read = ahci_tf_read,
  274. .qc_defer = sata_pmp_qc_defer_cmd_switch,
  275. .qc_prep = ahci_qc_prep,
  276. .qc_issue = ahci_qc_issue,
  277. .irq_clear = ata_noop_irq_clear,
  278. .scr_read = ahci_scr_read,
  279. .scr_write = ahci_scr_write,
  280. .freeze = ahci_freeze,
  281. .thaw = ahci_thaw,
  282. .error_handler = ahci_error_handler,
  283. .post_internal_cmd = ahci_post_internal_cmd,
  284. .pmp_attach = ahci_pmp_attach,
  285. .pmp_detach = ahci_pmp_detach,
  286. #ifdef CONFIG_PM
  287. .port_suspend = ahci_port_suspend,
  288. .port_resume = ahci_port_resume,
  289. #endif
  290. .enable_pm = ahci_enable_alpm,
  291. .disable_pm = ahci_disable_alpm,
  292. .port_start = ahci_port_start,
  293. .port_stop = ahci_port_stop,
  294. };
  295. static const struct ata_port_operations ahci_vt8251_ops = {
  296. .check_status = ahci_check_status,
  297. .check_altstatus = ahci_check_status,
  298. .dev_select = ata_noop_dev_select,
  299. .tf_read = ahci_tf_read,
  300. .qc_defer = sata_pmp_qc_defer_cmd_switch,
  301. .qc_prep = ahci_qc_prep,
  302. .qc_issue = ahci_qc_issue,
  303. .irq_clear = ata_noop_irq_clear,
  304. .scr_read = ahci_scr_read,
  305. .scr_write = ahci_scr_write,
  306. .freeze = ahci_freeze,
  307. .thaw = ahci_thaw,
  308. .error_handler = ahci_vt8251_error_handler,
  309. .post_internal_cmd = ahci_post_internal_cmd,
  310. .pmp_attach = ahci_pmp_attach,
  311. .pmp_detach = ahci_pmp_detach,
  312. #ifdef CONFIG_PM
  313. .port_suspend = ahci_port_suspend,
  314. .port_resume = ahci_port_resume,
  315. #endif
  316. .port_start = ahci_port_start,
  317. .port_stop = ahci_port_stop,
  318. };
  319. static const struct ata_port_operations ahci_p5wdh_ops = {
  320. .check_status = ahci_check_status,
  321. .check_altstatus = ahci_check_status,
  322. .dev_select = ata_noop_dev_select,
  323. .tf_read = ahci_tf_read,
  324. .qc_defer = sata_pmp_qc_defer_cmd_switch,
  325. .qc_prep = ahci_qc_prep,
  326. .qc_issue = ahci_qc_issue,
  327. .irq_clear = ata_noop_irq_clear,
  328. .scr_read = ahci_scr_read,
  329. .scr_write = ahci_scr_write,
  330. .freeze = ahci_freeze,
  331. .thaw = ahci_thaw,
  332. .error_handler = ahci_p5wdh_error_handler,
  333. .post_internal_cmd = ahci_post_internal_cmd,
  334. .pmp_attach = ahci_pmp_attach,
  335. .pmp_detach = ahci_pmp_detach,
  336. #ifdef CONFIG_PM
  337. .port_suspend = ahci_port_suspend,
  338. .port_resume = ahci_port_resume,
  339. #endif
  340. .port_start = ahci_port_start,
  341. .port_stop = ahci_port_stop,
  342. };
  343. #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
  344. static const struct ata_port_info ahci_port_info[] = {
  345. /* board_ahci */
  346. {
  347. .flags = AHCI_FLAG_COMMON,
  348. .pio_mask = 0x1f, /* pio0-4 */
  349. .udma_mask = ATA_UDMA6,
  350. .port_ops = &ahci_ops,
  351. },
  352. /* board_ahci_vt8251 */
  353. {
  354. AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
  355. .flags = AHCI_FLAG_COMMON,
  356. .pio_mask = 0x1f, /* pio0-4 */
  357. .udma_mask = ATA_UDMA6,
  358. .port_ops = &ahci_vt8251_ops,
  359. },
  360. /* board_ahci_ign_iferr */
  361. {
  362. AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
  363. .flags = AHCI_FLAG_COMMON,
  364. .pio_mask = 0x1f, /* pio0-4 */
  365. .udma_mask = ATA_UDMA6,
  366. .port_ops = &ahci_ops,
  367. },
  368. /* board_ahci_sb600 */
  369. {
  370. AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
  371. AHCI_HFLAG_32BIT_ONLY |
  372. AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
  373. .flags = AHCI_FLAG_COMMON,
  374. .pio_mask = 0x1f, /* pio0-4 */
  375. .udma_mask = ATA_UDMA6,
  376. .port_ops = &ahci_ops,
  377. },
  378. /* board_ahci_mv */
  379. {
  380. AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
  381. AHCI_HFLAG_MV_PATA),
  382. .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
  383. ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
  384. .pio_mask = 0x1f, /* pio0-4 */
  385. .udma_mask = ATA_UDMA6,
  386. .port_ops = &ahci_ops,
  387. },
  388. /* board_ahci_sb700 */
  389. {
  390. AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
  391. AHCI_HFLAG_NO_PMP),
  392. .flags = AHCI_FLAG_COMMON,
  393. .pio_mask = 0x1f, /* pio0-4 */
  394. .udma_mask = ATA_UDMA6,
  395. .port_ops = &ahci_ops,
  396. },
  397. };
  398. static const struct pci_device_id ahci_pci_tbl[] = {
  399. /* Intel */
  400. { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
  401. { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
  402. { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
  403. { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
  404. { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
  405. { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
  406. { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
  407. { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
  408. { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
  409. { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
  410. { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
  411. { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
  412. { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
  413. { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
  414. { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
  415. { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
  416. { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
  417. { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
  418. { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
  419. { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
  420. { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
  421. { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
  422. { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
  423. { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
  424. { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
  425. { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
  426. { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
  427. { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
  428. { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
  429. { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
  430. { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
  431. /* JMicron 360/1/3/5/6, match class to avoid IDE function */
  432. { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
  433. PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
  434. /* ATI */
  435. { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
  436. { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
  437. { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
  438. { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
  439. { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
  440. { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
  441. { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
  442. /* VIA */
  443. { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
  444. { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
  445. /* NVIDIA */
  446. { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */
  447. { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */
  448. { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */
  449. { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */
  450. { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci }, /* MCP65 */
  451. { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci }, /* MCP65 */
  452. { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci }, /* MCP65 */
  453. { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci }, /* MCP65 */
  454. { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
  455. { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
  456. { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
  457. { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
  458. { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
  459. { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
  460. { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
  461. { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
  462. { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
  463. { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
  464. { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
  465. { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
  466. { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
  467. { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
  468. { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
  469. { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
  470. { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
  471. { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
  472. { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
  473. { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
  474. { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
  475. { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
  476. { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
  477. { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
  478. { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
  479. { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
  480. { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
  481. { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
  482. { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
  483. { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
  484. { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
  485. { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
  486. { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
  487. { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
  488. { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
  489. { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
  490. { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
  491. { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
  492. { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
  493. { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
  494. { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
  495. { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
  496. { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
  497. { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
  498. { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
  499. { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
  500. { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
  501. { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
  502. { PCI_VDEVICE(NVIDIA, 0x0bc8), board_ahci }, /* MCP7B */
  503. { PCI_VDEVICE(NVIDIA, 0x0bc9), board_ahci }, /* MCP7B */
  504. { PCI_VDEVICE(NVIDIA, 0x0bca), board_ahci }, /* MCP7B */
  505. { PCI_VDEVICE(NVIDIA, 0x0bcb), board_ahci }, /* MCP7B */
  506. { PCI_VDEVICE(NVIDIA, 0x0bcc), board_ahci }, /* MCP7B */
  507. { PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
  508. { PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
  509. { PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
  510. { PCI_VDEVICE(NVIDIA, 0x0bd0), board_ahci }, /* MCP7B */
  511. { PCI_VDEVICE(NVIDIA, 0x0bd1), board_ahci }, /* MCP7B */
  512. { PCI_VDEVICE(NVIDIA, 0x0bd2), board_ahci }, /* MCP7B */
  513. { PCI_VDEVICE(NVIDIA, 0x0bd3), board_ahci }, /* MCP7B */
  514. /* SiS */
  515. { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
  516. { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
  517. { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
  518. /* Marvell */
  519. { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
  520. { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
  521. /* Generic, PCI class code for AHCI */
  522. { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
  523. PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
  524. { } /* terminate list */
  525. };
  526. static struct pci_driver ahci_pci_driver = {
  527. .name = DRV_NAME,
  528. .id_table = ahci_pci_tbl,
  529. .probe = ahci_init_one,
  530. .remove = ata_pci_remove_one,
  531. #ifdef CONFIG_PM
  532. .suspend = ahci_pci_device_suspend,
  533. .resume = ahci_pci_device_resume,
  534. #endif
  535. };
  536. static inline int ahci_nr_ports(u32 cap)
  537. {
  538. return (cap & 0x1f) + 1;
  539. }
  540. static inline void __iomem *__ahci_port_base(struct ata_host *host,
  541. unsigned int port_no)
  542. {
  543. void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
  544. return mmio + 0x100 + (port_no * 0x80);
  545. }
  546. static inline void __iomem *ahci_port_base(struct ata_port *ap)
  547. {
  548. return __ahci_port_base(ap->host, ap->port_no);
  549. }
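/*
 * Some BIOSes hand the controller over in legacy/IDE emulation mode.
 * AHCI mode (GHC.AE) must be enabled before any other AHCI register is
 * used, so this is called before CAP/PI are read and before HOST_RESET
 * is issued.
 */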
  550. static void ahci_enable_ahci(void __iomem *mmio)
  551. {
  552. u32 tmp;
  553. /* turn on AHCI_EN */
  554. tmp = readl(mmio + HOST_CTL);
  555. if (!(tmp & HOST_AHCI_EN)) {
  556. tmp |= HOST_AHCI_EN;
  557. writel(tmp, mmio + HOST_CTL);
  558. tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
  559. WARN_ON(!(tmp & HOST_AHCI_EN));
  560. }
  561. }
  562. /**
  563. * ahci_save_initial_config - Save and fixup initial config values
  564. * @pdev: target PCI device
  565. * @hpriv: host private area to store config values
  566. *
  567. * Some registers containing configuration info might be setup by
  568. * BIOS and might be cleared on reset. This function saves the
  569. * initial values of those registers into @hpriv such that they
  570. * can be restored after controller reset.
  571. *
  572. * If inconsistent, config values are fixed up by this function.
  573. *
  574. * LOCKING:
  575. * None.
  576. */
  577. static void ahci_save_initial_config(struct pci_dev *pdev,
  578. struct ahci_host_priv *hpriv)
  579. {
  580. void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
  581. u32 cap, port_map;
  582. int i;
  583. int mv;
  584. /* make sure AHCI mode is enabled before accessing CAP */
  585. ahci_enable_ahci(mmio);
  586. /* Values prefixed with saved_ are written back to host after
  587. * reset. Values without are used for driver operation.
  588. */
  589. hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
  590. hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
  591. /* some chips have errata preventing 64bit use */
  592. if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
  593. dev_printk(KERN_INFO, &pdev->dev,
  594. "controller can't do 64bit DMA, forcing 32bit\n");
  595. cap &= ~HOST_CAP_64;
  596. }
  597. if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
  598. dev_printk(KERN_INFO, &pdev->dev,
  599. "controller can't do NCQ, turning off CAP_NCQ\n");
  600. cap &= ~HOST_CAP_NCQ;
  601. }
  602. if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
  603. dev_printk(KERN_INFO, &pdev->dev,
  604. "controller can't do PMP, turning off CAP_PMP\n");
  605. cap &= ~HOST_CAP_PMP;
  606. }
  607. /*
  608. * Temporary Marvell 6145 hack: PATA port presence
  609. * is asserted through the standard AHCI port
  610. * presence register, as bit 4 (counting from 0)
  611. */
  612. if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
  613. if (pdev->device == 0x6121)
  614. mv = 0x3;
  615. else
  616. mv = 0xf;
  617. dev_printk(KERN_ERR, &pdev->dev,
  618. "MV_AHCI HACK: port_map %x -> %x\n",
  619. port_map,
  620. port_map & mv);
  621. port_map &= mv;
  622. }
  623. /* cross check port_map and cap.n_ports */
  624. if (port_map) {
  625. int map_ports = 0;
  626. for (i = 0; i < AHCI_MAX_PORTS; i++)
  627. if (port_map & (1 << i))
  628. map_ports++;
  629. /* If PI has more ports than n_ports, whine, clear
  630. * port_map and let it be generated from n_ports.
  631. */
  632. if (map_ports > ahci_nr_ports(cap)) {
  633. dev_printk(KERN_WARNING, &pdev->dev,
  634. "implemented port map (0x%x) contains more "
  635. "ports than nr_ports (%u), using nr_ports\n",
  636. port_map, ahci_nr_ports(cap));
  637. port_map = 0;
  638. }
  639. }
  640. /* fabricate port_map from cap.nr_ports */
  641. if (!port_map) {
  642. port_map = (1 << ahci_nr_ports(cap)) - 1;
  643. dev_printk(KERN_WARNING, &pdev->dev,
  644. "forcing PORTS_IMPL to 0x%x\n", port_map);
  645. /* write the fixed up value to the PI register */
  646. hpriv->saved_port_map = port_map;
  647. }
  648. /* record values to use during operation */
  649. hpriv->cap = cap;
  650. hpriv->port_map = port_map;
  651. }
  652. /**
  653. * ahci_restore_initial_config - Restore initial config
  654. * @host: target ATA host
  655. *
  656. * Restore initial config stored by ahci_save_initial_config().
  657. *
  658. * LOCKING:
  659. * None.
  660. */
  661. static void ahci_restore_initial_config(struct ata_host *host)
  662. {
  663. struct ahci_host_priv *hpriv = host->private_data;
  664. void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
  665. writel(hpriv->saved_cap, mmio + HOST_CAP);
  666. writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
  667. (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
  668. }
  669. static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
  670. {
  671. static const int offset[] = {
  672. [SCR_STATUS] = PORT_SCR_STAT,
  673. [SCR_CONTROL] = PORT_SCR_CTL,
  674. [SCR_ERROR] = PORT_SCR_ERR,
  675. [SCR_ACTIVE] = PORT_SCR_ACT,
  676. [SCR_NOTIFICATION] = PORT_SCR_NTF,
  677. };
  678. struct ahci_host_priv *hpriv = ap->host->private_data;
  679. if (sc_reg < ARRAY_SIZE(offset) &&
  680. (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
  681. return offset[sc_reg];
  682. return 0;
  683. }
  684. static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
  685. {
  686. void __iomem *port_mmio = ahci_port_base(ap);
  687. int offset = ahci_scr_offset(ap, sc_reg);
  688. if (offset) {
  689. *val = readl(port_mmio + offset);
  690. return 0;
  691. }
  692. return -EINVAL;
  693. }
  694. static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
  695. {
  696. void __iomem *port_mmio = ahci_port_base(ap);
  697. int offset = ahci_scr_offset(ap, sc_reg);
  698. if (offset) {
  699. writel(val, port_mmio + offset);
  700. return 0;
  701. }
  702. return -EINVAL;
  703. }
  704. static void ahci_start_engine(struct ata_port *ap)
  705. {
  706. void __iomem *port_mmio = ahci_port_base(ap);
  707. u32 tmp;
  708. /* start DMA */
  709. tmp = readl(port_mmio + PORT_CMD);
  710. tmp |= PORT_CMD_START;
  711. writel(tmp, port_mmio + PORT_CMD);
  712. readl(port_mmio + PORT_CMD); /* flush */
  713. }
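/*
 * Stop the command list DMA engine: clear PxCMD.ST and wait for the HBA
 * to clear PxCMD.CR (LIST_ON).  The engine may take up to 500 ms to
 * idle; -EIO is returned if it does not.
 */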
  714. static int ahci_stop_engine(struct ata_port *ap)
  715. {
  716. void __iomem *port_mmio = ahci_port_base(ap);
  717. u32 tmp;
  718. tmp = readl(port_mmio + PORT_CMD);
  719. /* check if the HBA is idle */
  720. if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
  721. return 0;
  722. /* setting HBA to idle */
  723. tmp &= ~PORT_CMD_START;
  724. writel(tmp, port_mmio + PORT_CMD);
  725. /* wait for engine to stop. This could be as long as 500 msec */
  726. tmp = ata_wait_register(port_mmio + PORT_CMD,
  727. PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
  728. if (tmp & PORT_CMD_LIST_ON)
  729. return -EIO;
  730. return 0;
  731. }
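/*
 * Program the command list and received-FIS base addresses and then set
 * PxCMD.FRE.  On HBAs with 64-bit addressing (CAP.S64A) the upper dword
 * goes into the *_HI registers; the (x >> 16) >> 16 form avoids an
 * invalid 32-bit shift when dma_addr_t is only 32 bits wide.
 */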
  732. static void ahci_start_fis_rx(struct ata_port *ap)
  733. {
  734. void __iomem *port_mmio = ahci_port_base(ap);
  735. struct ahci_host_priv *hpriv = ap->host->private_data;
  736. struct ahci_port_priv *pp = ap->private_data;
  737. u32 tmp;
  738. /* set FIS registers */
  739. if (hpriv->cap & HOST_CAP_64)
  740. writel((pp->cmd_slot_dma >> 16) >> 16,
  741. port_mmio + PORT_LST_ADDR_HI);
  742. writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
  743. if (hpriv->cap & HOST_CAP_64)
  744. writel((pp->rx_fis_dma >> 16) >> 16,
  745. port_mmio + PORT_FIS_ADDR_HI);
  746. writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
  747. /* enable FIS reception */
  748. tmp = readl(port_mmio + PORT_CMD);
  749. tmp |= PORT_CMD_FIS_RX;
  750. writel(tmp, port_mmio + PORT_CMD);
  751. /* flush */
  752. readl(port_mmio + PORT_CMD);
  753. }
  754. static int ahci_stop_fis_rx(struct ata_port *ap)
  755. {
  756. void __iomem *port_mmio = ahci_port_base(ap);
  757. u32 tmp;
  758. /* disable FIS reception */
  759. tmp = readl(port_mmio + PORT_CMD);
  760. tmp &= ~PORT_CMD_FIS_RX;
  761. writel(tmp, port_mmio + PORT_CMD);
  762. /* wait for completion, spec says 500ms, give it 1000 */
  763. tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
  764. PORT_CMD_FIS_ON, 10, 1000);
  765. if (tmp & PORT_CMD_FIS_ON)
  766. return -EBUSY;
  767. return 0;
  768. }
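/*
 * Wake the port: request device spin-up (PxCMD.SUD) when staggered
 * spin-up is supported and force the interface into the ACTIVE ICC state.
 */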
  769. static void ahci_power_up(struct ata_port *ap)
  770. {
  771. struct ahci_host_priv *hpriv = ap->host->private_data;
  772. void __iomem *port_mmio = ahci_port_base(ap);
  773. u32 cmd;
  774. cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
  775. /* spin up device */
  776. if (hpriv->cap & HOST_CAP_SSS) {
  777. cmd |= PORT_CMD_SPIN_UP;
  778. writel(cmd, port_mmio + PORT_CMD);
  779. }
  780. /* wake up link */
  781. writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
  782. }
  783. static void ahci_disable_alpm(struct ata_port *ap)
  784. {
  785. struct ahci_host_priv *hpriv = ap->host->private_data;
  786. void __iomem *port_mmio = ahci_port_base(ap);
  787. u32 cmd;
  788. struct ahci_port_priv *pp = ap->private_data;
  789. /* IPM bits should be disabled by libata-core */
  790. /* get the existing command bits */
  791. cmd = readl(port_mmio + PORT_CMD);
  792. /* disable ALPM and ASP */
  793. cmd &= ~PORT_CMD_ASP;
  794. cmd &= ~PORT_CMD_ALPE;
  795. /* force the interface back to active */
  796. cmd |= PORT_CMD_ICC_ACTIVE;
  797. /* write out new cmd value */
  798. writel(cmd, port_mmio + PORT_CMD);
  799. cmd = readl(port_mmio + PORT_CMD);
  800. /* wait 10ms to be sure we've come out of any low power state */
  801. msleep(10);
  802. /* clear out any PhyRdy stuff from interrupt status */
  803. writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
  804. /* go ahead and clean out PhyRdy Change from Serror too */
  805. ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
  806. /*
  807. * Clear flag to indicate that we should ignore all PhyRdy
  808. * state changes
  809. */
  810. hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
  811. /*
  812. * Enable interrupts on Phy Ready.
  813. */
  814. pp->intr_mask |= PORT_IRQ_PHYRDY;
  815. writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
  816. /*
  817. * don't change the link pm policy - we can be called
818. * just to turn off link pm temporarily
  819. */
  820. }
  821. static int ahci_enable_alpm(struct ata_port *ap,
  822. enum link_pm policy)
  823. {
  824. struct ahci_host_priv *hpriv = ap->host->private_data;
  825. void __iomem *port_mmio = ahci_port_base(ap);
  826. u32 cmd;
  827. struct ahci_port_priv *pp = ap->private_data;
  828. u32 asp;
  829. /* Make sure the host is capable of link power management */
  830. if (!(hpriv->cap & HOST_CAP_ALPM))
  831. return -EINVAL;
  832. switch (policy) {
  833. case MAX_PERFORMANCE:
  834. case NOT_AVAILABLE:
  835. /*
  836. * if we came here with NOT_AVAILABLE,
  837. * it just means this is the first time we
  838. * have tried to enable - default to max performance,
  839. * and let the user go to lower power modes on request.
  840. */
  841. ahci_disable_alpm(ap);
  842. return 0;
  843. case MIN_POWER:
  844. /* configure HBA to enter SLUMBER */
  845. asp = PORT_CMD_ASP;
  846. break;
  847. case MEDIUM_POWER:
  848. /* configure HBA to enter PARTIAL */
  849. asp = 0;
  850. break;
  851. default:
  852. return -EINVAL;
  853. }
  854. /*
  855. * Disable interrupts on Phy Ready. This keeps us from
  856. * getting woken up due to spurious phy ready interrupts
  857. * TBD - Hot plug should be done via polling now, is
  858. * that even supported?
  859. */
  860. pp->intr_mask &= ~PORT_IRQ_PHYRDY;
  861. writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
  862. /*
  863. * Set a flag to indicate that we should ignore all PhyRdy
  864. * state changes since these can happen now whenever we
  865. * change link state
  866. */
  867. hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
  868. /* get the existing command bits */
  869. cmd = readl(port_mmio + PORT_CMD);
  870. /*
  871. * Set ASP based on Policy
  872. */
  873. cmd |= asp;
  874. /*
  875. * Setting this bit will instruct the HBA to aggressively
  876. * enter a lower power link state when it's appropriate and
  877. * based on the value set above for ASP
  878. */
  879. cmd |= PORT_CMD_ALPE;
  880. /* write out new cmd value */
  881. writel(cmd, port_mmio + PORT_CMD);
  882. cmd = readl(port_mmio + PORT_CMD);
  883. /* IPM bits should be set by libata-core */
  884. return 0;
  885. }
  886. #ifdef CONFIG_PM
  887. static void ahci_power_down(struct ata_port *ap)
  888. {
  889. struct ahci_host_priv *hpriv = ap->host->private_data;
  890. void __iomem *port_mmio = ahci_port_base(ap);
  891. u32 cmd, scontrol;
  892. if (!(hpriv->cap & HOST_CAP_SSS))
  893. return;
  894. /* put device into listen mode, first set PxSCTL.DET to 0 */
  895. scontrol = readl(port_mmio + PORT_SCR_CTL);
  896. scontrol &= ~0xf;
  897. writel(scontrol, port_mmio + PORT_SCR_CTL);
  898. /* then set PxCMD.SUD to 0 */
  899. cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
  900. cmd &= ~PORT_CMD_SPIN_UP;
  901. writel(cmd, port_mmio + PORT_CMD);
  902. }
  903. #endif
  904. static void ahci_start_port(struct ata_port *ap)
  905. {
  906. /* enable FIS reception */
  907. ahci_start_fis_rx(ap);
  908. /* enable DMA */
  909. ahci_start_engine(ap);
  910. }
  911. static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
  912. {
  913. int rc;
  914. /* disable DMA */
  915. rc = ahci_stop_engine(ap);
  916. if (rc) {
  917. *emsg = "failed to stop engine";
  918. return rc;
  919. }
  920. /* disable FIS reception */
  921. rc = ahci_stop_fis_rx(ap);
  922. if (rc) {
923. *emsg = "failed to stop FIS RX";
  924. return rc;
  925. }
  926. return 0;
  927. }
  928. static int ahci_reset_controller(struct ata_host *host)
  929. {
  930. struct pci_dev *pdev = to_pci_dev(host->dev);
  931. struct ahci_host_priv *hpriv = host->private_data;
  932. void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
  933. u32 tmp;
  934. /* we must be in AHCI mode, before using anything
  935. * AHCI-specific, such as HOST_RESET.
  936. */
  937. ahci_enable_ahci(mmio);
  938. /* global controller reset */
  939. if (!ahci_skip_host_reset) {
  940. tmp = readl(mmio + HOST_CTL);
  941. if ((tmp & HOST_RESET) == 0) {
  942. writel(tmp | HOST_RESET, mmio + HOST_CTL);
  943. readl(mmio + HOST_CTL); /* flush */
  944. }
  945. /* reset must complete within 1 second, or
  946. * the hardware should be considered fried.
  947. */
  948. ssleep(1);
  949. tmp = readl(mmio + HOST_CTL);
  950. if (tmp & HOST_RESET) {
  951. dev_printk(KERN_ERR, host->dev,
  952. "controller reset failed (0x%x)\n", tmp);
  953. return -EIO;
  954. }
  955. /* turn on AHCI mode */
  956. ahci_enable_ahci(mmio);
  957. /* Some registers might be cleared on reset. Restore
  958. * initial values.
  959. */
  960. ahci_restore_initial_config(host);
  961. } else
  962. dev_printk(KERN_INFO, host->dev,
  963. "skipping global host reset\n");
  964. if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
  965. u16 tmp16;
  966. /* configure PCS */
  967. pci_read_config_word(pdev, 0x92, &tmp16);
  968. if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
  969. tmp16 |= hpriv->port_map;
  970. pci_write_config_word(pdev, 0x92, tmp16);
  971. }
  972. }
  973. return 0;
  974. }
  975. static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
  976. int port_no, void __iomem *mmio,
  977. void __iomem *port_mmio)
  978. {
  979. const char *emsg = NULL;
  980. int rc;
  981. u32 tmp;
  982. /* make sure port is not active */
  983. rc = ahci_deinit_port(ap, &emsg);
  984. if (rc)
  985. dev_printk(KERN_WARNING, &pdev->dev,
  986. "%s (%d)\n", emsg, rc);
  987. /* clear SError */
  988. tmp = readl(port_mmio + PORT_SCR_ERR);
  989. VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
  990. writel(tmp, port_mmio + PORT_SCR_ERR);
  991. /* clear port IRQ */
  992. tmp = readl(port_mmio + PORT_IRQ_STAT);
  993. VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
  994. if (tmp)
  995. writel(tmp, port_mmio + PORT_IRQ_STAT);
  996. writel(1 << port_no, mmio + HOST_IRQ_STAT);
  997. }
  998. static void ahci_init_controller(struct ata_host *host)
  999. {
  1000. struct ahci_host_priv *hpriv = host->private_data;
  1001. struct pci_dev *pdev = to_pci_dev(host->dev);
  1002. void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
  1003. int i;
  1004. void __iomem *port_mmio;
  1005. u32 tmp;
  1006. int mv;
  1007. if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
  1008. if (pdev->device == 0x6121)
  1009. mv = 2;
  1010. else
  1011. mv = 4;
  1012. port_mmio = __ahci_port_base(host, mv);
  1013. writel(0, port_mmio + PORT_IRQ_MASK);
  1014. /* clear port IRQ */
  1015. tmp = readl(port_mmio + PORT_IRQ_STAT);
  1016. VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
  1017. if (tmp)
  1018. writel(tmp, port_mmio + PORT_IRQ_STAT);
  1019. }
  1020. for (i = 0; i < host->n_ports; i++) {
  1021. struct ata_port *ap = host->ports[i];
  1022. port_mmio = ahci_port_base(ap);
  1023. if (ata_port_is_dummy(ap))
  1024. continue;
  1025. ahci_port_init(pdev, ap, i, mmio, port_mmio);
  1026. }
  1027. tmp = readl(mmio + HOST_CTL);
  1028. VPRINTK("HOST_CTL 0x%x\n", tmp);
  1029. writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
  1030. tmp = readl(mmio + HOST_CTL);
  1031. VPRINTK("HOST_CTL 0x%x\n", tmp);
  1032. }
  1033. static void ahci_dev_config(struct ata_device *dev)
  1034. {
  1035. struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
  1036. if (hpriv->flags & AHCI_HFLAG_SECT255) {
  1037. dev->max_sectors = 255;
  1038. ata_dev_printk(dev, KERN_INFO,
  1039. "SB600 AHCI: limiting to 255 sectors per cmd\n");
  1040. }
  1041. }
  1042. static unsigned int ahci_dev_classify(struct ata_port *ap)
  1043. {
  1044. void __iomem *port_mmio = ahci_port_base(ap);
  1045. struct ata_taskfile tf;
  1046. u32 tmp;
  1047. tmp = readl(port_mmio + PORT_SIG);
  1048. tf.lbah = (tmp >> 24) & 0xff;
  1049. tf.lbam = (tmp >> 16) & 0xff;
  1050. tf.lbal = (tmp >> 8) & 0xff;
  1051. tf.nsect = (tmp) & 0xff;
  1052. return ata_dev_classify(&tf);
  1053. }
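/*
 * Fill one command list entry: 'opts' carries the FIS length and control
 * flags (write/ATAPI/reset/PMP number) and tbl_addr points at this tag's
 * command table inside the per-port DMA area.
 */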
  1054. static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
  1055. u32 opts)
  1056. {
  1057. dma_addr_t cmd_tbl_dma;
  1058. cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
  1059. pp->cmd_slot[tag].opts = cpu_to_le32(opts);
  1060. pp->cmd_slot[tag].status = 0;
  1061. pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
  1062. pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
  1063. }
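/*
 * Recover a port whose device is still busy: stop the DMA engine and, if
 * the device reports BSY/DRQ and the HBA supports it (CAP.SCLO), issue a
 * Command List Override before restarting the engine.
 */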
  1064. static int ahci_kick_engine(struct ata_port *ap, int force_restart)
  1065. {
  1066. void __iomem *port_mmio = ap->ioaddr.cmd_addr;
  1067. struct ahci_host_priv *hpriv = ap->host->private_data;
  1068. u32 tmp;
  1069. int busy, rc;
  1070. /* do we need to kick the port? */
  1071. busy = ahci_check_status(ap) & (ATA_BUSY | ATA_DRQ);
  1072. if (!busy && !force_restart)
  1073. return 0;
  1074. /* stop engine */
  1075. rc = ahci_stop_engine(ap);
  1076. if (rc)
  1077. goto out_restart;
  1078. /* need to do CLO? */
  1079. if (!busy) {
  1080. rc = 0;
  1081. goto out_restart;
  1082. }
  1083. if (!(hpriv->cap & HOST_CAP_CLO)) {
  1084. rc = -EOPNOTSUPP;
  1085. goto out_restart;
  1086. }
  1087. /* perform CLO */
  1088. tmp = readl(port_mmio + PORT_CMD);
  1089. tmp |= PORT_CMD_CLO;
  1090. writel(tmp, port_mmio + PORT_CMD);
  1091. rc = 0;
  1092. tmp = ata_wait_register(port_mmio + PORT_CMD,
  1093. PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
  1094. if (tmp & PORT_CMD_CLO)
  1095. rc = -EIO;
  1096. /* restart engine */
  1097. out_restart:
  1098. ahci_start_engine(ap);
  1099. return rc;
  1100. }
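/*
 * Build an H2D Register FIS in command slot 0, issue it through PxCI and
 * (optionally) poll for completion; softreset uses this to send the two
 * SRST control FISes.
 */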
  1101. static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
  1102. struct ata_taskfile *tf, int is_cmd, u16 flags,
  1103. unsigned long timeout_msec)
  1104. {
  1105. const u32 cmd_fis_len = 5; /* five dwords */
  1106. struct ahci_port_priv *pp = ap->private_data;
  1107. void __iomem *port_mmio = ahci_port_base(ap);
  1108. u8 *fis = pp->cmd_tbl;
  1109. u32 tmp;
  1110. /* prep the command */
  1111. ata_tf_to_fis(tf, pmp, is_cmd, fis);
  1112. ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
  1113. /* issue & wait */
  1114. writel(1, port_mmio + PORT_CMD_ISSUE);
  1115. if (timeout_msec) {
  1116. tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
  1117. 1, timeout_msec);
  1118. if (tmp & 0x1) {
  1119. ahci_kick_engine(ap, 1);
  1120. return -EBUSY;
  1121. }
  1122. } else
  1123. readl(port_mmio + PORT_CMD_ISSUE); /* flush */
  1124. return 0;
  1125. }
  1126. static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
  1127. int pmp, unsigned long deadline)
  1128. {
  1129. struct ata_port *ap = link->ap;
  1130. const char *reason = NULL;
  1131. unsigned long now, msecs;
  1132. struct ata_taskfile tf;
  1133. int rc;
  1134. DPRINTK("ENTER\n");
  1135. if (ata_link_offline(link)) {
  1136. DPRINTK("PHY reports no device\n");
  1137. *class = ATA_DEV_NONE;
  1138. return 0;
  1139. }
  1140. /* prepare for SRST (AHCI-1.1 10.4.1) */
  1141. rc = ahci_kick_engine(ap, 1);
  1142. if (rc && rc != -EOPNOTSUPP)
  1143. ata_link_printk(link, KERN_WARNING,
  1144. "failed to reset engine (errno=%d)\n", rc);
  1145. ata_tf_init(link->device, &tf);
  1146. /* issue the first D2H Register FIS */
  1147. msecs = 0;
  1148. now = jiffies;
1149. if (time_after(deadline, now))
  1150. msecs = jiffies_to_msecs(deadline - now);
  1151. tf.ctl |= ATA_SRST;
  1152. if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
  1153. AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
  1154. rc = -EIO;
  1155. reason = "1st FIS failed";
  1156. goto fail;
  1157. }
  1158. /* spec says at least 5us, but be generous and sleep for 1ms */
  1159. msleep(1);
  1160. /* issue the second D2H Register FIS */
  1161. tf.ctl &= ~ATA_SRST;
  1162. ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
  1163. /* wait a while before checking status */
  1164. ata_wait_after_reset(ap, deadline);
  1165. rc = ata_wait_ready(ap, deadline);
  1166. /* link occupied, -ENODEV too is an error */
  1167. if (rc) {
  1168. reason = "device not ready";
  1169. goto fail;
  1170. }
  1171. *class = ahci_dev_classify(ap);
  1172. DPRINTK("EXIT, class=%u\n", *class);
  1173. return 0;
  1174. fail:
  1175. ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
  1176. return rc;
  1177. }
static int ahci_softreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	int pmp = 0;

	if (link->ap->flags & ATA_FLAG_PMP)
		pmp = SATA_PMP_CTRL_PORT;

	return ahci_do_softreset(link, class, pmp, deadline);
}

static int ahci_hardreset(struct ata_link *link, unsigned int *class,
			  unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_std_hardreset(link, class, deadline);

	ahci_start_engine(ap);

	if (rc == 0 && ata_link_online(link))
		*class = ahci_dev_classify(ap);
	if (rc != -EAGAIN && *class == ATA_DEV_UNKNOWN)
		*class = ATA_DEV_NONE;

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
	return rc;
}
static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
				 unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	ahci_stop_engine(ap);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline);

	/* vt8251 needs SError cleared for the port to operate */
	ahci_scr_read(ap, SCR_ERROR, &serror);
	ahci_scr_write(ap, SCR_ERROR, serror);

	ahci_start_engine(ap);

	DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);

	/* vt8251 doesn't clear BSY on signature FIS reception,
	 * request follow-up softreset.
	 */
	return rc ?: -EAGAIN;
}
static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
				unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
	struct ata_taskfile tf;
	int rc;

	ahci_stop_engine(ap);

	/* clear D2H reception area to properly wait for D2H FIS */
	ata_tf_init(link->device, &tf);
	tf.command = 0x80;
	ata_tf_to_fis(&tf, 0, 0, d2h_fis);

	rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
				 deadline);

	ahci_start_engine(ap);

	if (rc || ata_link_offline(link))
		return rc;

	/* spec mandates ">= 2ms" before checking status */
	msleep(150);

	/* The pseudo configuration device on SIMG4726 attached to
	 * ASUS P5W-DH Deluxe doesn't send signature FIS after
	 * hardreset if no device is attached to the first downstream
	 * port && the pseudo device locks up on SRST w/ PMP==0.  To
	 * work around this, wait for !BSY only briefly.  If BSY isn't
	 * cleared, perform CLO and proceed to IDENTIFY (achieved by
	 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
	 *
	 * Wait for two seconds.  Devices attached to downstream port
	 * which can't process the following IDENTIFY after this will
	 * have to be reset again.  For most cases, this should
	 * suffice while making probing snappish enough.
	 */
	rc = ata_wait_ready(ap, jiffies + 2 * HZ);
	if (rc)
		ahci_kick_engine(ap, 0);

	return 0;
}
static void ahci_postreset(struct ata_link *link, unsigned int *class)
{
	struct ata_port *ap = link->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 new_tmp, tmp;

	ata_std_postreset(link, class);

	/* Make sure port's ATAPI bit is set appropriately */
	new_tmp = tmp = readl(port_mmio + PORT_CMD);
	if (*class == ATA_DEV_ATAPI)
		new_tmp |= PORT_CMD_ATAPI;
	else
		new_tmp &= ~PORT_CMD_ATAPI;
	if (new_tmp != tmp) {
		writel(new_tmp, port_mmio + PORT_CMD);
		readl(port_mmio + PORT_CMD); /* flush */
	}
}

static int ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	return ahci_do_softreset(link, class, link->pmp, deadline);
}
static u8 ahci_check_status(struct ata_port *ap)
{
	void __iomem *mmio = ap->ioaddr.cmd_addr;

	return readl(mmio + PORT_TFDATA) & 0xFF;
}

static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ahci_port_priv *pp = ap->private_data;
	u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;

	ata_tf_from_fis(d2h_fis, tf);
}
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
{
	struct scatterlist *sg;
	struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	unsigned int si;

	VPRINTK("ENTER\n");

	/*
	 * Next, the S/G list.
	 */
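	/*
	 * Each scatterlist element becomes one PRD entry: the DMA
	 * address is split into low and high 32-bit halves (the
	 * double 16-bit shift keeps the expression valid when
	 * dma_addr_t is 32 bits wide), and flags_size holds the byte
	 * count minus one, which is what the hardware expects.
	 */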
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
		ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
		ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
	}

	return si;
}
static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ahci_port_priv *pp = ap->private_data;
	int is_atapi = ata_is_atapi(qc->tf.protocol);
	void *cmd_tbl;
	u32 opts;
	const u32 cmd_fis_len = 5; /* five dwords */
	unsigned int n_elem;

	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
		memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
		memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
	}

	n_elem = 0;
	if (qc->flags & ATA_QCFLAG_DMAMAP)
		n_elem = ahci_fill_sg(qc, cmd_tbl);

	/*
	 * Fill in command slot information.
	 */
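	/*
	 * The options word packs the FIS length in DWORDs into the low
	 * bits, the PMP port number into bits 15:12 and the PRD entry
	 * count into bits 31:16, with the WRITE/ATAPI/PREFETCH flags
	 * OR'd in below as needed.
	 */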
	opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		opts |= AHCI_CMD_WRITE;
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
}
static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
{
	struct ahci_host_priv *hpriv = ap->host->private_data;
	struct ahci_port_priv *pp = ap->private_data;
	struct ata_eh_info *host_ehi = &ap->link.eh_info;
	struct ata_link *link = NULL;
	struct ata_queued_cmd *active_qc;
	struct ata_eh_info *active_ehi;
	u32 serror;

	/* determine active link */
	ata_port_for_each_link(link, ap)
		if (ata_link_active(link))
			break;
	if (!link)
		link = &ap->link;

	active_qc = ata_qc_from_tag(ap, link->active_tag);
	active_ehi = &link->eh_info;

	/* record irq stat */
	ata_ehi_clear_desc(host_ehi);
	ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	ahci_scr_read(ap, SCR_ERROR, &serror);
	ahci_scr_write(ap, SCR_ERROR, serror);
	host_ehi->serror |= serror;

	/* some controllers set IRQ_IF_ERR on device errors, ignore it */
	if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
		irq_stat &= ~PORT_IRQ_IF_ERR;

	if (irq_stat & PORT_IRQ_TF_ERR) {
		/* If qc is active, charge it; otherwise, the active
		 * link.  There's no active qc on NCQ errors.  It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;

		if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
			host_ehi->serror &= ~SERR_INTERNAL;
	}

	if (irq_stat & PORT_IRQ_UNK_FIS) {
		u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);

		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi,
				  "unknown FIS %08x %08x %08x %08x",
				  unk[0], unk[1], unk[2], unk[3]);
	}

	if (ap->nr_pmp_links && (irq_stat & PORT_IRQ_BAD_PMP)) {
		active_ehi->err_mask |= AC_ERR_HSM;
		active_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(active_ehi, "incorrect PMP");
	}

	if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
		host_ehi->err_mask |= AC_ERR_HOST_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "host bus error");
	}

	if (irq_stat & PORT_IRQ_IF_ERR) {
		host_ehi->err_mask |= AC_ERR_ATA_BUS;
		host_ehi->action |= ATA_EH_RESET;
		ata_ehi_push_desc(host_ehi, "interface fatal error");
	}

	if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
		ata_ehi_hotplugged(host_ehi);
		ata_ehi_push_desc(host_ehi, "%s",
				  irq_stat & PORT_IRQ_CONNECT ?
				  "connection status changed" : "PHY RDY changed");
	}

	/* okay, let's hand over to EH */
	if (irq_stat & PORT_IRQ_FREEZE)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
static void ahci_port_intr(struct ata_port *ap)
{
	void __iomem *port_mmio = ap->ioaddr.cmd_addr;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	struct ahci_port_priv *pp = ap->private_data;
	struct ahci_host_priv *hpriv = ap->host->private_data;
	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
	u32 status, qc_active;
	int rc;

	status = readl(port_mmio + PORT_IRQ_STAT);
	writel(status, port_mmio + PORT_IRQ_STAT);

	/* ignore BAD_PMP while resetting */
	if (unlikely(resetting))
		status &= ~PORT_IRQ_BAD_PMP;

	/* If we are getting PhyRdy, this is
	 * just a power state change, we should
	 * clear out this, plus the PhyRdy/Comm
	 * Wake bits from SError
	 */
	if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
	    (status & PORT_IRQ_PHYRDY)) {
		status &= ~PORT_IRQ_PHYRDY;
		ahci_scr_write(ap, SCR_ERROR, ((1 << 16) | (1 << 18)));
	}

	if (unlikely(status & PORT_IRQ_ERROR)) {
		ahci_error_intr(ap, status);
		return;
	}

	if (status & PORT_IRQ_SDB_FIS) {
		/* If SNotification is available, leave notification
		 * handling to sata_async_notification().  If not,
		 * emulate it by snooping SDB FIS RX area.
		 *
		 * Snooping FIS RX area is probably cheaper than
		 * poking SNotification but some controllers which
		 * implement SNotification, ICH9 for example, don't
		 * store AN SDB FIS into receive area.
		 */
		if (hpriv->cap & HOST_CAP_SNTF)
			sata_async_notification(ap);
		else {
			/* If the 'N' bit in word 0 of the FIS is set,
			 * we just received asynchronous notification.
			 * Tell libata about it.
			 */
			const __le32 *f = pp->rx_fis + RX_FIS_SDB;
			u32 f0 = le32_to_cpu(f[0]);

			if (f0 & (1 << 15))
				sata_async_notification(ap);
		}
	}
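
	/*
	 * Completed commands are those whose bits have cleared in the
	 * hardware's active mask: PxSACT during an NCQ phase, PxCI
	 * otherwise.  ata_qc_complete_multiple() compares this against
	 * libata's view of outstanding commands.
	 */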
	/* pp->active_link is valid iff any command is in flight */
	if (ap->qc_active && pp->active_link->sactive)
		qc_active = readl(port_mmio + PORT_SCR_ACT);
	else
		qc_active = readl(port_mmio + PORT_CMD_ISSUE);

	rc = ata_qc_complete_multiple(ap, qc_active, NULL);

	/* while resetting, invalid completions are expected */
	if (unlikely(rc < 0 && !resetting)) {
		ehi->err_mask |= AC_ERR_HSM;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
	}
}
static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct ahci_host_priv *hpriv;
	unsigned int i, handled = 0;
	void __iomem *mmio;
	u32 irq_stat, irq_ack = 0;

	VPRINTK("ENTER\n");

	hpriv = host->private_data;
	mmio = host->iomap[AHCI_PCI_BAR];

	/* sigh.  0xffffffff is a valid return from h/w */
	irq_stat = readl(mmio + HOST_IRQ_STAT);
	irq_stat &= hpriv->port_map;

	if (!irq_stat)
		return IRQ_NONE;

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		if (!(irq_stat & (1 << i)))
			continue;

		ap = host->ports[i];
		if (ap) {
			ahci_port_intr(ap);
			VPRINTK("port %u\n", i);
		} else {
			VPRINTK("port %u (no irq)\n", i);
			if (ata_ratelimit())
				dev_printk(KERN_WARNING, host->dev,
					   "interrupt on disabled port %u\n", i);
		}

		irq_ack |= (1 << i);
	}
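
	/*
	 * Acknowledge the handled ports in the global interrupt status
	 * only after each port's own PORT_IRQ_STAT has been read and
	 * cleared above.
	 */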
	if (irq_ack) {
		writel(irq_ack, mmio + HOST_IRQ_STAT);
		handled = 1;
	}

	spin_unlock(&host->lock);

	VPRINTK("EXIT\n");

	return IRQ_RETVAL(handled);
}
static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;

	/* Keep track of the currently active link.  It will be used
	 * in completion path to determine whether NCQ phase is in
	 * progress.
	 */
	pp->active_link = qc->dev->link;
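
	/*
	 * For an NCQ command the tag must be set in PxSACT before the
	 * matching bit is written to PxCI to actually issue it; for a
	 * non-NCQ command writing PxCI alone is sufficient.
	 */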
	if (qc->tf.protocol == ATA_PROT_NCQ)
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
	readl(port_mmio + PORT_CMD_ISSUE); /* flush */

	return 0;
}
static void ahci_freeze(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);

	/* turn IRQ off */
	writel(0, port_mmio + PORT_IRQ_MASK);
}

static void ahci_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
	void __iomem *port_mmio = ahci_port_base(ap);
	u32 tmp;
	struct ahci_port_priv *pp = ap->private_data;

	/* clear IRQ */
	tmp = readl(port_mmio + PORT_IRQ_STAT);
	writel(tmp, port_mmio + PORT_IRQ_STAT);
	writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);

	/* turn IRQ back on */
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}
static void ahci_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	/* perform recovery */
	sata_pmp_do_eh(ap, ata_std_prereset, ahci_softreset,
		       ahci_hardreset, ahci_postreset,
		       sata_pmp_std_prereset, ahci_pmp_softreset,
		       sata_pmp_std_hardreset, sata_pmp_std_postreset);
}

static void ahci_vt8251_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	/* perform recovery */
	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_vt8251_hardreset,
		  ahci_postreset);
}

static void ahci_p5wdh_error_handler(struct ata_port *ap)
{
	if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
		/* restart engine */
		ahci_stop_engine(ap);
		ahci_start_engine(ap);
	}

	/* perform recovery */
	ata_do_eh(ap, ata_std_prereset, ahci_softreset, ahci_p5wdh_hardreset,
		  ahci_postreset);
}

static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* make DMA engine forget about the failed command */
	if (qc->flags & ATA_QCFLAG_FAILED)
		ahci_kick_engine(ap, 1);
}
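
/*
 * PORT_CMD_PMP marks the port as having a port multiplier attached
 * (PxCMD.PMA in the AHCI spec); note that the BAD_PMP interrupt is
 * enabled only while a PMP is actually present.
 */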
static void ahci_pmp_attach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd |= PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask |= PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static void ahci_pmp_detach(struct ata_port *ap)
{
	void __iomem *port_mmio = ahci_port_base(ap);
	struct ahci_port_priv *pp = ap->private_data;
	u32 cmd;

	cmd = readl(port_mmio + PORT_CMD);
	cmd &= ~PORT_CMD_PMP;
	writel(cmd, port_mmio + PORT_CMD);

	pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
	writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
}

static int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (ap->nr_pmp_links)
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}
#ifdef CONFIG_PM
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	const char *emsg = NULL;
	int rc;

	rc = ahci_deinit_port(ap, &emsg);
	if (rc == 0)
		ahci_power_down(ap);
	else {
		ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
		ahci_start_port(ap);
	}

	return rc;
}

static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 ctl;

	if (mesg.event & PM_EVENT_SLEEP) {
		/* AHCI spec rev1.1 section 8.3.3:
		 * Software must disable interrupts prior to requesting a
		 * transition of the HBA to D3 state.
		 */
		ctl = readl(mmio + HOST_CTL);
		ctl &= ~HOST_IRQ_EN;
		writel(ctl, mmio + HOST_CTL);
		readl(mmio + HOST_CTL); /* flush */
	}

	return ata_pci_device_suspend(pdev, mesg);
}

static int ahci_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;
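
	/*
	 * After a full suspend the controller's register state may have
	 * been lost, so reset and reinitialize the HBA before resuming
	 * the ports; lighter PM events skip this step.
	 */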
	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		rc = ahci_reset_controller(host);
		if (rc)
			return rc;

		ahci_init_controller(host);
	}

	ata_host_resume(host);

	return 0;
}
#endif
static int ahci_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct ahci_port_priv *pp;
	void *mem;
	dma_addr_t mem_dma;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);

	/*
	 * First item in chunk of DMA memory: 32-slot command table,
	 * 32 bytes each in size
	 */
	pp->cmd_slot = mem;
	pp->cmd_slot_dma = mem_dma;

	mem += AHCI_CMD_SLOT_SZ;
	mem_dma += AHCI_CMD_SLOT_SZ;

	/*
	 * Second item: Received-FIS area
	 */
	pp->rx_fis = mem;
	pp->rx_fis_dma = mem_dma;

	mem += AHCI_RX_FIS_SZ;
	mem_dma += AHCI_RX_FIS_SZ;

	/*
	 * Third item: data area for storing a single command
	 * and its scatter-gather table
	 */
	pp->cmd_tbl = mem;
	pp->cmd_tbl_dma = mem_dma;

	/*
	 * Save off initial list of interrupts to be enabled.
	 * This could be changed later
	 */
	pp->intr_mask = DEF_PORT_IRQ;

	ap->private_data = pp;

	/* engage engines, captain */
	return ahci_port_resume(ap);
}
static void ahci_port_stop(struct ata_port *ap)
{
	const char *emsg = NULL;
	int rc;

	/* de-initialize port */
	rc = ahci_deinit_port(ap, &emsg);
	if (rc)
		ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
}
static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
	int rc;
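
	/*
	 * Prefer 64-bit DMA when the controller advertises it: set a
	 * 64-bit streaming mask, then try a 64-bit consistent mask and
	 * fall back to a 32-bit consistent mask if that fails.
	 * Otherwise use 32-bit masks for both.
	 */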
	if (using_dac &&
	    !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return 0;
}
static void ahci_print_info(struct ata_host *host)
{
	struct ahci_host_priv *hpriv = host->private_data;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	u32 vers, cap, impl, speed;
	const char *speed_s;
	u16 cc;
	const char *scc_s;

	vers = readl(mmio + HOST_VERSION);
	cap = hpriv->cap;
	impl = hpriv->port_map;
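
	/*
	 * Decode the fields printed below from the capability register:
	 * interface speed from bits 23:20, command slots per port from
	 * bits 12:8 and port count from bits 4:0 (both stored as
	 * value - 1).
	 */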
	speed = (cap >> 20) & 0xf;
	if (speed == 1)
		speed_s = "1.5";
	else if (speed == 2)
		speed_s = "3";
	else
		speed_s = "?";

	pci_read_config_word(pdev, 0x0a, &cc);
	if (cc == PCI_CLASS_STORAGE_IDE)
		scc_s = "IDE";
	else if (cc == PCI_CLASS_STORAGE_SATA)
		scc_s = "SATA";
	else if (cc == PCI_CLASS_STORAGE_RAID)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	dev_printk(KERN_INFO, &pdev->dev,
		   "AHCI %02x%02x.%02x%02x "
		   "%u slots %u ports %s Gbps 0x%x impl %s mode\n",
		   (vers >> 24) & 0xff,
		   (vers >> 16) & 0xff,
		   (vers >> 8) & 0xff,
		   vers & 0xff,
		   ((cap >> 8) & 0x1f) + 1,
		   (cap & 0x1f) + 1,
		   speed_s,
		   impl,
		   scc_s);

	dev_printk(KERN_INFO, &pdev->dev,
		   "flags: "
		   "%s%s%s%s%s%s%s"
		   "%s%s%s%s%s%s%s\n",
		   cap & (1 << 31) ? "64bit " : "",
		   cap & (1 << 30) ? "ncq " : "",
		   cap & (1 << 29) ? "sntf " : "",
		   cap & (1 << 28) ? "ilck " : "",
		   cap & (1 << 27) ? "stag " : "",
		   cap & (1 << 26) ? "pm " : "",
		   cap & (1 << 25) ? "led " : "",
		   cap & (1 << 24) ? "clo " : "",
		   cap & (1 << 19) ? "nz " : "",
		   cap & (1 << 18) ? "only " : "",
		   cap & (1 << 17) ? "pmp " : "",
		   cap & (1 << 15) ? "pio " : "",
		   cap & (1 << 14) ? "slum " : "",
		   cap & (1 << 13) ? "part " : "");
}
/* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
 * hardwired to on-board SIMG 4726.  The chipset is ICH8, which doesn't
 * support PMP, and the 4726 either directly exports the device
 * attached to its first downstream port or acts as a hardware storage
 * controller and emulates a single ATA device (can be RAID 0/1 or some
 * other configuration).
 *
 * When there's no device attached to the first downstream port of the
 * 4726, "Config Disk" appears, which is a pseudo ATA device to
 * configure the 4726.  However, ATA emulation of the device is very
 * lame.  It doesn't send signature D2H Reg FIS after the initial
 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
 *
 * The following function works around the problem by always using
 * hardreset on the port and not depending on receiving signature FIS
 * afterward.  If signature FIS isn't received soon, ATA class is
 * assumed without follow-up softreset.
 */
static void ahci_p5wdh_workaround(struct ata_host *host)
{
	static struct dmi_system_id sysids[] = {
		{
			.ident = "P5W DH Deluxe",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR,
					  "ASUSTEK COMPUTER INC"),
				DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
			},
		},
		{ }
	};
	struct pci_dev *pdev = to_pci_dev(host->dev);

	if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
	    dmi_check_system(sysids)) {
		struct ata_port *ap = host->ports[1];

		dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
			   "Deluxe on-board SIMG4726 workaround\n");

		ap->ops = &ahci_p5wdh_ops;
		ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
	}
}
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	struct ata_port_info pi = ahci_port_info[ent->driver_data];
	const struct ata_port_info *ppi[] = { &pi, NULL };
	struct device *dev = &pdev->dev;
	struct ahci_host_priv *hpriv;
	struct ata_host *host;
	int n_ports, i, rc;

	VPRINTK("ENTER\n");

	WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);

	if (!printed_version++)
		dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* AHCI controllers often implement an SFF-compatible interface.
	 * Grab all PCI BARs just in case.
	 */
	rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;

	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
	    (pdev->device == 0x2652 || pdev->device == 0x2653)) {
		u8 map;

		/* ICH6s share the same PCI ID for both piix and ahci
		 * modes.  Enabling ahci mode while MAP indicates
		 * combined mode is a bad idea.  Yield to ata_piix.
		 */
		pci_read_config_byte(pdev, ICH_MAP, &map);
		if (map & 0x3) {
			dev_printk(KERN_INFO, &pdev->dev, "controller is in "
				   "combined mode, can't enable AHCI mode\n");
			return -ENODEV;
		}
	}

	hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->flags |= (unsigned long)pi.private_data;

	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	/* save initial config */
	ahci_save_initial_config(pdev, hpriv);

	/* prepare host */
	if (hpriv->cap & HOST_CAP_NCQ)
		pi.flags |= ATA_FLAG_NCQ;

	if (hpriv->cap & HOST_CAP_PMP)
		pi.flags |= ATA_FLAG_PMP;

	/* CAP.NP sometimes indicates the index of the last enabled
	 * port, at other times that of the last possible port, so
	 * determining the maximum port number requires looking at
	 * both CAP.NP and port_map.
	 */
	n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	if (!host)
		return -ENOMEM;
	host->iomap = pcim_iomap_table(pdev);
	host->private_data = hpriv;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		void __iomem *port_mmio = ahci_port_base(ap);

		ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
		ata_port_pbar_desc(ap, AHCI_PCI_BAR,
				   0x100 + ap->port_no * 0x80, "port");

		/* set initial link pm policy */
		ap->pm_policy = NOT_AVAILABLE;

		/* standard SATA port setup */
		if (hpriv->port_map & (1 << i))
			ap->ioaddr.cmd_addr = port_mmio;
		/* disabled/not-implemented port */
		else
			ap->ops = &ata_dummy_port_ops;
	}

	/* apply workaround for ASUS P5W DH Deluxe mainboard */
	ahci_p5wdh_workaround(host);

	/* initialize adapter */
	rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
	if (rc)
		return rc;

	rc = ahci_reset_controller(host);
	if (rc)
		return rc;

	ahci_init_controller(host);
	ahci_print_info(host);

	pci_set_master(pdev);

	return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
				 &ahci_sht);
}
static int __init ahci_init(void)
{
	return pci_register_driver(&ahci_pci_driver);
}

static void __exit ahci_exit(void)
{
	pci_unregister_driver(&ahci_pci_driver);
}


MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(ahci_init);
module_exit(ahci_exit);