/*
 * ahci.c - AHCI SATA support
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                 Please ALWAYS copy linux-ide@vger.kernel.org
 *                 on emails.
 *
 * Copyright 2004-2005 Red Hat, Inc.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * AHCI hardware documentation:
 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <asm/io.h>

#define DRV_NAME	"ahci"
#define DRV_VERSION	"1.01"

enum {
        AHCI_PCI_BAR = 5,
        AHCI_MAX_SG = 168, /* hardware max is 64K */
        AHCI_DMA_BOUNDARY = 0xffffffff,
        AHCI_USE_CLUSTERING = 0,
        AHCI_CMD_SLOT_SZ = 32 * 32,
        AHCI_RX_FIS_SZ = 256,
        AHCI_CMD_TBL_HDR = 0x80,
        AHCI_CMD_TBL_CDB = 0x40,
        AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR + (AHCI_MAX_SG * 16),
        AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_SZ +
                                AHCI_RX_FIS_SZ,
        AHCI_IRQ_ON_SG = (1 << 31),
        AHCI_CMD_ATAPI = (1 << 5),
        AHCI_CMD_WRITE = (1 << 6),

        RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */

        board_ahci = 0,

        /* global controller registers */
        HOST_CAP = 0x00,        /* host capabilities */
        HOST_CTL = 0x04,        /* global host control */
        HOST_IRQ_STAT = 0x08,   /* interrupt status */
        HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
        HOST_VERSION = 0x10,    /* AHCI spec. version compliancy */

        /* HOST_CTL bits */
        HOST_RESET = (1 << 0),    /* reset controller; self-clear */
        HOST_IRQ_EN = (1 << 1),   /* global IRQ enable */
        HOST_AHCI_EN = (1 << 31), /* AHCI enabled */

        /* HOST_CAP bits */
        HOST_CAP_64 = (1 << 31),  /* PCI DAC (64-bit DMA) support */

        /* registers for each SATA port */
        PORT_LST_ADDR = 0x00,    /* command list DMA addr */
        PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
        PORT_FIS_ADDR = 0x08,    /* FIS rx buf addr */
        PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
        PORT_IRQ_STAT = 0x10,    /* interrupt status */
        PORT_IRQ_MASK = 0x14,    /* interrupt enable/disable mask */
        PORT_CMD = 0x18,         /* port command */
        PORT_TFDATA = 0x20,      /* taskfile data */
        PORT_SIG = 0x24,         /* device TF signature */
        PORT_CMD_ISSUE = 0x38,   /* command issue */
        PORT_SCR = 0x28,         /* SATA phy register block */
        PORT_SCR_STAT = 0x28,    /* SATA phy register: SStatus */
        PORT_SCR_CTL = 0x2c,     /* SATA phy register: SControl */
        PORT_SCR_ERR = 0x30,     /* SATA phy register: SError */
        PORT_SCR_ACT = 0x34,     /* SATA phy register: SActive */

        /* PORT_IRQ_{STAT,MASK} bits */
        PORT_IRQ_COLD_PRES = (1 << 31),     /* cold presence detect */
        PORT_IRQ_TF_ERR = (1 << 30),        /* task file error */
        PORT_IRQ_HBUS_ERR = (1 << 29),      /* host bus fatal error */
        PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
        PORT_IRQ_IF_ERR = (1 << 27),        /* interface fatal error */
        PORT_IRQ_IF_NONFATAL = (1 << 26),   /* interface non-fatal error */
        PORT_IRQ_OVERFLOW = (1 << 24),      /* xfer exhausted available S/G */
        PORT_IRQ_BAD_PMP = (1 << 23),       /* incorrect port multiplier */
        PORT_IRQ_PHYRDY = (1 << 22),        /* PhyRdy changed */
        PORT_IRQ_DEV_ILCK = (1 << 7),       /* device interlock */
        PORT_IRQ_CONNECT = (1 << 6),        /* port connect change status */
        PORT_IRQ_SG_DONE = (1 << 5),        /* descriptor processed */
        PORT_IRQ_UNK_FIS = (1 << 4),        /* unknown FIS rx'd */
        PORT_IRQ_SDB_FIS = (1 << 3),        /* Set Device Bits FIS rx'd */
        PORT_IRQ_DMAS_FIS = (1 << 2),       /* DMA Setup FIS rx'd */
        PORT_IRQ_PIOS_FIS = (1 << 1),       /* PIO Setup FIS rx'd */
        PORT_IRQ_D2H_REG_FIS = (1 << 0),    /* D2H Register FIS rx'd */

        PORT_IRQ_FATAL = PORT_IRQ_TF_ERR |
                         PORT_IRQ_HBUS_ERR |
                         PORT_IRQ_HBUS_DATA_ERR |
                         PORT_IRQ_IF_ERR,
        DEF_PORT_IRQ = PORT_IRQ_FATAL | PORT_IRQ_PHYRDY |
                       PORT_IRQ_CONNECT | PORT_IRQ_SG_DONE |
                       PORT_IRQ_UNK_FIS | PORT_IRQ_SDB_FIS |
                       PORT_IRQ_DMAS_FIS | PORT_IRQ_PIOS_FIS |
                       PORT_IRQ_D2H_REG_FIS,

        /* PORT_CMD bits */
        PORT_CMD_ATAPI = (1 << 24),   /* Device is ATAPI */
        PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
        PORT_CMD_FIS_ON = (1 << 14),  /* FIS DMA engine running */
        PORT_CMD_FIS_RX = (1 << 4),   /* Enable FIS receive DMA engine */
        PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
        PORT_CMD_SPIN_UP = (1 << 1),  /* Spin up device */
        PORT_CMD_START = (1 << 0),    /* Enable port DMA engine */

        PORT_CMD_ICC_ACTIVE = (0x1 << 28),  /* Put i/f in active state */
        PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
        PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */

        /* hpriv->flags bits */
        AHCI_FLAG_MSI = (1 << 0),
};
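/*
 * In-memory structures shared with the HBA: struct ahci_cmd_hdr mirrors one
 * 32-byte entry of the per-port command list that PORT_LST_ADDR points at,
 * and struct ahci_sg mirrors one 16-byte scatter/gather (PRD) entry in the
 * command table.  Both are read by the controller via DMA, hence the
 * cpu_to_le32() conversions when they are filled in below.
 */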
struct ahci_cmd_hdr {
        u32 opts;
        u32 status;
        u32 tbl_addr;
        u32 tbl_addr_hi;
        u32 reserved[4];
};

struct ahci_sg {
        u32 addr;
        u32 addr_hi;
        u32 reserved;
        u32 flags_size;
};

struct ahci_host_priv {
        unsigned long flags;
        u32 cap;        /* cache of HOST_CAP register */
        u32 port_map;   /* cache of HOST_PORTS_IMPL reg */
};

struct ahci_port_priv {
        struct ahci_cmd_hdr *cmd_slot;
        dma_addr_t cmd_slot_dma;
        void *cmd_tbl;
        dma_addr_t cmd_tbl_dma;
        struct ahci_sg *cmd_tbl_sg;
        void *rx_fis;
        dma_addr_t rx_fis_dma;
};

static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static int ahci_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
static void ahci_phy_reset(struct ata_port *ap);
static void ahci_irq_clear(struct ata_port *ap);
static void ahci_eng_timeout(struct ata_port *ap);
static int ahci_port_start(struct ata_port *ap);
static void ahci_port_stop(struct ata_port *ap);
static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
static void ahci_qc_prep(struct ata_queued_cmd *qc);
static u8 ahci_check_status(struct ata_port *ap);
static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
static void ahci_remove_one (struct pci_dev *pdev);

static struct scsi_host_template ahci_sht = {
        .module = THIS_MODULE,
        .name = DRV_NAME,
        .ioctl = ata_scsi_ioctl,
        .queuecommand = ata_scsi_queuecmd,
        .eh_strategy_handler = ata_scsi_error,
        .can_queue = ATA_DEF_QUEUE,
        .this_id = ATA_SHT_THIS_ID,
        .sg_tablesize = AHCI_MAX_SG,
        .max_sectors = ATA_MAX_SECTORS,
        .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
        .emulated = ATA_SHT_EMULATED,
        .use_clustering = AHCI_USE_CLUSTERING,
        .proc_name = DRV_NAME,
        .dma_boundary = AHCI_DMA_BOUNDARY,
        .slave_configure = ata_scsi_slave_config,
        .bios_param = ata_std_bios_param,
        .ordered_flush = 1,
};

static const struct ata_port_operations ahci_ops = {
        .port_disable = ata_port_disable,
        .check_status = ahci_check_status,
        .check_altstatus = ahci_check_status,
        .dev_select = ata_noop_dev_select,
        .tf_read = ahci_tf_read,
        .phy_reset = ahci_phy_reset,
        .qc_prep = ahci_qc_prep,
        .qc_issue = ahci_qc_issue,
        .eng_timeout = ahci_eng_timeout,
        .irq_handler = ahci_interrupt,
        .irq_clear = ahci_irq_clear,
        .scr_read = ahci_scr_read,
        .scr_write = ahci_scr_write,
        .port_start = ahci_port_start,
        .port_stop = ahci_port_stop,
};

static struct ata_port_info ahci_port_info[] = {
        /* board_ahci */
        {
                .sht = &ahci_sht,
                .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                              ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
                              ATA_FLAG_PIO_DMA,
                .pio_mask = 0x1f,  /* pio0-4 */
                .udma_mask = 0x7f, /* udma0-6 ; FIXME */
                .port_ops = &ahci_ops,
        },
};

static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          board_ahci }, /* ICH6 */
        { PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          board_ahci }, /* ICH6M */
        { PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          board_ahci }, /* ICH7 */
        { PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          board_ahci }, /* ICH7M */
        { PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          board_ahci }, /* ICH7R */
        { PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          board_ahci }, /* ULi M5288 */
        { PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          board_ahci }, /* ESB2 */
        { PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          board_ahci }, /* ESB2 */
        { PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          board_ahci }, /* ESB2 */
        { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
          board_ahci }, /* ICH7-M DH */
        { } /* terminate list */
};

static struct pci_driver ahci_pci_driver = {
        .name = DRV_NAME,
        .id_table = ahci_pci_tbl,
        .probe = ahci_init_one,
        .remove = ahci_remove_one,
};

static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
{
        return base + 0x100 + (port * 0x80);
}

static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
{
        return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
}
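/*
 * ahci_port_start() allocates the per-port private data and one coherent
 * DMA region that is carved into three pieces (command list, received-FIS
 * area, command table), programs the region's addresses into the port
 * registers, and then starts the port's FIS-receive and command-list
 * DMA engines.
 */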
static int ahci_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host_set->dev;
        struct ahci_host_priv *hpriv = ap->host_set->private_data;
        struct ahci_port_priv *pp;
        void __iomem *mmio = ap->host_set->mmio_base;
        void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
        void *mem;
        dma_addr_t mem_dma;
        int rc;

        pp = kmalloc(sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;
        memset(pp, 0, sizeof(*pp));

        rc = ata_pad_alloc(ap, dev);
        if (rc) {
                kfree(pp);
                return rc;
        }

        mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
        if (!mem) {
                ata_pad_free(ap, dev);
                kfree(pp);
                return -ENOMEM;
        }
        memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory: 32-slot command table,
         * 32 bytes each in size
         */
        pp->cmd_slot = mem;
        pp->cmd_slot_dma = mem_dma;

        mem += AHCI_CMD_SLOT_SZ;
        mem_dma += AHCI_CMD_SLOT_SZ;

        /*
         * Second item: Received-FIS area
         */
        pp->rx_fis = mem;
        pp->rx_fis_dma = mem_dma;

        mem += AHCI_RX_FIS_SZ;
        mem_dma += AHCI_RX_FIS_SZ;

        /*
         * Third item: data area for storing a single command
         * and its scatter-gather table
         */
        pp->cmd_tbl = mem;
        pp->cmd_tbl_dma = mem_dma;

        pp->cmd_tbl_sg = mem + AHCI_CMD_TBL_HDR;

        ap->private_data = pp;

        if (hpriv->cap & HOST_CAP_64)
                writel((pp->cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
        writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
        readl(port_mmio + PORT_LST_ADDR); /* flush */

        if (hpriv->cap & HOST_CAP_64)
                writel((pp->rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
        writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
        readl(port_mmio + PORT_FIS_ADDR); /* flush */

        writel(PORT_CMD_ICC_ACTIVE | PORT_CMD_FIS_RX |
               PORT_CMD_POWER_ON | PORT_CMD_SPIN_UP |
               PORT_CMD_START, port_mmio + PORT_CMD);
        readl(port_mmio + PORT_CMD); /* flush */

        return 0;
}
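/*
 * ahci_port_stop() is the inverse of ahci_port_start(): stop the port's
 * DMA engines, wait for them to settle, then free the coherent DMA region
 * and the per-port private data.
 */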
static void ahci_port_stop(struct ata_port *ap)
{
        struct device *dev = ap->host_set->dev;
        struct ahci_port_priv *pp = ap->private_data;
        void __iomem *mmio = ap->host_set->mmio_base;
        void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
        u32 tmp;

        tmp = readl(port_mmio + PORT_CMD);
        tmp &= ~(PORT_CMD_START | PORT_CMD_FIS_RX);
        writel(tmp, port_mmio + PORT_CMD);
        readl(port_mmio + PORT_CMD); /* flush */

        /* spec says 500 msecs for each PORT_CMD_{START,FIS_RX} bit, so
         * this is slightly incorrect.
         */
        msleep(500);

        ap->private_data = NULL;
        dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
                          pp->cmd_slot, pp->cmd_slot_dma);
        ata_pad_free(ap, dev);
        kfree(pp);
}
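/*
 * The SATA SCR registers (SStatus, SControl, SError, SActive) sit at
 * consecutive 32-bit offsets from the port's scr_addr, so the generic
 * SCR_* index only needs to be translated into a register number.
 */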
static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
{
        unsigned int sc_reg;

        switch (sc_reg_in) {
        case SCR_STATUS:  sc_reg = 0; break;
        case SCR_CONTROL: sc_reg = 1; break;
        case SCR_ERROR:   sc_reg = 2; break;
        case SCR_ACTIVE:  sc_reg = 3; break;
        default:
                return 0xffffffffU;
        }

        return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
                            u32 val)
{
        unsigned int sc_reg;

        switch (sc_reg_in) {
        case SCR_STATUS:  sc_reg = 0; break;
        case SCR_CONTROL: sc_reg = 1; break;
        case SCR_ERROR:   sc_reg = 2; break;
        case SCR_ACTIVE:  sc_reg = 3; break;
        default:
                return;
        }

        writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
}
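/*
 * ahci_phy_reset() brings the link up via __sata_phy_reset(), classifies
 * the attached device from the signature latched in PORT_SIG, and keeps
 * the port's ATAPI bit in PORT_CMD consistent with that classification.
 */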
static void ahci_phy_reset(struct ata_port *ap)
{
        void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
        struct ata_taskfile tf;
        struct ata_device *dev = &ap->device[0];
        u32 new_tmp, tmp;

        __sata_phy_reset(ap);

        if (ap->flags & ATA_FLAG_PORT_DISABLED)
                return;

        tmp = readl(port_mmio + PORT_SIG);
        tf.lbah = (tmp >> 24) & 0xff;
        tf.lbam = (tmp >> 16) & 0xff;
        tf.lbal = (tmp >> 8) & 0xff;
        tf.nsect = (tmp) & 0xff;

        dev->class = ata_dev_classify(&tf);
        if (!ata_dev_present(dev)) {
                ata_port_disable(ap);
                return;
        }

        /* Make sure port's ATAPI bit is set appropriately */
        new_tmp = tmp = readl(port_mmio + PORT_CMD);
        if (dev->class == ATA_DEV_ATAPI)
                new_tmp |= PORT_CMD_ATAPI;
        else
                new_tmp &= ~PORT_CMD_ATAPI;
        if (new_tmp != tmp) {
                writel(new_tmp, port_mmio + PORT_CMD);
                readl(port_mmio + PORT_CMD); /* flush */
        }
}

static u8 ahci_check_status(struct ata_port *ap)
{
        void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;

        return readl(mmio + PORT_TFDATA) & 0xFF;
}

static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        struct ahci_port_priv *pp = ap->private_data;
        u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;

        ata_tf_from_fis(d2h_fis, tf);
}
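/*
 * ahci_fill_sg() converts the qc's DMA-mapped scatterlist into hardware
 * S/G (PRD) entries in the command table.  Note that the hardware field
 * holds "byte count minus one", hence the sg_len - 1 below.
 */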
static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc)
{
        struct ahci_port_priv *pp = qc->ap->private_data;
        struct scatterlist *sg;
        struct ahci_sg *ahci_sg;
        unsigned int n_sg = 0;

        VPRINTK("ENTER\n");

        /*
         * Next, the S/G list.
         */
        ahci_sg = pp->cmd_tbl_sg;
        ata_for_each_sg(sg, qc) {
                dma_addr_t addr = sg_dma_address(sg);
                u32 sg_len = sg_dma_len(sg);

                ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
                ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
                ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
                ahci_sg++;
                n_sg++;
        }

        return n_sg;
}
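/*
 * ahci_qc_prep() builds the command: the H2D Register FIS (and the ATAPI
 * CDB, if any) go into the command table, the S/G entries follow, and
 * command slot 0 is pointed at the table with the FIS length, direction
 * flag and PRD entry count encoded in its options word.
 */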
static void ahci_qc_prep(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        struct ahci_port_priv *pp = ap->private_data;
        u32 opts;
        const u32 cmd_fis_len = 5; /* five dwords */
        unsigned int n_elem;

        /*
         * Fill in command slot information (currently only slot 0 is
         * used, since we don't do command queueing yet)
         */
        opts = cmd_fis_len;
        if (qc->tf.flags & ATA_TFLAG_WRITE)
                opts |= AHCI_CMD_WRITE;
        if (is_atapi_taskfile(&qc->tf))
                opts |= AHCI_CMD_ATAPI;

        pp->cmd_slot[0].opts = cpu_to_le32(opts);
        pp->cmd_slot[0].status = 0;
        pp->cmd_slot[0].tbl_addr = cpu_to_le32(pp->cmd_tbl_dma & 0xffffffff);
        pp->cmd_slot[0].tbl_addr_hi = cpu_to_le32((pp->cmd_tbl_dma >> 16) >> 16);

        /*
         * Fill in command table information.  First, the header,
         * a SATA Register - Host to Device command FIS.
         */
        ata_tf_to_fis(&qc->tf, pp->cmd_tbl, 0);
        if (opts & AHCI_CMD_ATAPI) {
                memset(pp->cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
                memcpy(pp->cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, ap->cdb_len);
        }

        if (!(qc->flags & ATA_QCFLAG_DMAMAP))
                return;

        n_elem = ahci_fill_sg(qc);

        pp->cmd_slot[0].opts |= cpu_to_le32(n_elem << 16);
}
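/*
 * Error recovery helper: dump the relevant registers, stop the command
 * list engine, clear SError, issue a COMRESET via SControl if the device
 * is still busy, and finally restart the engine.
 */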
static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
{
        void __iomem *mmio = ap->host_set->mmio_base;
        void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
        u32 tmp;
        int work;

        printk(KERN_WARNING "ata%u: port reset, "
               "p_is %x is %x pis %x cmd %x tf %x ss %x se %x\n",
               ap->id,
               irq_stat,
               readl(mmio + HOST_IRQ_STAT),
               readl(port_mmio + PORT_IRQ_STAT),
               readl(port_mmio + PORT_CMD),
               readl(port_mmio + PORT_TFDATA),
               readl(port_mmio + PORT_SCR_STAT),
               readl(port_mmio + PORT_SCR_ERR));

        /* stop DMA */
        tmp = readl(port_mmio + PORT_CMD);
        tmp &= ~PORT_CMD_START;
        writel(tmp, port_mmio + PORT_CMD);

        /* wait for engine to stop.  TODO: this could be
         * as long as 500 msec
         */
        work = 1000;
        while (work-- > 0) {
                tmp = readl(port_mmio + PORT_CMD);
                if ((tmp & PORT_CMD_LIST_ON) == 0)
                        break;
                udelay(10);
        }

        /* clear SATA phy error, if any */
        tmp = readl(port_mmio + PORT_SCR_ERR);
        writel(tmp, port_mmio + PORT_SCR_ERR);

        /* if DRQ/BSY is set, device needs to be reset.
         * if so, issue COMRESET
         */
        tmp = readl(port_mmio + PORT_TFDATA);
        if (tmp & (ATA_BUSY | ATA_DRQ)) {
                writel(0x301, port_mmio + PORT_SCR_CTL);
                readl(port_mmio + PORT_SCR_CTL); /* flush */
                udelay(10);
                writel(0x300, port_mmio + PORT_SCR_CTL);
                readl(port_mmio + PORT_SCR_CTL); /* flush */
        }

        /* re-start DMA */
        tmp = readl(port_mmio + PORT_CMD);
        tmp |= PORT_CMD_START;
        writel(tmp, port_mmio + PORT_CMD);
        readl(port_mmio + PORT_CMD); /* flush */
}
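/*
 * ->eng_timeout() handler, called when a command times out: reset the
 * port via ahci_intr_error() and complete the stuck command with
 * AC_ERR_OTHER.
 */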
static void ahci_eng_timeout(struct ata_port *ap)
{
        struct ata_host_set *host_set = ap->host_set;
        void __iomem *mmio = host_set->mmio_base;
        void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
        struct ata_queued_cmd *qc;
        unsigned long flags;

        printk(KERN_WARNING "ata%u: handling error/timeout\n", ap->id);

        spin_lock_irqsave(&host_set->lock, flags);

        qc = ata_qc_from_tag(ap, ap->active_tag);
        if (!qc) {
                printk(KERN_ERR "ata%u: BUG: timeout without command\n",
                       ap->id);
        } else {
                ahci_intr_error(ap, readl(port_mmio + PORT_IRQ_STAT));

                /* hack alert!  We cannot use the supplied completion
                 * function from inside the ->eh_strategy_handler() thread.
                 * libata is the only user of ->eh_strategy_handler() in
                 * any kernel, so the default scsi_done() assumes it is
                 * not being called from the SCSI EH.
                 */
                qc->scsidone = scsi_finish_command;
                ata_qc_complete(qc, AC_ERR_OTHER);
        }

        spin_unlock_irqrestore(&host_set->lock, flags);
}
static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
        void __iomem *mmio = ap->host_set->mmio_base;
        void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
        u32 status, serr, ci;

        serr = readl(port_mmio + PORT_SCR_ERR);
        writel(serr, port_mmio + PORT_SCR_ERR);

        status = readl(port_mmio + PORT_IRQ_STAT);
        writel(status, port_mmio + PORT_IRQ_STAT);

        ci = readl(port_mmio + PORT_CMD_ISSUE);
        if (likely((ci & 0x1) == 0)) {
                if (qc) {
                        ata_qc_complete(qc, 0);
                        qc = NULL;
                }
        }

        if (status & PORT_IRQ_FATAL) {
                unsigned int err_mask;
                if (status & PORT_IRQ_TF_ERR)
                        err_mask = AC_ERR_DEV;
                else if (status & PORT_IRQ_IF_ERR)
                        err_mask = AC_ERR_ATA_BUS;
                else
                        err_mask = AC_ERR_HOST_BUS;

                /* command processing has stopped due to error; restart */
                ahci_intr_error(ap, status);

                if (qc)
                        ata_qc_complete(qc, err_mask);
        }

        return 1;
}

static void ahci_irq_clear(struct ata_port *ap)
{
        /* TODO */
}
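/*
 * Top-level interrupt handler.  HOST_IRQ_STAT has one bit per port; for
 * each implemented port with its bit set, the per-port work is done in
 * ahci_host_intr() and the bit is acknowledged afterwards by writing it
 * back to HOST_IRQ_STAT.
 */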
static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
        struct ata_host_set *host_set = dev_instance;
        struct ahci_host_priv *hpriv;
        unsigned int i, handled = 0;
        void __iomem *mmio;
        u32 irq_stat, irq_ack = 0;

        VPRINTK("ENTER\n");

        hpriv = host_set->private_data;
        mmio = host_set->mmio_base;

        /* sigh.  0xffffffff is a valid return from h/w */
        irq_stat = readl(mmio + HOST_IRQ_STAT);
        irq_stat &= hpriv->port_map;
        if (!irq_stat)
                return IRQ_NONE;

        spin_lock(&host_set->lock);

        for (i = 0; i < host_set->n_ports; i++) {
                struct ata_port *ap;

                if (!(irq_stat & (1 << i)))
                        continue;

                ap = host_set->ports[i];
                if (ap) {
                        struct ata_queued_cmd *qc;
                        qc = ata_qc_from_tag(ap, ap->active_tag);
                        if (!ahci_host_intr(ap, qc))
                                if (ata_ratelimit()) {
                                        struct pci_dev *pdev =
                                                to_pci_dev(ap->host_set->dev);
                                        dev_printk(KERN_WARNING, &pdev->dev,
                                                   "unhandled interrupt on port %u\n",
                                                   i);
                                }

                        VPRINTK("port %u\n", i);
                } else {
                        VPRINTK("port %u (no irq)\n", i);
                        if (ata_ratelimit()) {
                                /* ap is NULL in this branch, so get the
                                 * PCI device from host_set directly
                                 */
                                struct pci_dev *pdev =
                                        to_pci_dev(host_set->dev);
                                dev_printk(KERN_WARNING, &pdev->dev,
                                           "interrupt on disabled port %u\n", i);
                        }
                }

                irq_ack |= (1 << i);
        }

        if (irq_ack) {
                writel(irq_ack, mmio + HOST_IRQ_STAT);
                handled = 1;
        }

        spin_unlock(&host_set->lock);

        VPRINTK("EXIT\n");

        return IRQ_RETVAL(handled);
}
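/*
 * Issue the prepared command by setting bit 0 of PORT_CMD_ISSUE; only
 * slot 0 is ever used since the driver does not queue commands yet.
 */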
static int ahci_qc_issue(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;

        writel(1, port_mmio + PORT_CMD_ISSUE);
        readl(port_mmio + PORT_CMD_ISSUE); /* flush */

        return 0;
}

static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
                            unsigned int port_idx)
{
        VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);

        base = ahci_port_base_ul(base, port_idx);
        VPRINTK("base now==0x%lx\n", base);

        port->cmd_addr = base;
        port->scr_addr = base + PORT_SCR;

        VPRINTK("EXIT\n");
}
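/*
 * One-time controller initialization: reset the HBA, enable AHCI mode,
 * pick a suitable DMA mask, and bring every port into a known idle state
 * with its interrupts unmasked before the ports are probed.
 */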
static int ahci_host_init(struct ata_probe_ent *probe_ent)
{
        struct ahci_host_priv *hpriv = probe_ent->private_data;
        struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
        void __iomem *mmio = probe_ent->mmio_base;
        u32 tmp, cap_save;
        u16 tmp16;
        unsigned int i, j, using_dac;
        int rc;
        void __iomem *port_mmio;

        cap_save = readl(mmio + HOST_CAP);
        cap_save &= ( (1<<28) | (1<<17) );
        cap_save |= (1 << 27);

        /* global controller reset */
        tmp = readl(mmio + HOST_CTL);
        if ((tmp & HOST_RESET) == 0) {
                writel(tmp | HOST_RESET, mmio + HOST_CTL);
                readl(mmio + HOST_CTL); /* flush */
        }

        /* reset must complete within 1 second, or
         * the hardware should be considered fried.
         */
        ssleep(1);

        tmp = readl(mmio + HOST_CTL);
        if (tmp & HOST_RESET) {
                dev_printk(KERN_ERR, &pdev->dev,
                           "controller reset failed (0x%x)\n", tmp);
                return -EIO;
        }

        writel(HOST_AHCI_EN, mmio + HOST_CTL);
        (void) readl(mmio + HOST_CTL); /* flush */
        writel(cap_save, mmio + HOST_CAP);
        writel(0xf, mmio + HOST_PORTS_IMPL);
        (void) readl(mmio + HOST_PORTS_IMPL); /* flush */

        pci_read_config_word(pdev, 0x92, &tmp16);
        tmp16 |= 0xf;
        pci_write_config_word(pdev, 0x92, tmp16);

        hpriv->cap = readl(mmio + HOST_CAP);
        hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
        probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;

        VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
                hpriv->cap, hpriv->port_map, probe_ent->n_ports);

        using_dac = hpriv->cap & HOST_CAP_64;
        if (using_dac &&
            !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
                rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
                if (rc) {
                        rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                        if (rc) {
                                dev_printk(KERN_ERR, &pdev->dev,
                                           "64-bit DMA enable failed\n");
                                return rc;
                        }
                }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit DMA enable failed\n");
                        return rc;
                }
                rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
                if (rc) {
                        dev_printk(KERN_ERR, &pdev->dev,
                                   "32-bit consistent DMA enable failed\n");
                        return rc;
                }
        }

        for (i = 0; i < probe_ent->n_ports; i++) {
#if 0 /* BIOSen initialize this incorrectly */
                if (!(hpriv->port_map & (1 << i)))
                        continue;
#endif

                port_mmio = ahci_port_base(mmio, i);
                VPRINTK("mmio %p port_mmio %p\n", mmio, port_mmio);

                ahci_setup_port(&probe_ent->port[i],
                                (unsigned long) mmio, i);

                /* make sure port is not active */
                tmp = readl(port_mmio + PORT_CMD);
                VPRINTK("PORT_CMD 0x%x\n", tmp);
                if (tmp & (PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
                           PORT_CMD_FIS_RX | PORT_CMD_START)) {
                        tmp &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
                                 PORT_CMD_FIS_RX | PORT_CMD_START);
                        writel(tmp, port_mmio + PORT_CMD);
                        readl(port_mmio + PORT_CMD); /* flush */

                        /* spec says 500 msecs for each bit, so
                         * this is slightly incorrect.
                         */
                        msleep(500);
                }

                writel(PORT_CMD_SPIN_UP, port_mmio + PORT_CMD);

                j = 0;
                while (j < 100) {
                        msleep(10);
                        tmp = readl(port_mmio + PORT_SCR_STAT);
                        if ((tmp & 0xf) == 0x3)
                                break;
                        j++;
                }

                tmp = readl(port_mmio + PORT_SCR_ERR);
                VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
                writel(tmp, port_mmio + PORT_SCR_ERR);

                /* ack any pending irq events for this port */
                tmp = readl(port_mmio + PORT_IRQ_STAT);
                VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
                if (tmp)
                        writel(tmp, port_mmio + PORT_IRQ_STAT);

                writel(1 << i, mmio + HOST_IRQ_STAT);

                /* set irq mask (enables interrupts) */
                writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
        }

        tmp = readl(mmio + HOST_CTL);
        VPRINTK("HOST_CTL 0x%x\n", tmp);
        writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
        tmp = readl(mmio + HOST_CTL);
        VPRINTK("HOST_CTL 0x%x\n", tmp);

        pci_set_master(pdev);

        return 0;
}
static void ahci_print_info(struct ata_probe_ent *probe_ent)
{
        struct ahci_host_priv *hpriv = probe_ent->private_data;
        struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
        void __iomem *mmio = probe_ent->mmio_base;
        u32 vers, cap, impl, speed;
        const char *speed_s;
        u16 cc;
        const char *scc_s;

        vers = readl(mmio + HOST_VERSION);
        cap = hpriv->cap;
        impl = hpriv->port_map;

        speed = (cap >> 20) & 0xf;
        if (speed == 1)
                speed_s = "1.5";
        else if (speed == 2)
                speed_s = "3";
        else
                speed_s = "?";

        pci_read_config_word(pdev, 0x0a, &cc);
        if (cc == 0x0101)
                scc_s = "IDE";
        else if (cc == 0x0106)
                scc_s = "SATA";
        else if (cc == 0x0104)
                scc_s = "RAID";
        else
                scc_s = "unknown";

        dev_printk(KERN_INFO, &pdev->dev,
                   "AHCI %02x%02x.%02x%02x "
                   "%u slots %u ports %s Gbps 0x%x impl %s mode\n",
                   (vers >> 24) & 0xff,
                   (vers >> 16) & 0xff,
                   (vers >> 8) & 0xff,
                   vers & 0xff,
                   ((cap >> 8) & 0x1f) + 1,
                   (cap & 0x1f) + 1,
                   speed_s,
                   impl,
                   scc_s);

        dev_printk(KERN_INFO, &pdev->dev,
                   "flags: "
                   "%s%s%s%s%s%s"
                   "%s%s%s%s%s%s%s\n",
                   cap & (1 << 31) ? "64bit " : "",
                   cap & (1 << 30) ? "ncq " : "",
                   cap & (1 << 28) ? "ilck " : "",
                   cap & (1 << 27) ? "stag " : "",
                   cap & (1 << 26) ? "pm " : "",
                   cap & (1 << 25) ? "led " : "",
                   cap & (1 << 24) ? "clo " : "",
                   cap & (1 << 19) ? "nz " : "",
                   cap & (1 << 18) ? "only " : "",
                   cap & (1 << 17) ? "pmp " : "",
                   cap & (1 << 15) ? "pio " : "",
                   cap & (1 << 14) ? "slum " : "",
                   cap & (1 << 13) ? "part " : "");
}
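/*
 * PCI probe routine: enable the device, map BAR 5 (the AHCI register
 * space), set up MSI if available, initialize the controller via
 * ahci_host_init(), and register the ports with libata.
 */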
static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int printed_version;
        struct ata_probe_ent *probe_ent = NULL;
        struct ahci_host_priv *hpriv;
        unsigned long base;
        void __iomem *mmio_base;
        unsigned int board_idx = (unsigned int) ent->driver_data;
        int have_msi, pci_dev_busy = 0;
        int rc;

        VPRINTK("ENTER\n");

        if (!printed_version++)
                dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc) {
                pci_dev_busy = 1;
                goto err_out;
        }

        if (pci_enable_msi(pdev) == 0)
                have_msi = 1;
        else {
                pci_intx(pdev, 1);
                have_msi = 0;
        }

        probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
        if (probe_ent == NULL) {
                rc = -ENOMEM;
                goto err_out_msi;
        }

        memset(probe_ent, 0, sizeof(*probe_ent));
        probe_ent->dev = pci_dev_to_dev(pdev);
        INIT_LIST_HEAD(&probe_ent->node);

        mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
        if (mmio_base == NULL) {
                rc = -ENOMEM;
                goto err_out_free_ent;
        }
        base = (unsigned long) mmio_base;

        hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
        if (!hpriv) {
                rc = -ENOMEM;
                goto err_out_iounmap;
        }
        memset(hpriv, 0, sizeof(*hpriv));

        probe_ent->sht = ahci_port_info[board_idx].sht;
        probe_ent->host_flags = ahci_port_info[board_idx].host_flags;
        probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
        probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
        probe_ent->port_ops = ahci_port_info[board_idx].port_ops;

        probe_ent->irq = pdev->irq;
        probe_ent->irq_flags = SA_SHIRQ;
        probe_ent->mmio_base = mmio_base;
        probe_ent->private_data = hpriv;

        if (have_msi)
                hpriv->flags |= AHCI_FLAG_MSI;

        /* initialize adapter */
        rc = ahci_host_init(probe_ent);
        if (rc)
                goto err_out_hpriv;

        ahci_print_info(probe_ent);

        /* FIXME: check ata_device_add return value */
        ata_device_add(probe_ent);
        kfree(probe_ent);

        return 0;

err_out_hpriv:
        kfree(hpriv);
err_out_iounmap:
        pci_iounmap(pdev, mmio_base);
err_out_free_ent:
        kfree(probe_ent);
err_out_msi:
        if (have_msi)
                pci_disable_msi(pdev);
        else
                pci_intx(pdev, 0);
        pci_release_regions(pdev);
err_out:
        if (!pci_dev_busy)
                pci_disable_device(pdev);
        return rc;
}
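/*
 * PCI remove routine: tear down in roughly the reverse order of
 * ahci_init_one(). Unregister the SCSI hosts, free the IRQ, release
 * the libata and MSI resources, then unmap and disable the PCI device.
 */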
static void ahci_remove_one (struct pci_dev *pdev)
{
        struct device *dev = pci_dev_to_dev(pdev);
        struct ata_host_set *host_set = dev_get_drvdata(dev);
        struct ahci_host_priv *hpriv = host_set->private_data;
        struct ata_port *ap;
        unsigned int i;
        int have_msi;

        for (i = 0; i < host_set->n_ports; i++) {
                ap = host_set->ports[i];

                scsi_remove_host(ap->host);
        }

        have_msi = hpriv->flags & AHCI_FLAG_MSI;
        free_irq(host_set->irq, host_set);

        for (i = 0; i < host_set->n_ports; i++) {
                ap = host_set->ports[i];

                ata_scsi_release(ap->host);
                scsi_host_put(ap->host);
        }

        kfree(hpriv);
        pci_iounmap(pdev, host_set->mmio_base);
        kfree(host_set);

        if (have_msi)
                pci_disable_msi(pdev);
        else
                pci_intx(pdev, 0);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        dev_set_drvdata(dev, NULL);
}

static int __init ahci_init(void)
{
        return pci_module_init(&ahci_pci_driver);
}

static void __exit ahci_exit(void)
{
        pci_unregister_driver(&ahci_pci_driver);
}

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("AHCI SATA low-level driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_init(ahci_init);
module_exit(ahci_exit);