/*
 *  libata-bmdma.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/libata.h>

#include "libata.h"
/**
 * ata_tf_load_pio - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		outb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		outb(tf->hob_feature, ioaddr->feature_addr);
		outb(tf->hob_nsect, ioaddr->nsect_addr);
		outb(tf->hob_lbal, ioaddr->lbal_addr);
		outb(tf->hob_lbam, ioaddr->lbam_addr);
		outb(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		outb(tf->feature, ioaddr->feature_addr);
		outb(tf->nsect, ioaddr->nsect_addr);
		outb(tf->lbal, ioaddr->lbal_addr);
		outb(tf->lbam, ioaddr->lbam_addr);
		outb(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		outb(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
/**
 * ata_tf_load_mmio - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller using MMIO.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}
/**
 * ata_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller using MMIO
 * or PIO as indicated by the ATA_FLAG_MMIO flag.
 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
 * hob_lbal, hob_lbam, and hob_lbah.
 *
 * This function waits for idle (!BUSY and !DRQ) after writing
 * registers.  If the control register has a new value, this
 * function also waits for idle after writing control and before
 * writing the remaining registers.
 *
 * May be used as the tf_load() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_load_mmio(ap, tf);
	else
		ata_tf_load_pio(ap, tf);
}
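
/*
 * Example (illustrative sketch only, not part of this file): a low-level
 * driver for a conventional SFF/BMDMA controller can typically point its
 * ata_port_operations at the stock helpers exported here rather than
 * re-implementing taskfile access.  The "foo_" name below is made up for
 * illustration; the bmdma_* and irq_clear entries refer to helpers
 * defined later in this file.
 *
 *	static const struct ata_port_operations foo_port_ops = {
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.exec_command	= ata_exec_command,
 *		.check_status	= ata_check_status,
 *		.bmdma_setup	= ata_bmdma_setup,
 *		.bmdma_start	= ata_bmdma_start,
 *		.bmdma_stop	= ata_bmdma_stop,
 *		.bmdma_status	= ata_bmdma_status,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *	};
 */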
/**
 * ata_exec_command_pio - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues PIO write to ATA command register, with proper
 * synchronization with interrupt handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	outb(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}

/**
 * ata_exec_command_mmio - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues MMIO write to ATA command register, with proper
 * synchronization with interrupt handler / other threads.
 *
 * FIXME: missing write posting for 400nS delay enforcement
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
	ata_pause(ap);
}

/**
 * ata_exec_command - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues PIO/MMIO write to ATA command register, with proper
 * synchronization with interrupt handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_exec_command_mmio(ap, tf);
	else
		ata_exec_command_pio(ap, tf);
}
/**
 * ata_tf_read_pio - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = inb(ioaddr->error_addr);
	tf->nsect = inb(ioaddr->nsect_addr);
	tf->lbal = inb(ioaddr->lbal_addr);
	tf->lbam = inb(ioaddr->lbam_addr);
	tf->lbah = inb(ioaddr->lbah_addr);
	tf->device = inb(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = inb(ioaddr->error_addr);
		tf->hob_nsect = inb(ioaddr->nsect_addr);
		tf->hob_lbal = inb(ioaddr->lbal_addr);
		tf->hob_lbam = inb(ioaddr->lbam_addr);
		tf->hob_lbah = inb(ioaddr->lbah_addr);
	}
}

/**
 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf via MMIO.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = readb((void __iomem *)ioaddr->error_addr);
	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
	tf->device = readb((void __iomem *)ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
	}
}

/**
 * ata_tf_read - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf.
 *
 * Reads nsect, lbal, lbam, lbah, and device.  If ATA_TFLAG_LBA48
 * is set, also reads the hob registers.
 *
 * May be used as the tf_read() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_read_mmio(ap, tf);
	else
		ata_tf_read_pio(ap, tf);
}
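
/*
 * Example (illustrative only): a caller that has read back the taskfile
 * of a failed LBA48 command with ata_tf_read() can reconstruct the
 * 48-bit sector number from the low and HOB bytes like this:
 *
 *	u64 block;
 *
 *	block  = (u64)tf->hob_lbah << 40;
 *	block |= (u64)tf->hob_lbam << 32;
 *	block |= (u64)tf->hob_lbal << 24;
 *	block |= (u64)tf->lbah << 16;
 *	block |= (u64)tf->lbam << 8;
 *	block |= tf->lbal;
 */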
/**
 * ata_check_status_pio - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * and returns its value.  This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_check_status_pio(struct ata_port *ap)
{
	return inb(ap->ioaddr.status_addr);
}

/**
 * ata_check_status_mmio - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * via MMIO and returns its value.  This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_check_status_mmio(struct ata_port *ap)
{
	return readb((void __iomem *) ap->ioaddr.status_addr);
}

/**
 * ata_check_status - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * and returns its value.  This also clears pending interrupts
 * from this device.
 *
 * May be used as the check_status() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_check_status_mmio(ap);
	return ata_check_status_pio(ap);
}

/**
 * ata_altstatus - Read device alternate status reg
 * @ap: port where the device is
 *
 * Reads ATA taskfile alternate status register for
 * currently-selected device and returns its value.
 *
 * Note: may NOT be used as the check_altstatus() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	if (ap->flags & ATA_FLAG_MMIO)
		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
	return inb(ap->ioaddr.altstatus_addr);
}
/**
 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_setup_mmio(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = readb(mmio + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	writeb(dmactl, mmio + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_start_mmio(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expecting, so I think it is best to not add a readb()
	 * without first checking all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
}
/**
 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_setup_pio(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_start_pio(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
/**
 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * Writes the ATA_DMA_START flag to the DMA command register.
 *
 * May be used as the bmdma_start() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	if (qc->ap->flags & ATA_FLAG_MMIO)
		ata_bmdma_start_mmio(qc);
	else
		ata_bmdma_start_pio(qc);
}

/**
 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * Writes address of PRD table to device's PRD Table Address
 * register, sets the DMA control register, and calls
 * ops->exec_command() to start the transfer.
 *
 * May be used as the bmdma_setup() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	if (qc->ap->flags & ATA_FLAG_MMIO)
		ata_bmdma_setup_mmio(qc);
	else
		ata_bmdma_setup_pio(qc);
}
/**
 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 * @ap: Port associated with this ATA transaction.
 *
 * Clear interrupt and error flags in DMA status register.
 *
 * May be used as the irq_clear() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	if (!ap->ioaddr.bmdma_addr)
		return;

	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio =
			((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
		writeb(readb(mmio), mmio);
	} else {
		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
		outb(inb(addr), addr);
	}
}

/**
 * ata_bmdma_status - Read PCI IDE BMDMA status
 * @ap: Port associated with this ATA transaction.
 *
 * Read and return BMDMA status register.
 *
 * May be used as the bmdma_status() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	u8 host_stat;

	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
		host_stat = readb(mmio + ATA_DMA_STATUS);
	} else
		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

	return host_stat;
}
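
/*
 * Example (illustrative only, heavily simplified): an interrupt handler
 * for a BMDMA controller typically consults this register to decide
 * whether the DMA engine raised the interrupt and whether it reported
 * an error, roughly along these lines (hypothetical driver code, the
 * surrounding locking and qc lookup are omitted):
 *
 *	u8 host_stat = ap->ops->bmdma_status(ap);
 *
 *	if (!(host_stat & ATA_DMA_INTR))
 *		return IRQ_NONE;			// not our interrupt
 *	if (host_stat & ATA_DMA_ERR)
 *		qc->err_mask |= AC_ERR_HOST_BUS;	// DMA engine error
 *	ap->ops->bmdma_stop(qc);
 *	ap->ops->irq_clear(ap);
 */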
/**
 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 * @qc: Command we are ending DMA for
 *
 * Clears the ATA_DMA_START flag in the dma control register
 *
 * May be used as the bmdma_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

		/* clear start/stop bit */
		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		       mmio + ATA_DMA_CMD);
	} else {
		/* clear start/stop bit */
		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	}

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}

/**
 * ata_bmdma_freeze - Freeze BMDMA controller port
 * @ap: port to freeze
 *
 * Freeze BMDMA controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_bmdma_freeze(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->flags & ATA_FLAG_MMIO)
		writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
	else
		outb(ap->ctl, ioaddr->ctl_addr);
}

/**
 * ata_bmdma_thaw - Thaw BMDMA controller port
 * @ap: port to thaw
 *
 * Thaw BMDMA controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_bmdma_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);
}
/**
 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
 * @ap: port to handle error for
 * @prereset: prereset method (can be NULL)
 * @softreset: softreset method (can be NULL)
 * @hardreset: hardreset method (can be NULL)
 * @postreset: postreset method (can be NULL)
 *
 * Handle error for ATA BMDMA controller.  It can handle both
 * PATA and SATA controllers.  Many controllers should be able to
 * use this EH as-is or with some added handling before and
 * after.
 *
 * This function is intended to be used for constructing
 * ->error_handler callback by low level drivers.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			ata_postreset_fn_t postreset)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int thaw = 0;

	qc = __ata_qc_from_tag(ap, ap->active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(ap->lock, flags);

	ap->hsm_task_state = HSM_ST_IDLE;

	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
		u8 host_stat;

		host_stat = ata_bmdma_status(ap);

		ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = 1;
		}

		ap->ops->bmdma_stop(qc);
	}

	ata_altstatus(ap);
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	if (thaw)
		ata_eh_thaw_port(ap);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, prereset, softreset, hardreset, postreset);
}

/**
 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
 * @ap: port to handle error for
 *
 * Stock error handler for BMDMA controller.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	ata_reset_fn_t hardreset;

	hardreset = NULL;
	if (sata_scr_valid(ap))
		hardreset = sata_std_hardreset;

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
			   ata_std_postreset);
}

/**
 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
 *				 BMDMA controller
 * @qc: internal command to clean up
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	ata_bmdma_stop(qc);
}
#ifdef CONFIG_PCI

static struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->host_flags = port->host_flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;

	return probe_ent;
}
/**
 * ata_pci_init_native_mode - Initialize native-mode driver
 * @pdev: pci device to be initialized
 * @port: array[2] of pointers to port info structures.
 * @ports: bitmap of ports present
 *
 * Utility function which allocates and initializes an
 * ata_probe_ent structure for a standard dual-port
 * PIO-based IDE controller.  The returned ata_probe_ent
 * structure can be passed to ata_device_add() and should
 * afterwards be freed with kfree().
 *
 * The caller need only pass the address of the primary port; the
 * secondary will be deduced automatically.  If the device has
 * non-standard secondary port mappings this function can be called
 * twice, once for each interface.
 */
struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
{
	struct ata_probe_ent *probe_ent =
		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
	int p = 0;
	unsigned long bmdma;

	if (!probe_ent)
		return NULL;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->private_data = port[0]->private_data;

	if (ports & ATA_PORT_PRIMARY) {
		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
		probe_ent->port[p].altstatus_addr =
		probe_ent->port[p].ctl_addr =
			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
		bmdma = pci_resource_start(pdev, 4);
		if (bmdma) {
			if (inb(bmdma + 2) & 0x80)
				probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
			probe_ent->port[p].bmdma_addr = bmdma;
		}
		ata_std_ports(&probe_ent->port[p]);
		p++;
	}

	if (ports & ATA_PORT_SECONDARY) {
		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
		probe_ent->port[p].altstatus_addr =
		probe_ent->port[p].ctl_addr =
			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
		bmdma = pci_resource_start(pdev, 4);
		if (bmdma) {
			bmdma += 8;
			if (inb(bmdma + 2) & 0x80)
				probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
			probe_ent->port[p].bmdma_addr = bmdma;
		}
		ata_std_ports(&probe_ent->port[p]);
		p++;
	}

	probe_ent->n_ports = p;
	return probe_ent;
}
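
/*
 * Example (illustrative only, error handling trimmed): a native-mode
 * driver's probe routine can use the helper above roughly as follows.
 * "foo_port_info" is a made-up name standing in for the driver's
 * ata_port_info.
 *
 *	struct ata_port_info *ppi[2] = { &foo_port_info, &foo_port_info };
 *	struct ata_probe_ent *probe_ent;
 *
 *	probe_ent = ata_pci_init_native_mode(pdev, ppi,
 *			ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 *	if (!probe_ent)
 *		return -ENOMEM;
 *
 *	ata_device_add(probe_ent);
 *	kfree(probe_ent);
 */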
static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
				struct ata_port_info *port, int port_num)
{
	struct ata_probe_ent *probe_ent;
	unsigned long bmdma;

	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
	if (!probe_ent)
		return NULL;

	probe_ent->legacy_mode = 1;
	probe_ent->n_ports = 1;
	probe_ent->hard_port_no = port_num;
	probe_ent->private_data = port->private_data;

	switch (port_num) {
	case 0:
		probe_ent->irq = 14;
		probe_ent->port[0].cmd_addr = 0x1f0;
		probe_ent->port[0].altstatus_addr =
		probe_ent->port[0].ctl_addr = 0x3f6;
		break;
	case 1:
		probe_ent->irq = 15;
		probe_ent->port[0].cmd_addr = 0x170;
		probe_ent->port[0].altstatus_addr =
		probe_ent->port[0].ctl_addr = 0x376;
		break;
	}

	bmdma = pci_resource_start(pdev, 4);
	if (bmdma != 0) {
		bmdma += 8 * port_num;
		probe_ent->port[0].bmdma_addr = bmdma;
		if (inb(bmdma + 2) & 0x80)
			probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
	}

	ata_std_ports(&probe_ent->port[0]);

	return probe_ent;
}
/**
 * ata_pci_init_one - Initialize/register PCI IDE host controller
 * @pdev: Controller to be initialized
 * @port_info: Information from low-level host driver
 * @n_ports: Number of ports attached to host controller
 *
 * This is a helper function which can be called from a driver's
 * xxx_init_one() probe function if the hardware uses traditional
 * IDE taskfile registers.
 *
 * This function calls pci_enable_device(), reserves its register
 * regions, sets the dma mask, enables bus master mode, and calls
 * ata_device_add().
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, negative errno-based value on error.
 */
int ata_pci_init_one(struct pci_dev *pdev, struct ata_port_info **port_info,
		     unsigned int n_ports)
{
	struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
	struct ata_port_info *port[2];
	u8 tmp8, mask;
	unsigned int legacy_mode = 0;
	int disable_dev_on_err = 1;
	int rc;

	DPRINTK("ENTER\n");

	port[0] = port_info[0];
	if (n_ports > 1)
		port[1] = port_info[1];
	else
		port[1] = port[0];

	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = (1 << 3);
	}

	/* FIXME... */
	if ((!legacy_mode) && (n_ports > 2)) {
		printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
		n_ports = 2;
		/* For now */
	}

	/* FIXME: Really for ATA it isn't safe because the device may be
	   multi-purpose and we want to leave it alone if it was already
	   enabled.  Secondly for shared use as Arjan says we want refcounting.
	   Checking dev->is_enabled is insufficient as this is not set at
	   boot for the primary video which is BIOS enabled. */

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		disable_dev_on_err = 0;
		goto err_out;
	}

	/* FIXME: Should use platform specific mappers for legacy port ranges */
	if (legacy_mode) {
		if (!request_region(0x1f0, 8, "libata")) {
			struct resource *conflict, res;
			res.start = 0x1f0;
			res.end = 0x1f0 + 8 - 1;
			conflict = ____request_resource(&ioport_resource, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= (1 << 0);
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
			}
		} else
			legacy_mode |= (1 << 0);

		if (!request_region(0x170, 8, "libata")) {
			struct resource *conflict, res;
			res.start = 0x170;
			res.end = 0x170 + 8 - 1;
			conflict = ____request_resource(&ioport_resource, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= (1 << 1);
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
			}
		} else
			legacy_mode |= (1 << 1);
	}

	/* we have legacy mode, but all ports are unavailable */
	if (legacy_mode == (1 << 3)) {
		rc = -EBUSY;
		goto err_out_regions;
	}

	/* FIXME: If we get no DMA mask we should fall back to PIO */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	if (legacy_mode) {
		if (legacy_mode & (1 << 0))
			probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
		if (legacy_mode & (1 << 1))
			probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
	} else {
		if (n_ports == 2)
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
		else
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
	}
	if (!probe_ent && !probe_ent2) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	pci_set_master(pdev);

	/* FIXME: check ata_device_add return */
	if (legacy_mode) {
		struct device *dev = &pdev->dev;
		struct ata_host_set *host_set = NULL;

		if (legacy_mode & (1 << 0)) {
			ata_device_add(probe_ent);
			host_set = dev_get_drvdata(dev);
		}

		if (legacy_mode & (1 << 1)) {
			ata_device_add(probe_ent2);
			if (host_set) {
				host_set->next = dev_get_drvdata(dev);
				dev_set_drvdata(dev, host_set);
			}
		}
	} else
		ata_device_add(probe_ent);

	kfree(probe_ent);
	kfree(probe_ent2);

	return 0;

err_out_regions:
	if (legacy_mode & (1 << 0))
		release_region(0x1f0, 8);
	if (legacy_mode & (1 << 1))
		release_region(0x170, 8);
	pci_release_regions(pdev);
err_out:
	if (disable_dev_on_err)
		pci_disable_device(pdev);
	return rc;
}
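
/*
 * Example (illustrative only, hypothetical "foo" PATA driver): a typical
 * xxx_init_one() builds a port_info pointer array and hands the whole
 * probe off to ata_pci_init_one().  The mask values and the foo_sht /
 * foo_port_ops names are placeholders, not real definitions.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *ent)
 *	{
 *		static struct ata_port_info info = {
 *			.sht		= &foo_sht,
 *			.host_flags	= ATA_FLAG_SLAVE_POSS,
 *			.pio_mask	= 0x1f,	// PIO 0-4
 *			.mwdma_mask	= 0x07,	// MWDMA 0-2
 *			.udma_mask	= 0x3f,	// UDMA 0-5
 *			.port_ops	= &foo_port_ops,
 *		};
 *		struct ata_port_info *port_info[2] = { &info, &info };
 *
 *		return ata_pci_init_one(pdev, port_info, 2);
 *	}
 */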
/**
 * ata_pci_clear_simplex - attempt to kick device out of simplex
 * @pdev: PCI device
 *
 * Some PCI ATA devices report simplex mode but in fact can be told to
 * enter non simplex mode.  This implements the necessary logic to
 * perform the task on such devices.  Calling it on other devices will
 * have -undefined- behaviour.
 */
int ata_pci_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
}
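
/*
 * Example (illustrative only): a driver that knows its controller merely
 * claims simplex operation can try to clear the bit during probe and
 * only accept simplex behaviour if that fails; the messages and DRV_NAME
 * stand in for the driver's own.
 *
 *	if (ata_pci_clear_simplex(pdev) == 0)
 *		printk(KERN_INFO DRV_NAME ": simplex mode cleared\n");
 *	else
 *		printk(KERN_WARNING DRV_NAME ": controller remains simplex\n");
 */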
unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask)
{
	/* Filter out DMA modes if the device has been configured by
	   the BIOS as PIO only */

	if (ap->ioaddr.bmdma_addr == 0)
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	return xfer_mask;
}
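
/*
 * Example (illustrative only): drivers whose DMA capability depends on
 * the BIOS having assigned a BMDMA region can plug this helper into the
 * mode_filter hook of their ata_port_operations (assuming that hook is
 * present in the kernel version in use):
 *
 *	.mode_filter	= ata_pci_default_filter,
 */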

#endif /* CONFIG_PCI */