/*
 *  libata-bmdma.c - helper library for PCI IDE BMDMA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2006 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/libata.h>

#include "libata.h"

/**
 *	ata_tf_load_pio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		outb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		outb(tf->hob_feature, ioaddr->feature_addr);
		outb(tf->hob_nsect, ioaddr->nsect_addr);
		outb(tf->hob_lbal, ioaddr->lbal_addr);
		outb(tf->hob_lbam, ioaddr->lbam_addr);
		outb(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		outb(tf->feature, ioaddr->feature_addr);
		outb(tf->nsect, ioaddr->nsect_addr);
		outb(tf->lbal, ioaddr->lbal_addr);
		outb(tf->lbam, ioaddr->lbam_addr);
		outb(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		outb(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

/**
 *	ata_tf_load_mmio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using MMIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

/**
 *	ata_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using MMIO
 *	or PIO as indicated by the ATA_FLAG_MMIO flag.
 *	Writes the control, feature, nsect, lbal, lbam, and lbah registers.
 *	Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
 *	hob_lbal, hob_lbam, and hob_lbah.
 *
 *	This function waits for idle (!BUSY and !DRQ) after writing
 *	registers.  If the control register has a new value, this
 *	function also waits for idle after writing control and before
 *	writing the remaining registers.
 *
 *	May be used as the tf_load() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_load_mmio(ap, tf);
	else
		ata_tf_load_pio(ap, tf);
}
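
/*
 * Example (illustrative sketch, not part of the original file): a low-level
 * driver typically plugs the taskfile helpers above straight into its
 * ata_port_operations.  The "foo_" name below is hypothetical.
 *
 *	static const struct ata_port_operations foo_port_ops = {
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.check_status	= ata_check_status,
 *		.exec_command	= ata_exec_command,
 *		.dev_select	= ata_std_dev_select,
 *	};
 */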

/**
 *	ata_exec_command_pio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	outb(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}

/**
 *	ata_exec_command_mmio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	FIXME: missing write posting for 400nS delay enforcement
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
	ata_pause(ap);
}

/**
 *	ata_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO/MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_exec_command_mmio(ap, tf);
	else
		ata_exec_command_pio(ap, tf);
}

/**
 *	ata_tf_read_pio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = inb(ioaddr->error_addr);
	tf->nsect = inb(ioaddr->nsect_addr);
	tf->lbal = inb(ioaddr->lbal_addr);
	tf->lbam = inb(ioaddr->lbam_addr);
	tf->lbah = inb(ioaddr->lbah_addr);
	tf->device = inb(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = inb(ioaddr->error_addr);
		tf->hob_nsect = inb(ioaddr->nsect_addr);
		tf->hob_lbal = inb(ioaddr->lbal_addr);
		tf->hob_lbam = inb(ioaddr->lbam_addr);
		tf->hob_lbah = inb(ioaddr->lbah_addr);
	}
}

/**
 *	ata_tf_read_mmio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf via MMIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = readb((void __iomem *)ioaddr->error_addr);
	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
	tf->device = readb((void __iomem *)ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
	}
}

/**
 *	ata_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	Reads nsect, lbal, lbam, lbah, and device.  If ATA_TFLAG_LBA48
 *	is set, also reads the hob registers.
 *
 *	May be used as the tf_read() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_read_mmio(ap, tf);
	else
		ata_tf_read_pio(ap, tf);
}

/**
 *	ata_check_status_pio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and returns its value.  This also clears pending interrupts
 *	from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_pio(struct ata_port *ap)
{
	return inb(ap->ioaddr.status_addr);
}

/**
 *	ata_check_status_mmio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	via MMIO and returns its value.  This also clears pending
 *	interrupts from this device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_mmio(struct ata_port *ap)
{
	return readb((void __iomem *) ap->ioaddr.status_addr);
}

/**
 *	ata_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and returns its value.  This also clears pending interrupts
 *	from this device.
 *
 *	May be used as the check_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_check_status_mmio(ap);
	return ata_check_status_pio(ap);
}

/**
 *	ata_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and returns its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	if (ap->flags & ATA_FLAG_MMIO)
		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
	return inb(ap->ioaddr.altstatus_addr);
}

/**
 *	ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

	/* load PRD table addr. */
	mb();	/* make sure PRD table writes are visible to controller */
	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = readb(mmio + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	writeb(dmactl, mmio + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 *	ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * is expecting, so I think it is best to not add a readb()
	 * without first auditing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
}

/**
 *	ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 *	ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	Writes the ATA_DMA_START flag to the DMA command register.
 *
 *	May be used as the bmdma_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	if (qc->ap->flags & ATA_FLAG_MMIO)
		ata_bmdma_start_mmio(qc);
	else
		ata_bmdma_start_pio(qc);
}

/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	Writes address of PRD table to device's PRD Table Address
 *	register, sets the DMA control register, and calls
 *	ops->exec_command() to start the transfer.
 *
 *	May be used as the bmdma_setup() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	if (qc->ap->flags & ATA_FLAG_MMIO)
		ata_bmdma_setup_mmio(qc);
	else
		ata_bmdma_setup_pio(qc);
}
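
/*
 * Example (simplified sketch of the usual libata issue path, not part of the
 * original file): for a DMA-protocol command the core calls the hooks above
 * back to back, roughly:
 *
 *	ap->ops->tf_load(ap, &qc->tf);	// load taskfile registers
 *	ap->ops->bmdma_setup(qc);	// PRD table, direction, issue command
 *	ap->ops->bmdma_start(qc);	// set ATA_DMA_START to begin transfer
 */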

/**
 *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_irq_clear(struct ata_port *ap)
{
	if (!ap->ioaddr.bmdma_addr)
		return;

	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio =
		      ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
		writeb(readb(mmio), mmio);
	} else {
		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
		outb(inb(addr), addr);
	}
}

/**
 *	ata_bmdma_status - Read PCI IDE BMDMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Read and return BMDMA status register.
 *
 *	May be used as the bmdma_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
u8 ata_bmdma_status(struct ata_port *ap)
{
	u8 host_stat;

	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
		host_stat = readb(mmio + ATA_DMA_STATUS);
	} else
		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

	return host_stat;
}

/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the dma control register
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

		/* clear start/stop bit */
		writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		       mmio + ATA_DMA_CMD);
	} else {
		/* clear start/stop bit */
		outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
		     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	}

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_altstatus(ap);	/* dummy read */
}
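
/*
 * Example (illustrative sketch): a driver's interrupt path normally pairs
 * ata_bmdma_status() with ata_bmdma_stop(), along the lines of what the
 * core's interrupt handler does for DMA-protocol commands:
 *
 *	host_stat = ap->ops->bmdma_status(ap);
 *	if (!(host_stat & ATA_DMA_INTR))
 *		return 0;			// not our interrupt
 *	ap->ops->bmdma_stop(qc);		// stop DMA before completion
 *	if (unlikely(host_stat & ATA_DMA_ERR))
 *		qc->err_mask |= AC_ERR_HOST_BUS;
 */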

/**
 *	ata_bmdma_freeze - Freeze BMDMA controller port
 *	@ap: port to freeze
 *
 *	Freeze BMDMA controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_freeze(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ap->ctl |= ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->flags & ATA_FLAG_MMIO)
		writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
	else
		outb(ap->ctl, ioaddr->ctl_addr);
}

/**
 *	ata_bmdma_thaw - Thaw BMDMA controller port
 *	@ap: port to thaw
 *
 *	Thaw BMDMA controller port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_bmdma_thaw(struct ata_port *ap)
{
	/* clear & re-enable interrupts */
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);
}

/**
 *	ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
 *	@ap: port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Handle error for ATA BMDMA controller.  It can handle both
 *	PATA and SATA controllers.  Many controllers should be able to
 *	use this EH as-is or with some added handling before and
 *	after.
 *
 *	This function is intended to be used for constructing
 *	->error_handler callback by low level drivers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			ata_postreset_fn_t postreset)
{
	struct ata_host_set *host_set = ap->host_set;
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int thaw = 0;

	qc = __ata_qc_from_tag(ap, ap->active_tag);
	if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
		qc = NULL;

	/* reset PIO HSM and stop DMA engine */
	spin_lock_irqsave(&host_set->lock, flags);

	ap->hsm_task_state = HSM_ST_IDLE;

	if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
		   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
		u8 host_stat;

		host_stat = ata_bmdma_status(ap);

		ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);

		/* BMDMA controllers indicate host bus error by
		 * setting DMA_ERR bit and timing out.  As it wasn't
		 * really a timeout event, adjust error mask and
		 * cancel frozen state.
		 */
		if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
			qc->err_mask = AC_ERR_HOST_BUS;
			thaw = 1;
		}

		ap->ops->bmdma_stop(qc);
	}

	ata_altstatus(ap);
	ata_chk_status(ap);
	ap->ops->irq_clear(ap);

	spin_unlock_irqrestore(&host_set->lock, flags);

	if (thaw)
		ata_eh_thaw_port(ap);

	/* PIO and DMA engines have been stopped, perform recovery */
	ata_do_eh(ap, prereset, softreset, hardreset, postreset);
}

/**
 *	ata_bmdma_error_handler - Stock error handler for BMDMA controller
 *	@ap: port to handle error for
 *
 *	Stock error handler for BMDMA controller.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
	ata_reset_fn_t hardreset;

	hardreset = NULL;
	if (sata_scr_valid(ap))
		hardreset = sata_std_hardreset;

	ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
			   ata_std_postreset);
}
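
/*
 * Example (hypothetical driver code, illustrative only): a controller that
 * needs its own reset methods can still reuse the EH core above by calling
 * ata_bmdma_drive_eh() from its own ->error_handler.  The foo_* names are
 * placeholders.
 *
 *	static void foo_error_handler(struct ata_port *ap)
 *	{
 *		ata_bmdma_drive_eh(ap, ata_std_prereset, foo_softreset,
 *				   NULL, ata_std_postreset);
 *	}
 */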

/**
 *	ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
 *				      BMDMA controller
 *	@qc: internal command to clean up
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
	ata_bmdma_stop(qc);
}

#ifdef CONFIG_PCI
static struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->host_flags = port->host_flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;

	return probe_ent;
}

/**
 *	ata_pci_init_native_mode - Initialize native-mode driver
 *	@pdev: pci device to be initialized
 *	@port: array[2] of pointers to port info structures.
 *	@ports: bitmap of ports present
 *
 *	Utility function which allocates and initializes an
 *	ata_probe_ent structure for a standard dual-port
 *	PIO-based IDE controller.  The returned ata_probe_ent
 *	structure can be passed to ata_device_add(), and should
 *	then be freed with kfree().
 *
 *	The caller need only pass the address of the primary port; the
 *	secondary will be deduced automatically.  If the device has
 *	non-standard secondary port mappings, this function can be called
 *	twice, once for each interface.
 */
struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
{
	struct ata_probe_ent *probe_ent =
		ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
	int p = 0;
	unsigned long bmdma;

	if (!probe_ent)
		return NULL;

	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->private_data = port[0]->private_data;

	if (ports & ATA_PORT_PRIMARY) {
		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
		probe_ent->port[p].altstatus_addr =
		probe_ent->port[p].ctl_addr =
			pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
		bmdma = pci_resource_start(pdev, 4);
		if (bmdma) {
			if (inb(bmdma + 2) & 0x80)
				probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
			probe_ent->port[p].bmdma_addr = bmdma;
		}
		ata_std_ports(&probe_ent->port[p]);
		p++;
	}

	if (ports & ATA_PORT_SECONDARY) {
		probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
		probe_ent->port[p].altstatus_addr =
		probe_ent->port[p].ctl_addr =
			pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
		bmdma = pci_resource_start(pdev, 4);
		if (bmdma) {
			bmdma += 8;
			if (inb(bmdma + 2) & 0x80)
				probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
			probe_ent->port[p].bmdma_addr = bmdma;
		}
		ata_std_ports(&probe_ent->port[p]);
		p++;
	}

	probe_ent->n_ports = p;
	return probe_ent;
}

static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
				struct ata_port_info *port, int port_num)
{
	struct ata_probe_ent *probe_ent;
	unsigned long bmdma;

	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
	if (!probe_ent)
		return NULL;

	probe_ent->legacy_mode = 1;
	probe_ent->n_ports = 1;
	probe_ent->hard_port_no = port_num;
	probe_ent->private_data = port->private_data;

	switch (port_num) {
	case 0:
		probe_ent->irq = 14;
		probe_ent->port[0].cmd_addr = 0x1f0;
		probe_ent->port[0].altstatus_addr =
		probe_ent->port[0].ctl_addr = 0x3f6;
		break;
	case 1:
		probe_ent->irq = 15;
		probe_ent->port[0].cmd_addr = 0x170;
		probe_ent->port[0].altstatus_addr =
		probe_ent->port[0].ctl_addr = 0x376;
		break;
	}

	bmdma = pci_resource_start(pdev, 4);
	if (bmdma != 0) {
		bmdma += 8 * port_num;
		probe_ent->port[0].bmdma_addr = bmdma;
		if (inb(bmdma + 2) & 0x80)
			probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
	}

	ata_std_ports(&probe_ent->port[0]);

	return probe_ent;
}

/**
 *	ata_pci_init_one - Initialize/register PCI IDE host controller
 *	@pdev: Controller to be initialized
 *	@port_info: Information from low-level host driver
 *	@n_ports: Number of ports attached to host controller
 *
 *	This is a helper function which can be called from a driver's
 *	xxx_init_one() probe function if the hardware uses traditional
 *	IDE taskfile registers.
 *
 *	This function calls pci_enable_device(), reserves its register
 *	regions, sets the dma mask, enables bus master mode, and calls
 *	ata_device_add().
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative errno-based value on error.
 */
int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
		      unsigned int n_ports)
{
	struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
	struct ata_port_info *port[2];
	u8 tmp8, mask;
	unsigned int legacy_mode = 0;
	int disable_dev_on_err = 1;
	int rc;

	DPRINTK("ENTER\n");

	port[0] = port_info[0];
	if (n_ports > 1)
		port[1] = port_info[1];
	else
		port[1] = port[0];

	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);
		if ((tmp8 & mask) != mask)
			legacy_mode = (1 << 3);
	}

	/* FIXME... */
	if ((!legacy_mode) && (n_ports > 2)) {
		printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
		n_ports = 2;
		/* For now */
	}

	/* FIXME: Really for ATA it isn't safe because the device may be
	   multi-purpose and we want to leave it alone if it was already
	   enabled. Secondly for shared use as Arjan says we want refcounting

	   Checking dev->is_enabled is insufficient as this is not set at
	   boot for the primary video which is BIOS enabled
	 */

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		disable_dev_on_err = 0;
		goto err_out;
	}

	/* FIXME: Should use platform specific mappers for legacy port ranges */
	if (legacy_mode) {
		if (!request_region(0x1f0, 8, "libata")) {
			struct resource *conflict, res;
			res.start = 0x1f0;
			res.end = 0x1f0 + 8 - 1;
			conflict = ____request_resource(&ioport_resource, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= (1 << 0);
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
			}
		} else
			legacy_mode |= (1 << 0);

		if (!request_region(0x170, 8, "libata")) {
			struct resource *conflict, res;
			res.start = 0x170;
			res.end = 0x170 + 8 - 1;
			conflict = ____request_resource(&ioport_resource, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= (1 << 1);
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
			}
		} else
			legacy_mode |= (1 << 1);
	}

	/* we have legacy mode, but all ports are unavailable */
	if (legacy_mode == (1 << 3)) {
		rc = -EBUSY;
		goto err_out_regions;
	}

	/* FIXME: If we get no DMA mask we should fall back to PIO */
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	if (legacy_mode) {
		if (legacy_mode & (1 << 0))
			probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
		if (legacy_mode & (1 << 1))
			probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
	} else {
		if (n_ports == 2)
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
		else
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
	}
	if (!probe_ent && !probe_ent2) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	pci_set_master(pdev);

	/* FIXME: check ata_device_add return */
	if (legacy_mode) {
		if (legacy_mode & (1 << 0))
			ata_device_add(probe_ent);
		if (legacy_mode & (1 << 1))
			ata_device_add(probe_ent2);
	} else
		ata_device_add(probe_ent);

	kfree(probe_ent);
	kfree(probe_ent2);

	return 0;

err_out_regions:
	if (legacy_mode & (1 << 0))
		release_region(0x1f0, 8);
	if (legacy_mode & (1 << 1))
		release_region(0x170, 8);
	pci_release_regions(pdev);
err_out:
	if (disable_dev_on_err)
		pci_disable_device(pdev);
	return rc;
}
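
/*
 * Example (hypothetical "foo" driver, illustrative only): a typical PCI IDE
 * driver's probe routine simply forwards to ata_pci_init_one() with its
 * per-chipset port information.  All foo_* names are placeholders.
 *
 *	static struct ata_port_info foo_port_info = {
 *		.sht		= &foo_sht,
 *		.host_flags	= ATA_FLAG_SLAVE_POSS,
 *		.pio_mask	= 0x1f,		// PIO0-4
 *		.mwdma_mask	= 0x07,		// MWDMA0-2
 *		.udma_mask	= 0x3f,		// UDMA0-5
 *		.port_ops	= &foo_port_ops,
 *	};
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *ent)
 *	{
 *		static struct ata_port_info *port_info[2] =
 *			{ &foo_port_info, &foo_port_info };
 *		return ata_pci_init_one(pdev, port_info, 2);
 *	}
 */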

/**
 *	ata_pci_clear_simplex - attempt to kick device out of simplex
 *	@pdev: PCI device
 *
 *	Some PCI ATA devices report simplex mode but in fact can be told to
 *	enter non-simplex mode.  This implements the necessary logic to
 *	perform the task on such devices.  Calling it on other devices will
 *	have -undefined- behaviour.
 */
int ata_pci_clear_simplex(struct pci_dev *pdev)
{
	unsigned long bmdma = pci_resource_start(pdev, 4);
	u8 simplex;

	if (bmdma == 0)
		return -ENOENT;

	simplex = inb(bmdma + 0x02);
	outb(simplex & 0x60, bmdma + 0x02);
	simplex = inb(bmdma + 0x02);
	if (simplex & 0x80)
		return -EOPNOTSUPP;
	return 0;
}
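
/*
 * Example (illustrative, hypothetical driver code): a driver whose hardware
 * is known to merely fake simplex mode can try to clear it during probe
 * before accepting the shared-channel restriction:
 *
 *	if (probe_ent->host_set_flags & ATA_HOST_SIMPLEX)
 *		if (ata_pci_clear_simplex(pdev) == 0)
 *			probe_ent->host_set_flags &= ~ATA_HOST_SIMPLEX;
 */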

unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask)
{
	/* Filter out DMA modes if the device has been configured by
	   the BIOS as PIO only */

	if (ap->ioaddr.bmdma_addr == 0)
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	return xfer_mask;
}

#endif /* CONFIG_PCI */