/*
 * Copyright 2003-2005 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2005 Jeff Garzik
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 */

#ifndef __LINUX_LIBATA_H__
#define __LINUX_LIBATA_H__

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <linux/ata.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>

/*
 * Define if arch has non-standard setup.  This is a _PCI_ standard
 * not a legacy or ISA standard.
 */
#ifdef CONFIG_ATA_NONSTANDARD
#include <asm/libata-portmap.h>
#else
#include <asm-generic/libata-portmap.h>
#endif
/*
 * compile-time options: to be removed as soon as all the drivers are
 * converted to the new debugging mechanism
 */
#undef ATA_DEBUG		/* debugging output */
#undef ATA_VERBOSE_DEBUG	/* yet more debugging output */
#undef ATA_IRQ_TRAP		/* define to ack screaming irqs */
#undef ATA_NDEBUG		/* define to disable quick runtime checks */
#define ATA_ENABLE_PATA		/* define to enable PATA support in some
				 * low-level drivers */

/* note: prints function name for you */
#ifdef ATA_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#ifdef ATA_VERBOSE_DEBUG
#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#else
#define VPRINTK(fmt, args...)
#endif	/* ATA_VERBOSE_DEBUG */
#else
#define DPRINTK(fmt, args...)
#define VPRINTK(fmt, args...)
#endif	/* ATA_DEBUG */

#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)

/* NEW: debug levels */
#define HAVE_LIBATA_MSG 1

enum {
	ATA_MSG_DRV	= 0x0001,
	ATA_MSG_INFO	= 0x0002,
	ATA_MSG_PROBE	= 0x0004,
	ATA_MSG_WARN	= 0x0008,
	ATA_MSG_MALLOC	= 0x0010,
	ATA_MSG_CTL	= 0x0020,
	ATA_MSG_INTR	= 0x0040,
	ATA_MSG_ERR	= 0x0080,
};

#define ata_msg_drv(p)    ((p)->msg_enable & ATA_MSG_DRV)
#define ata_msg_info(p)   ((p)->msg_enable & ATA_MSG_INFO)
#define ata_msg_probe(p)  ((p)->msg_enable & ATA_MSG_PROBE)
#define ata_msg_warn(p)   ((p)->msg_enable & ATA_MSG_WARN)
#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
#define ata_msg_ctl(p)    ((p)->msg_enable & ATA_MSG_CTL)
#define ata_msg_intr(p)   ((p)->msg_enable & ATA_MSG_INTR)
#define ata_msg_err(p)    ((p)->msg_enable & ATA_MSG_ERR)

static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
{
	if (dval < 0 || dval >= (sizeof(u32) * 8))
		return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
	if (!dval)
		return 0;
	return (1 << dval) - 1;
}
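
/*
 * Example (illustrative sketch only, not part of the API): a low-level
 * driver could derive its msg_enable mask from a hypothetical "msg_level"
 * module parameter, falling back to driver-info messages only:
 *
 *	static int msg_level = -1;		// hypothetical module param
 *	module_param(msg_level, int, 0644);
 *	...
 *	ap->msg_enable = ata_msg_init(msg_level, ATA_MSG_DRV);
 *	if (ata_msg_probe(ap))
 *		printk(KERN_INFO "probe messages enabled\n");
 */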
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON		0xfafbfcfdU

/* move to PCI layer? */
#define PCI_VDEVICE(vendor, device)		\
	PCI_VENDOR_ID_##vendor, (device),	\
	PCI_ANY_ID, PCI_ANY_ID, 0, 0
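
/*
 * Usage example (sketch): PCI_VDEVICE shortens pci_device_id table entries
 * when only vendor and device IDs matter.  The device ID below (0x1234) is
 * a made-up placeholder:
 *
 *	static const struct pci_device_id my_pci_tbl[] = {
 *		{ PCI_VDEVICE(INTEL, 0x1234), 0 },
 *		{ }	// terminating entry
 *	};
 */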
static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
{
	return &pdev->dev;
}
enum {
	/* various global constants */
	LIBATA_MAX_PRD		= ATA_MAX_PRD / 2,
	ATA_MAX_PORTS		= 8,
	ATA_DEF_QUEUE		= 1,
	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
	ATA_MAX_QUEUE		= 32,
	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE - 1,
	ATA_MAX_BUS		= 2,
	ATA_DEF_BUSY_WAIT	= 10000,
	ATA_SHORT_PAUSE		= (HZ >> 6) + 1,

	ATA_SHT_EMULATED	= 1,
	ATA_SHT_CMD_PER_LUN	= 1,
	ATA_SHT_THIS_ID		= -1,
	ATA_SHT_USE_CLUSTERING	= 1,

	/* struct ata_device stuff */
	ATA_DFLAG_LBA		= (1 << 0), /* device supports LBA */
	ATA_DFLAG_LBA48		= (1 << 1), /* device supports LBA48 */
	ATA_DFLAG_CDB_INTR	= (1 << 2), /* device asserts INTRQ when ready for CDB */
	ATA_DFLAG_NCQ		= (1 << 3), /* device supports NCQ */
	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,

	ATA_DFLAG_PIO		= (1 << 8), /* device limited to PIO mode */
	ATA_DFLAG_NCQ_OFF	= (1 << 9), /* device limited to non-NCQ mode */
	ATA_DFLAG_SUSPENDED	= (1 << 10), /* device suspended */
	ATA_DFLAG_INIT_MASK	= (1 << 16) - 1,

	ATA_DFLAG_DETACH	= (1 << 16),
	ATA_DFLAG_DETACHED	= (1 << 17),

	ATA_DEV_UNKNOWN		= 0,	/* unknown device */
	ATA_DEV_ATA		= 1,	/* ATA device */
	ATA_DEV_ATA_UNSUP	= 2,	/* ATA device (unsupported) */
	ATA_DEV_ATAPI		= 3,	/* ATAPI device */
	ATA_DEV_ATAPI_UNSUP	= 4,	/* ATAPI device (unsupported) */
	ATA_DEV_NONE		= 5,	/* no device */

	/* struct ata_port flags */
	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
					    /* (doesn't imply presence) */
	ATA_FLAG_SATA		= (1 << 1),
	ATA_FLAG_NO_LEGACY	= (1 << 2), /* no legacy mode check */
	ATA_FLAG_MMIO		= (1 << 3), /* use MMIO, not PIO */
	ATA_FLAG_SRST		= (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
	ATA_FLAG_SATA_RESET	= (1 << 5), /* (obsolete) use COMRESET */
	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */
	ATA_FLAG_PIO_LBA48	= (1 << 8), /* Host DMA engine is LBA28 only */
	ATA_FLAG_PIO_POLLING	= (1 << 9), /* use polling PIO if LLD
					     * doesn't handle PIO interrupts */
	ATA_FLAG_NCQ		= (1 << 10), /* host supports NCQ */
	ATA_FLAG_HRST_TO_RESUME	= (1 << 11), /* hardreset to resume phy */
	ATA_FLAG_SKIP_D2H_BSY	= (1 << 12), /* can't wait for the first D2H
					      * Register FIS clearing BSY */
	ATA_FLAG_DEBUGMSG	= (1 << 13),

	/* The following flag belongs to ap->pflags but is kept in
	 * ap->flags because it's referenced in many LLDs and will be
	 * removed in not-too-distant future.
	 */
	ATA_FLAG_DISABLED	= (1 << 23), /* port is disabled, ignore it */

	/* bits 24:31 of ap->flags are reserved for LLD specific flags */

	/* struct ata_port pflags */
	ATA_PFLAG_EH_PENDING	= (1 << 0), /* EH pending */
	ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
	ATA_PFLAG_FROZEN	= (1 << 2), /* port is frozen */
	ATA_PFLAG_RECOVERED	= (1 << 3), /* recovery action performed */
	ATA_PFLAG_LOADING	= (1 << 4), /* boot/loading probe */
	ATA_PFLAG_UNLOADING	= (1 << 5), /* module is unloading */
	ATA_PFLAG_SCSI_HOTPLUG	= (1 << 6), /* SCSI hotplug scheduled */

	ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
	ATA_PFLAG_SUSPENDED	= (1 << 17), /* port is suspended (power) */
	ATA_PFLAG_PM_PENDING	= (1 << 18), /* PM operation pending */
	/* struct ata_queued_cmd flags */
	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to scsi layer */
	ATA_QCFLAG_SG		= (1 << 1), /* have s/g table? */
	ATA_QCFLAG_SINGLE	= (1 << 2), /* no s/g, just a single buffer */
	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
	ATA_QCFLAG_IO		= (1 << 3), /* standard IO command */
	ATA_QCFLAG_RESULT_TF	= (1 << 4), /* result TF requested */

	ATA_QCFLAG_FAILED	= (1 << 16), /* cmd failed and is owned by EH */
	ATA_QCFLAG_SENSE_VALID	= (1 << 17), /* sense data valid */
	ATA_QCFLAG_EH_SCHEDULED	= (1 << 18), /* EH scheduled (obsolete) */

	/* host set flags */
	ATA_HOST_SIMPLEX	= (1 << 0), /* Host is simplex, one DMA channel per host only */

	/* various lengths of time */
	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
	ATA_TMOUT_INTERNAL	= 30 * HZ,
	ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,

	/* ATA bus states */
	BUS_UNKNOWN		= 0,
	BUS_DMA			= 1,
	BUS_IDLE		= 2,
	BUS_NOINTR		= 3,
	BUS_NODATA		= 4,
	BUS_TIMER		= 5,
	BUS_PIO			= 6,
	BUS_EDD			= 7,
	BUS_IDENTIFY		= 8,
	BUS_PACKET		= 9,

	/* SATA port states */
	PORT_UNKNOWN		= 0,
	PORT_ENABLED		= 1,
	PORT_DISABLED		= 2,

	/* encoding various smaller bitmaps into a single
	 * unsigned int bitmap (see the packing example after this enum)
	 */
	ATA_BITS_PIO		= 7,
	ATA_BITS_MWDMA		= 5,
	ATA_BITS_UDMA		= 8,

	ATA_SHIFT_PIO		= 0,
	ATA_SHIFT_MWDMA		= ATA_SHIFT_PIO + ATA_BITS_PIO,
	ATA_SHIFT_UDMA		= ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,

	ATA_MASK_PIO		= ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
	ATA_MASK_MWDMA		= ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
	ATA_MASK_UDMA		= ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,

	/* size of buffer to pad xfers ending on unaligned boundaries */
	ATA_DMA_PAD_SZ		= 4,
	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,

	/* masks for port functions */
	ATA_PORT_PRIMARY	= (1 << 0),
	ATA_PORT_SECONDARY	= (1 << 1),

	/* ering size */
	ATA_ERING_SIZE		= 32,

	/* desc_len for ata_eh_info and context */
	ATA_EH_DESC_LEN		= 80,

	/* reset / recovery action types */
	ATA_EH_REVALIDATE	= (1 << 0),
	ATA_EH_SOFTRESET	= (1 << 1),
	ATA_EH_HARDRESET	= (1 << 2),
	ATA_EH_SUSPEND		= (1 << 3),
	ATA_EH_RESUME		= (1 << 4),
	ATA_EH_PM_FREEZE	= (1 << 5),

	ATA_EH_RESET_MASK	= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE | ATA_EH_SUSPEND |
				  ATA_EH_RESUME | ATA_EH_PM_FREEZE,

	/* ata_eh_info->flags */
	ATA_EHI_HOTPLUGGED	= (1 << 0),  /* could have been hotplugged */
	ATA_EHI_RESUME_LINK	= (1 << 1),  /* resume link (reset modifier) */
	ATA_EHI_NO_AUTOPSY	= (1 << 2),  /* no autopsy */
	ATA_EHI_QUIET		= (1 << 3),  /* be quiet */

	ATA_EHI_DID_RESET	= (1 << 16), /* already reset this port */

	ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK,

	/* max repeat if error condition is still set after ->error_handler */
	ATA_EH_MAX_REPEAT	= 5,

	/* how hard are we gonna try to probe/recover devices */
	ATA_PROBE_MAX_TRIES	= 3,
	ATA_EH_RESET_TRIES	= 3,
	ATA_EH_DEV_TRIES	= 3,

	/* Drive spinup time (time from power-on to the first D2H FIS)
	 * in msecs - 8s currently.  Failing to get ready in this time
	 * isn't critical.  It will result in reset failure for
	 * controllers which can't wait for the first D2H FIS.  libata
	 * will retry, so it just has to be long enough to spin up
	 * most devices.
	 */
	ATA_SPINUP_WAIT		= 8000,

	/* Horkage types.  May be set by libata or controller on drives
	 * (some horkage may be drive/controller pair dependent)
	 */
	ATA_HORKAGE_DIAGNOSTIC	= (1 << 0),	/* Failed boot diag */
};
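
/*
 * Packing example (illustrative only): a port's PIO/MWDMA/UDMA mode masks
 * are combined into one unsigned int using the shifts above, and split
 * back out with the ATA_MASK_* constants:
 *
 *	unsigned int xfer_mask = (pio_mask   << ATA_SHIFT_PIO)   |
 *				 (mwdma_mask << ATA_SHIFT_MWDMA) |
 *				 (udma_mask  << ATA_SHIFT_UDMA);
 *	unsigned int udma_only = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
 */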
enum hsm_task_states {
	HSM_ST_UNKNOWN,		/* state unknown */
	HSM_ST_IDLE,		/* no command ongoing */
	HSM_ST,			/* (waiting the device to) transfer data */
	HSM_ST_LAST,		/* (waiting the device to) complete command */
	HSM_ST_ERR,		/* error */
	HSM_ST_FIRST,		/* (waiting the device to)
				 * write CDB or first data block */
};

enum ata_completion_errors {
	AC_ERR_DEV	= (1 << 0), /* device reported error */
	AC_ERR_HSM	= (1 << 1), /* host state machine violation */
	AC_ERR_TIMEOUT	= (1 << 2), /* timeout */
	AC_ERR_MEDIA	= (1 << 3), /* media error */
	AC_ERR_ATA_BUS	= (1 << 4), /* ATA bus error */
	AC_ERR_HOST_BUS	= (1 << 5), /* host bus error */
	AC_ERR_SYSTEM	= (1 << 6), /* system error */
	AC_ERR_INVALID	= (1 << 7), /* invalid argument */
	AC_ERR_OTHER	= (1 << 8), /* unknown */
};
/* forward declarations */
struct scsi_device;
struct ata_port_operations;
struct ata_port;
struct ata_queued_cmd;

/* typedefs */
typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
typedef int (*ata_prereset_fn_t)(struct ata_port *ap);
typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes);
typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);

struct ata_ioports {
	unsigned long		cmd_addr;
	unsigned long		data_addr;
	unsigned long		error_addr;
	unsigned long		feature_addr;
	unsigned long		nsect_addr;
	unsigned long		lbal_addr;
	unsigned long		lbam_addr;
	unsigned long		lbah_addr;
	unsigned long		device_addr;
	unsigned long		status_addr;
	unsigned long		command_addr;
	unsigned long		altstatus_addr;
	unsigned long		ctl_addr;
	unsigned long		bmdma_addr;
	unsigned long		scr_addr;
};

struct ata_probe_ent {
	struct list_head	node;
	struct device		*dev;
	const struct ata_port_operations *port_ops;
	struct scsi_host_template *sht;
	struct ata_ioports	port[ATA_MAX_PORTS];
	unsigned int		n_ports;
	unsigned int		dummy_port_mask;
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;
	unsigned long		irq;
	unsigned long		irq2;
	unsigned int		irq_flags;
	unsigned long		port_flags;
	unsigned long		_host_flags;
	void __iomem		*mmio_base;
	void			*private_data;

	/* port_info for the secondary port.  Together with irq2, it's
	 * used to implement non-uniform secondary port.  Currently,
	 * the only user is ata_piix combined mode.  This workaround
	 * will be removed together with ata_probe_ent when init model
	 * is updated.
	 */
	const struct ata_port_info *pinfo2;
};
struct ata_host {
	spinlock_t		lock;
	struct device		*dev;
	unsigned long		irq;
	unsigned long		irq2;
	void __iomem		*mmio_base;
	unsigned int		n_ports;
	void			*private_data;
	const struct ata_port_operations *ops;
	unsigned long		flags;
	int			simplex_claimed;	/* Keep separate in case we
							 * ever need to do this locked */
	struct ata_port		*ports[0];
};
struct ata_queued_cmd {
	struct ata_port		*ap;
	struct ata_device	*dev;

	struct scsi_cmnd	*scsicmd;
	void			(*scsidone)(struct scsi_cmnd *);

	struct ata_taskfile	tf;
	u8			cdb[ATAPI_CDB_LEN];

	unsigned long		flags;		/* ATA_QCFLAG_xxx */
	unsigned int		tag;
	unsigned int		n_elem;
	unsigned int		orig_n_elem;

	int			dma_dir;

	unsigned int		pad_len;

	unsigned int		nsect;
	unsigned int		cursect;

	unsigned int		nbytes;
	unsigned int		curbytes;

	unsigned int		cursg;
	unsigned int		cursg_ofs;

	struct scatterlist	sgent;
	struct scatterlist	pad_sgent;
	void			*buf_virt;

	/* DO NOT iterate over __sg manually, use ata_for_each_sg() */
	struct scatterlist	*__sg;

	unsigned int		err_mask;
	struct ata_taskfile	result_tf;
	ata_qc_cb_t		complete_fn;

	void			*private_data;
};

struct ata_port_stats {
	unsigned long		unhandled_irq;
	unsigned long		idle_irq;
	unsigned long		rw_reqbuf;
};

struct ata_ering_entry {
	int			is_io;
	unsigned int		err_mask;
	u64			timestamp;
};

struct ata_ering {
	int			cursor;
	struct ata_ering_entry	ring[ATA_ERING_SIZE];
};
struct ata_device {
	struct ata_port		*ap;
	unsigned int		devno;		/* 0 or 1 */
	unsigned long		flags;		/* ATA_DFLAG_xxx */
	struct scsi_device	*sdev;		/* attached SCSI device */
	/* n_sectors is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
	u64			n_sectors;	/* size of device, if ATA */
	unsigned int		class;		/* ATA_DEV_xxx */
	u16			id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
	u8			pio_mode;
	u8			dma_mode;
	u8			xfer_mode;
	unsigned int		xfer_shift;	/* ATA_SHIFT_xxx */

	unsigned int		multi_count;	/* sectors count for
						 * READ/WRITE MULTIPLE */
	unsigned int		max_sectors;	/* per-device max sectors */
	unsigned int		cdb_len;

	/* per-dev xfer mask */
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;

	/* for CHS addressing */
	u16			cylinders;	/* Number of cylinders */
	u16			heads;		/* Number of heads */
	u16			sectors;	/* Number of sectors per track */

	/* error history */
	struct ata_ering	ering;
	unsigned int		horkage;	/* List of broken features */
};

/* Offset into struct ata_device.  Fields above it are maintained
 * across device init.  Fields below are zeroed.
 */
#define ATA_DEVICE_CLEAR_OFFSET offsetof(struct ata_device, n_sectors)
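
/*
 * Usage sketch (not a definitive implementation): device init code can wipe
 * everything from n_sectors onward while preserving the fields above it,
 * roughly like this:
 *
 *	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
 *	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
 */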
struct ata_eh_info {
	struct ata_device	*dev;		/* offending device */
	u32			serror;		/* SError from LLDD */
	unsigned int		err_mask;	/* port-wide err_mask */
	unsigned int		action;		/* ATA_EH_* action mask */
	unsigned int		dev_action[ATA_MAX_DEVICES]; /* dev EH action */
	unsigned int		flags;		/* ATA_EHI_* flags */

	unsigned long		hotplug_timestamp;
	unsigned int		probe_mask;

	char			desc[ATA_EH_DESC_LEN];
	int			desc_len;
};

struct ata_eh_context {
	struct ata_eh_info	i;
	int			tries[ATA_MAX_DEVICES];
	unsigned int		classes[ATA_MAX_DEVICES];
	unsigned int		did_probe_mask;
};
struct ata_port {
	struct Scsi_Host	*scsi_host;	/* our co-allocated scsi host */
	const struct ata_port_operations *ops;
	spinlock_t		*lock;
	unsigned long		flags;		/* ATA_FLAG_xxx */
	unsigned int		pflags;		/* ATA_PFLAG_xxx */
	unsigned int		id;		/* unique id req'd by scsi midlayer */
	unsigned int		port_no;	/* unique port #; from zero */

	struct ata_prd		*prd;		/* our SG list */
	dma_addr_t		prd_dma;	/* and its DMA mapping */

	void			*pad;		/* array of DMA pad buffers */
	dma_addr_t		pad_dma;

	struct ata_ioports	ioaddr;		/* ATA cmd/ctl/dma register blocks */

	u8			ctl;		/* cache of ATA control register */
	u8			last_ctl;	/* Cache last written value */
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;
	unsigned int		cbl;		/* cable type; ATA_CBL_xxx */
	unsigned int		hw_sata_spd_limit;
	unsigned int		sata_spd_limit;	/* SATA PHY speed limit */

	/* record runtime error info, protected by host lock */
	struct ata_eh_info	eh_info;
	/* EH context owned by EH */
	struct ata_eh_context	eh_context;

	struct ata_device	device[ATA_MAX_DEVICES];

	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];
	unsigned long		qc_allocated;
	unsigned int		qc_active;

	unsigned int		active_tag;
	u32			sactive;

	struct ata_port_stats	stats;
	struct ata_host		*host;
	struct device 		*dev;

	struct delayed_work	port_task;
	struct delayed_work	hotplug_task;
	struct work_struct	scsi_rescan_task;

	unsigned int		hsm_task_state;

	u32			msg_enable;
	struct list_head	eh_done_q;
	wait_queue_head_t	eh_wait_q;

	pm_message_t		pm_mesg;
	int			*pm_result;

	void			*private_data;

	u8			sector_buf[ATA_SECT_SIZE]; /* owned by EH */
};
struct ata_port_operations {
	void (*port_disable) (struct ata_port *);

	void (*dev_config) (struct ata_port *, struct ata_device *);

	void (*set_piomode) (struct ata_port *, struct ata_device *);
	void (*set_dmamode) (struct ata_port *, struct ata_device *);
	unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long);

	void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
	void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);

	void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
	u8   (*check_status)(struct ata_port *ap);
	u8   (*check_altstatus)(struct ata_port *ap);
	void (*dev_select)(struct ata_port *ap, unsigned int device);

	void (*phy_reset) (struct ata_port *ap); /* obsolete */
	void (*set_mode) (struct ata_port *ap);
	void (*post_set_mode) (struct ata_port *ap);

	int  (*check_atapi_dma) (struct ata_queued_cmd *qc);

	void (*bmdma_setup) (struct ata_queued_cmd *qc);
	void (*bmdma_start) (struct ata_queued_cmd *qc);

	void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);

	void (*qc_prep) (struct ata_queued_cmd *qc);
	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);

	/* Error handlers.  ->error_handler overrides ->eng_timeout and
	 * indicates that new-style EH is in place.
	 */
	void (*eng_timeout) (struct ata_port *ap); /* obsolete */

	void (*freeze) (struct ata_port *ap);
	void (*thaw) (struct ata_port *ap);
	void (*error_handler) (struct ata_port *ap);
	void (*post_internal_cmd) (struct ata_queued_cmd *qc);

	irq_handler_t irq_handler;
	void (*irq_clear) (struct ata_port *);

	u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
	void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
			   u32 val);

	int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
	int (*port_resume) (struct ata_port *ap);

	int (*port_start) (struct ata_port *ap);
	void (*port_stop) (struct ata_port *ap);

	void (*host_stop) (struct ata_host *host);

	void (*bmdma_stop) (struct ata_queued_cmd *qc);
	u8   (*bmdma_status) (struct ata_port *ap);
};
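
/*
 * Illustrative sketch (hypothetical driver, not part of libata): a simple
 * BMDMA-based LLD can fill most of the struct above with the default
 * helpers declared later in this header and get new-style EH by supplying
 * ->error_handler:
 *
 *	static const struct ata_port_operations my_pata_ops = {
 *		.port_disable		= ata_port_disable,
 *		.tf_load		= ata_tf_load,
 *		.tf_read		= ata_tf_read,
 *		.check_status		= ata_check_status,
 *		.exec_command		= ata_exec_command,
 *		.dev_select		= ata_std_dev_select,
 *		.bmdma_setup		= ata_bmdma_setup,
 *		.bmdma_start		= ata_bmdma_start,
 *		.bmdma_stop		= ata_bmdma_stop,
 *		.bmdma_status		= ata_bmdma_status,
 *		.qc_prep		= ata_qc_prep,
 *		.qc_issue		= ata_qc_issue_prot,
 *		.data_xfer		= ata_pio_data_xfer,
 *		.freeze			= ata_bmdma_freeze,
 *		.thaw			= ata_bmdma_thaw,
 *		.error_handler		= ata_bmdma_error_handler,
 *		.post_internal_cmd	= ata_bmdma_post_internal_cmd,
 *		.irq_handler		= ata_interrupt,
 *		.irq_clear		= ata_bmdma_irq_clear,
 *		.port_start		= ata_port_start,
 *		.port_stop		= ata_port_stop,
 *		.host_stop		= ata_host_stop,
 *	};
 */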
struct ata_port_info {
	struct scsi_host_template *sht;
	unsigned long		flags;
	unsigned long		pio_mask;
	unsigned long		mwdma_mask;
	unsigned long		udma_mask;
	const struct ata_port_operations *port_ops;
	void 			*private_data;
};

struct ata_timing {
	unsigned short mode;		/* ATA mode */
	unsigned short setup;		/* t1 */
	unsigned short act8b;		/* t2 for 8-bit I/O */
	unsigned short rec8b;		/* t2i for 8-bit I/O */
	unsigned short cyc8b;		/* t0 for 8-bit I/O */
	unsigned short active;		/* t2 or tD */
	unsigned short recover;		/* t2i or tK */
	unsigned short cycle;		/* t0 */
	unsigned short udma;		/* t2CYCTYP/2 */
};

#define FIT(v, vmin, vmax)	max_t(short, min_t(short, v, vmax), vmin)

extern const unsigned long sata_deb_timing_normal[];
extern const unsigned long sata_deb_timing_hotplug[];
extern const unsigned long sata_deb_timing_long[];

extern const struct ata_port_operations ata_dummy_port_ops;

static inline const unsigned long *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
	if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
		return sata_deb_timing_hotplug;
	else
		return sata_deb_timing_normal;
}

static inline int ata_port_is_dummy(struct ata_port *ap)
{
	return ap->ops == &ata_dummy_port_ops;
}
extern void ata_port_probe(struct ata_port *);
extern void __sata_phy_reset(struct ata_port *ap);
extern void sata_phy_reset(struct ata_port *ap);
extern void ata_bus_reset(struct ata_port *ap);
extern int sata_set_spd(struct ata_port *ap);
extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param);
extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param);
extern int ata_std_prereset(struct ata_port *ap);
extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
extern void ata_port_disable(struct ata_port *);
extern void ata_std_ports(struct ata_ioports *ioaddr);
#ifdef CONFIG_PCI
extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
			     unsigned int n_ports);
extern void ata_pci_remove_one (struct pci_dev *pdev);
extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
extern void ata_pci_device_do_resume(struct pci_dev *pdev);
extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
extern int ata_pci_device_resume(struct pci_dev *pdev);
extern int ata_pci_clear_simplex(struct pci_dev *pdev);
#endif /* CONFIG_PCI */
extern int ata_device_add(const struct ata_probe_ent *ent);
extern void ata_port_detach(struct ata_port *ap);
extern void ata_host_init(struct ata_host *, struct device *,
			  unsigned long, const struct ata_port_operations *);
extern void ata_host_remove(struct ata_host *host);
extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
extern int ata_scsi_release(struct Scsi_Host *host);
extern void ata_sas_port_destroy(struct ata_port *);
extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
					   struct ata_port_info *, struct Scsi_Host *);
extern int ata_sas_port_init(struct ata_port *);
extern int ata_sas_port_start(struct ata_port *ap);
extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
			    struct ata_port *ap);
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
extern int sata_scr_valid(struct ata_port *ap);
extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
extern int ata_port_online(struct ata_port *ap);
extern int ata_port_offline(struct ata_port *ap);
extern int ata_scsi_device_resume(struct scsi_device *);
extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t mesg);
extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
extern int ata_ratelimit(void);
extern unsigned int ata_busy_sleep(struct ata_port *ap,
				   unsigned long timeout_pat,
				   unsigned long timeout);
extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
				void *data, unsigned long delay);
extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
			     unsigned long interval_msec,
			     unsigned long timeout_msec);

/*
 * Default driver ops implementations
 */
extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
extern u8 ata_check_status(struct ata_port *ap);
extern u8 ata_altstatus(struct ata_port *ap);
extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
extern int ata_port_start (struct ata_port *ap);
extern void ata_port_stop (struct ata_port *ap);
extern void ata_host_stop (struct ata_host *host);
extern irqreturn_t ata_interrupt (int irq, void *dev_instance);
extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
			       unsigned int buflen, int write_data);
extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
			      unsigned int buflen, int write_data);
extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
				    unsigned int buflen, int write_data);
extern void ata_qc_prep(struct ata_queued_cmd *qc);
extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
			    unsigned int buflen);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
			unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
extern void ata_id_string(const u16 *id, unsigned char *s,
			  unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
			    unsigned int ofs, unsigned int len);
extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
extern void ata_bmdma_start (struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8   ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void ata_bmdma_freeze(struct ata_port *ap);
extern void ata_bmdma_thaw(struct ata_port *ap);
extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			       ata_reset_fn_t softreset,
			       ata_reset_fn_t hardreset,
			       ata_postreset_fn_t postreset);
extern void ata_bmdma_error_handler(struct ata_port *ap);
extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
			u8 status, int in_wq);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
				    void (*finish_qc)(struct ata_queued_cmd *));
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
			      void (*done)(struct scsi_cmnd *));
extern int ata_std_bios_param(struct scsi_device *sdev,
			      struct block_device *bdev,
			      sector_t capacity, int geom[]);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
				       int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);

/*
 * Timing helpers
 */
extern unsigned int ata_pio_need_iordy(const struct ata_device *);
extern int ata_timing_compute(struct ata_device *, unsigned short,
			      struct ata_timing *, int, int);
extern void ata_timing_merge(const struct ata_timing *,
			     const struct ata_timing *, struct ata_timing *,
			     unsigned int);
enum {
	ATA_TIMING_SETUP	= (1 << 0),
	ATA_TIMING_ACT8B	= (1 << 1),
	ATA_TIMING_REC8B	= (1 << 2),
	ATA_TIMING_CYC8B	= (1 << 3),
	ATA_TIMING_8BIT		= ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
				  ATA_TIMING_CYC8B,
	ATA_TIMING_ACTIVE	= (1 << 4),
	ATA_TIMING_RECOVER	= (1 << 5),
	ATA_TIMING_CYCLE	= (1 << 6),
	ATA_TIMING_UDMA		= (1 << 7),
	ATA_TIMING_ALL		= ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
				  ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
				  ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
};
#ifdef CONFIG_PCI
struct pci_bits {
	unsigned int		reg;	/* PCI config register to read */
	unsigned int		width;	/* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
	unsigned long		mask;
	unsigned long		val;
};

extern void ata_pci_host_stop (struct ata_host *host);
extern struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
#endif /* CONFIG_PCI */
/*
 * EH
 */
extern void ata_eng_timeout(struct ata_port *ap);
extern void ata_port_schedule_eh(struct ata_port *ap);
extern int ata_port_abort(struct ata_port *ap);
extern int ata_port_freeze(struct ata_port *ap);
extern void ata_eh_freeze_port(struct ata_port *ap);
extern void ata_eh_thaw_port(struct ata_port *ap);
extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
		      ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		      ata_postreset_fn_t postreset);

/*
 * printk helpers
 */
#define ata_port_printk(ap, lv, fmt, args...) \
	printk(lv"ata%u: "fmt, (ap)->id , ##args)

#define ata_dev_printk(dev, lv, fmt, args...) \
	printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)
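
/*
 * Usage example (illustrative): these expand to ordinary printk() calls
 * prefixed with the port or device name, e.g.
 *
 *	ata_port_printk(ap, KERN_WARNING, "SError cleared\n");
 *	ata_dev_printk(dev, KERN_INFO, "configured for %s\n", "UDMA/100");
 */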
/*
 * ata_eh_info helpers
 */
#define ata_ehi_push_desc(ehi, fmt, args...) do { \
	(ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
				     ATA_EH_DESC_LEN - (ehi)->desc_len, \
				     fmt , ##args); \
} while (0)

#define ata_ehi_clear_desc(ehi) do { \
	(ehi)->desc[0] = '\0'; \
	(ehi)->desc_len = 0; \
} while (0)
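
/*
 * Example (sketch; irq_stat is just a hypothetical local variable): an
 * interrupt or EH path can accumulate a human-readable description of what
 * went wrong before freezing the port and scheduling EH:
 *
 *	struct ata_eh_info *ehi = &ap->eh_info;
 *
 *	ata_ehi_clear_desc(ehi);
 *	ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
 *	ehi->err_mask |= AC_ERR_HSM;
 *	ehi->action |= ATA_EH_SOFTRESET;
 *	ata_port_freeze(ap);
 */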
static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
	if (ehi->flags & ATA_EHI_HOTPLUGGED)
		return;

	ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
	ehi->hotplug_timestamp = jiffies;

	ehi->action |= ATA_EH_SOFTRESET;
	ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
}

static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
	__ata_ehi_hotplugged(ehi);
	ehi->err_mask |= AC_ERR_ATA_BUS;
}

/*
 * qc helpers
 */
static inline int
ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)
		return 1;
	if (qc->pad_len)
		return 0;
	if (((sg - qc->__sg) + 1) == qc->n_elem)
		return 1;
	return 0;
}

static inline struct scatterlist *
ata_qc_first_sg(struct ata_queued_cmd *qc)
{
	if (qc->n_elem)
		return qc->__sg;
	if (qc->pad_len)
		return &qc->pad_sgent;
	return NULL;
}

static inline struct scatterlist *
ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)
		return NULL;
	if (++sg - qc->__sg < qc->n_elem)
		return sg;
	if (qc->pad_len)
		return &qc->pad_sgent;
	return NULL;
}

#define ata_for_each_sg(sg, qc) \
	for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
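
/*
 * Example (sketch): walking a command's scatter/gather list, including the
 * trailing pad entry if any, e.g. when building a controller-specific PRD
 * table:
 *
 *	struct scatterlist *sg;
 *	unsigned int idx = 0;
 *
 *	ata_for_each_sg(sg, qc) {
 *		u32 addr = sg_dma_address(sg);
 *		u32 len  = sg_dma_len(sg);
 *
 *		// fill in hardware-specific descriptor idx here
 *		idx++;
 *	}
 */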
static inline unsigned int ata_tag_valid(unsigned int tag)
{
	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
}

static inline unsigned int ata_tag_internal(unsigned int tag)
{
	return tag == ATA_MAX_QUEUE - 1;
}

/*
 * device helpers
 */
static inline unsigned int ata_class_enabled(unsigned int class)
{
	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
}

static inline unsigned int ata_class_disabled(unsigned int class)
{
	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
}

static inline unsigned int ata_class_absent(unsigned int class)
{
	return !ata_class_enabled(class) && !ata_class_disabled(class);
}

static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
{
	return ata_class_enabled(dev->class);
}

static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
{
	return ata_class_disabled(dev->class);
}

static inline unsigned int ata_dev_absent(const struct ata_device *dev)
{
	return ata_class_absent(dev->class);
}

static inline unsigned int ata_dev_ready(const struct ata_device *dev)
{
	return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
}

/*
 * port helpers
 */
static inline int ata_port_max_devices(const struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_SLAVE_POSS)
		return 2;
	return 1;
}

static inline u8 ata_chk_status(struct ata_port *ap)
{
	return ap->ops->check_status(ap);
}
/**
 *	ata_pause - Flush writes and pause 400 nanoseconds.
 *	@ap: Port to wait for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static inline void ata_pause(struct ata_port *ap)
{
	ata_altstatus(ap);
	ndelay(400);
}

/**
 *	ata_busy_wait - Wait for a port status register
 *	@ap: Port to wait for.
 *	@bits: bits that must be clear
 *	@max: number of 10 microsecond waits
 *
 *	Waits up to max*10 microseconds for the selected bits in the port's
 *	status register to be cleared.
 *	Returns final value of status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
			       unsigned int max)
{
	u8 status;

	do {
		udelay(10);
		status = ata_chk_status(ap);
		max--;
	} while ((status & bits) && (max > 0));

	return status;
}

/**
 *	ata_wait_idle - Wait for a port to be idle.
 *	@ap: Port to wait for.
 *
 *	Waits up to 10ms for port's BUSY and DRQ signals to clear.
 *	Returns final value of status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static inline u8 ata_wait_idle(struct ata_port *ap)
{
	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);

	if (status & (ATA_BUSY | ATA_DRQ)) {
		unsigned long l = ap->ioaddr.status_addr;
		if (ata_msg_warn(ap))
			printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
			       status, l);
	}

	return status;
}
static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
{
	qc->tf.ctl |= ATA_NIEN;
}

static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
							unsigned int tag)
{
	if (likely(ata_tag_valid(tag)))
		return &ap->qcmd[tag];
	return NULL;
}

static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
						     unsigned int tag)
{
	struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

	if (unlikely(!qc) || !ap->ops->error_handler)
		return qc;

	if ((qc->flags & (ATA_QCFLAG_ACTIVE |
			  ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
		return qc;

	return NULL;
}

static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
{
	memset(tf, 0, sizeof(*tf));

	tf->ctl = dev->ap->ctl;
	if (dev->devno == 0)
		tf->device = ATA_DEVICE_OBS;
	else
		tf->device = ATA_DEVICE_OBS | ATA_DEV1;
}

static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
{
	qc->__sg = NULL;
	qc->flags = 0;
	qc->cursect = qc->cursg = qc->cursg_ofs = 0;
	qc->nsect = 0;
	qc->nbytes = qc->curbytes = 0;
	qc->err_mask = 0;

	ata_tf_init(qc->dev, &qc->tf);

	/* init result_tf such that it indicates normal completion */
	qc->result_tf.command = ATA_DRDY;
	qc->result_tf.feature = 0;
}
/**
 *	ata_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static inline u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->flags & ATA_FLAG_MMIO)
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	else
		outb(ap->ctl, ioaddr->ctl_addr);
	tmp = ata_wait_idle(ap);

	ap->ops->irq_clear(ap);

	return tmp;
}

/**
 *	ata_irq_ack - Acknowledge a device interrupt.
 *	@ap: Port on which interrupts are enabled.
 *	@chk_drq: if set, also wait for DRQ to clear
 *
 *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
 *	or BUSY+DRQ clear).  Obtain dma status and port status from
 *	device.  Clear the interrupt.  Return port status.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
{
	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
	u8 host_stat, post_stat, status;

	status = ata_busy_wait(ap, bits, 1000);
	if (status & bits)
		if (ata_msg_err(ap))
			printk(KERN_ERR "abnormal status 0x%X\n", status);

	/* get controller status; clear intr, err bits */
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
		host_stat = readb(mmio + ATA_DMA_STATUS);
		writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		       mmio + ATA_DMA_STATUS);

		post_stat = readb(mmio + ATA_DMA_STATUS);
	} else {
		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
		outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		     ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

		post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	}

	if (ata_msg_intr(ap))
		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
		       __FUNCTION__,
		       host_stat, post_stat, status);

	return status;
}
static inline int ata_try_flush_cache(const struct ata_device *dev)
{
	return ata_id_wcache_enabled(dev->id) ||
	       ata_id_has_flush(dev->id) ||
	       ata_id_has_flush_ext(dev->id);
}

static inline unsigned int ac_err_mask(u8 status)
{
	if (status & (ATA_BUSY | ATA_DRQ))
		return AC_ERR_HSM;
	if (status & (ATA_ERR | ATA_DF))
		return AC_ERR_DEV;
	return 0;
}

static inline unsigned int __ac_err_mask(u8 status)
{
	unsigned int mask = ac_err_mask(status);
	if (mask == 0)
		return AC_ERR_OTHER;
	return mask;
}

static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
{
	ap->pad_dma = 0;
	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
				     &ap->pad_dma, GFP_KERNEL);
	return (ap->pad == NULL) ? -ENOMEM : 0;
}

static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
{
	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
}

static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
{
	return (struct ata_port *) &host->hostdata[0];
}

#endif /* __LINUX_LIBATA_H__ */