#ifndef _IDE_H
#define _IDE_H
/*
 * linux/include/linux/ide.h
 *
 * Copyright (C) 1994-2002 Linus Torvalds & authors
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/ata.h>
#include <linux/blkdev.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/bio.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/pm.h>
#ifdef CONFIG_BLK_DEV_IDEACPI
#include <acpi/acpi.h>
#endif
#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/mutex.h>

/* for request_sense */
#include <linux/cdrom.h>

#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300)
# define SUPPORT_VLB_SYNC 0
#else
# define SUPPORT_VLB_SYNC 1
#endif

/*
 * Probably not wise to fiddle with these
 */
#define IDE_DEFAULT_MAX_FAILURES 1
#define ERROR_MAX 8 /* Max read/write errors per sector */
#define ERROR_RESET 3 /* Reset controller every 4th retry */
#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */

/* Error codes returned in rq->errors to the higher part of the driver. */
enum {
        IDE_DRV_ERROR_GENERAL  = 101,
        IDE_DRV_ERROR_FILEMARK = 102,
        IDE_DRV_ERROR_EOD      = 103,
};

/*
 * Definitions for accessing IDE controller registers
 */
#define IDE_NR_PORTS (10)

struct ide_io_ports {
        unsigned long data_addr;

        union {
                unsigned long error_addr;   /*  read: error */
                unsigned long feature_addr; /* write: feature */
        };

        unsigned long nsect_addr;
        unsigned long lbal_addr;
        unsigned long lbam_addr;
        unsigned long lbah_addr;

        unsigned long device_addr;

        union {
                unsigned long status_addr;  /*  read: status  */
                unsigned long command_addr; /* write: command */
        };

        unsigned long ctl_addr;

        unsigned long irq_addr;
};

#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))

#define BAD_R_STAT  (ATA_BUSY | ATA_ERR)
#define BAD_W_STAT  (BAD_R_STAT | ATA_DF)
#define BAD_STAT    (BAD_R_STAT | ATA_DRQ)
#define DRIVE_READY (ATA_DRDY | ATA_DSC)

#define BAD_CRC     (ATA_ABORTED | ATA_ICRC)
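/*
 * Usage sketch (illustrative, not part of this header): a driver that has
 * just read the status register can combine OK_STAT() with the masks above
 * to decide whether a write completed cleanly.  "drive", "hwif" and "stat"
 * are hypothetical locals.
 *
 *	u8 stat = hwif->tp_ops->read_status(hwif);
 *
 *	if (!OK_STAT(stat, DRIVE_READY, BAD_W_STAT))
 *		return ide_error(drive, "write error", stat);
 */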
#define SATA_NR_PORTS (3) /* 16 possible ?? */

#define SATA_STATUS_OFFSET  (0)
#define SATA_ERROR_OFFSET   (1)
#define SATA_CONTROL_OFFSET (2)

/*
 * Our Physical Region Descriptor (PRD) table should be large enough
 * to handle the biggest I/O request we are likely to see. Since requests
 * can have no more than 256 sectors, and since the typical blocksize is
 * two or more sectors, we could get by with a limit of 128 entries here for
 * the usual worst case. Most requests seem to include some contiguous blocks,
 * further reducing the number of table entries required.
 *
 * The driver reverts to PIO mode for individual requests that exceed
 * this limit (possible with 512 byte blocksizes, eg. MSDOS f/s), so handling
 * 100% of all crazy scenarios here is not necessary.
 *
 * As it turns out though, we must allocate a full 4KB page for this,
 * so the two PRD tables (ide0 & ide1) will each get half of that,
 * allowing each to have about 256 entries (8 bytes each) from this.
 */
#define PRD_BYTES   8
#define PRD_ENTRIES 256

/*
 * Some more useful definitions
 */
#define PARTN_BITS  6 /* number of minor dev bits for partitions */
#define MAX_DRIVES  2 /* per interface; 2 assumed by lots of code */
#define SECTOR_SIZE 512

/*
 * Timeouts for various operations:
 */
enum {
        /* spec allows up to 20ms */
        WAIT_DRQ       = HZ / 10,  /* 100ms */
        /* some laptops are very slow */
        WAIT_READY     = 5 * HZ,   /* 5s */
        /* should be less than 3ms (?), if all ATAPI CD is closed at boot */
        WAIT_PIDENTIFY = 10 * HZ,  /* 10s */
        /* worst case when spinning up */
        WAIT_WORSTCASE = 30 * HZ,  /* 30s */
        /* maximum wait for an IRQ to happen */
        WAIT_CMD       = 10 * HZ,  /* 10s */
        /* Some drives require a longer IRQ timeout. */
        WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */
        /*
         * Some drives (for example, Seagate STT3401A Travan) require a very
         * long timeout, because they don't return an interrupt or clear their
         * BSY bit until after the command completes (even retention commands).
         */
        WAIT_TAPE_CMD  = 900 * HZ, /* 900s */
        /* minimum sleep time */
        WAIT_MIN_SLEEP = HZ / 50,  /* 20ms */
};

/*
 * Op codes for special requests to be handled by ide_special_rq().
 * Values should be in the range of 0x20 to 0x3f.
 */
#define REQ_DRIVE_RESET  0x20
#define REQ_DEVSET_EXEC  0x21
#define REQ_PARK_HEADS   0x22
#define REQ_UNPARK_HEADS 0x23

/*
 * Check for an interrupt and acknowledge the interrupt status
 */
struct hwif_s;
typedef int (ide_ack_intr_t)(struct hwif_s *);

/*
 * hwif_chipset_t is used to keep track of the specific hardware
 * chipset used by each IDE interface, if known.
 */
enum {  ide_unknown, ide_generic, ide_pci,
        ide_cmd640, ide_dtc2278, ide_ali14xx,
        ide_qd65xx, ide_umc8672, ide_ht6560b,
        ide_4drives, ide_pmac, ide_acorn,
        ide_au1xxx, ide_palm3710
};

typedef u8 hwif_chipset_t;

/*
 * Structure to hold all information about the location of this port
 */
struct ide_hw {
        union {
                struct ide_io_ports io_ports;
                unsigned long io_ports_array[IDE_NR_PORTS];
        };

        int irq;                  /* our irq number */
        ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
        struct device *dev, *parent;
        unsigned long config;
};

static inline void ide_std_init_ports(struct ide_hw *hw,
                                      unsigned long io_addr,
                                      unsigned long ctl_addr)
{
        unsigned int i;

        for (i = 0; i <= 7; i++)
                hw->io_ports_array[i] = io_addr++;

        hw->io_ports.ctl_addr = ctl_addr;
}
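/*
 * Example (illustrative only, not mandated by this header): filling in a
 * struct ide_hw for the conventional legacy primary port at 0x1f0-0x1f7,
 * control register 0x3f6, IRQ 14, and handing it to the core.  The
 * "generic_port_info" name is a placeholder for a host driver's own
 * struct ide_port_info.
 *
 *	struct ide_hw hw, *hws[] = { &hw };
 *
 *	memset(&hw, 0, sizeof(hw));
 *	ide_std_init_ports(&hw, 0x1f0, 0x3f6);
 *	hw.irq = 14;
 *
 *	ide_host_add(&generic_port_info, hws, 1, NULL);
 */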
#define MAX_HWIFS 10

/*
 * Now for the data we need to maintain per-drive: ide_drive_t
 */
#define ide_scsi    0x21
#define ide_disk    0x20
#define ide_optical 0x7
#define ide_cdrom   0x5
#define ide_tape    0x1
#define ide_floppy  0x0

/*
 * Special Driver Flags
 */
enum {
        IDE_SFLAG_SET_GEOMETRY = (1 << 0),
        IDE_SFLAG_RECALIBRATE  = (1 << 1),
        IDE_SFLAG_SET_MULTMODE = (1 << 2),
};

/*
 * Status returned from various ide_ functions
 */
typedef enum {
        ide_stopped, /* no drive operation was started */
        ide_started, /* a drive operation was started, handler was set */
} ide_startstop_t;

enum {
        IDE_VALID_ERROR   = (1 << 1),
        IDE_VALID_FEATURE = IDE_VALID_ERROR,
        IDE_VALID_NSECT   = (1 << 2),
        IDE_VALID_LBAL    = (1 << 3),
        IDE_VALID_LBAM    = (1 << 4),
        IDE_VALID_LBAH    = (1 << 5),
        IDE_VALID_DEVICE  = (1 << 6),
        IDE_VALID_LBA     = IDE_VALID_LBAL |
                            IDE_VALID_LBAM |
                            IDE_VALID_LBAH,
        IDE_VALID_OUT_TF  = IDE_VALID_FEATURE |
                            IDE_VALID_NSECT |
                            IDE_VALID_LBA,
        IDE_VALID_IN_TF   = IDE_VALID_NSECT |
                            IDE_VALID_LBA,
        IDE_VALID_OUT_HOB = IDE_VALID_OUT_TF,
        IDE_VALID_IN_HOB  = IDE_VALID_ERROR |
                            IDE_VALID_NSECT |
                            IDE_VALID_LBA,
};

enum {
        IDE_TFLAG_LBA48            = (1 << 0),
        IDE_TFLAG_WRITE            = (1 << 1),
        IDE_TFLAG_CUSTOM_HANDLER   = (1 << 2),
        IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 3),
        /* force 16-bit I/O operations */
        IDE_TFLAG_IO_16BIT         = (1 << 4),
        /* struct ide_cmd was allocated using kmalloc() */
        IDE_TFLAG_DYN              = (1 << 5),
        IDE_TFLAG_FS               = (1 << 6),
        IDE_TFLAG_MULTI_PIO        = (1 << 7),
};

enum {
        IDE_FTFLAG_FLAGGED      = (1 << 0),
        IDE_FTFLAG_SET_IN_FLAGS = (1 << 1),
        IDE_FTFLAG_OUT_DATA     = (1 << 2),
        IDE_FTFLAG_IN_DATA      = (1 << 3),
};

struct ide_taskfile {
        u8 data;            /* 0: data byte (for TASKFILE ioctl) */
        union {             /* 1: */
                u8 error;   /*  read: error */
                u8 feature; /* write: feature */
        };
        u8 nsect;           /* 2: number of sectors */
        u8 lbal;            /* 3: LBA low */
        u8 lbam;            /* 4: LBA mid */
        u8 lbah;            /* 5: LBA high */
        u8 device;          /* 6: device select */
        union {             /* 7: */
                u8 status;  /*  read: status */
                u8 command; /* write: command */
        };
};

struct ide_cmd {
        struct ide_taskfile tf;
        struct ide_taskfile hob;
        struct {
                struct {
                        u8 tf;
                        u8 hob;
                } out, in;
        } valid;

        u8 tf_flags;
        u8 ftf_flags; /* for TASKFILE ioctl */
        int protocol;

        int sg_nents;          /* number of sg entries */
        int orig_sg_nents;
        int sg_dma_direction;  /* DMA transfer direction */

        unsigned int nbytes;
        unsigned int nleft;
        unsigned int last_xfer_len;

        struct scatterlist *cursg;
        unsigned int cursg_ofs;

        struct request *rq; /* copy of request */
};

/* ATAPI packet command flags */
enum {
        /* set when an error is considered normal - no retry (ide-tape) */
        PC_FLAG_ABORT           = (1 << 0),
        PC_FLAG_SUPPRESS_ERROR  = (1 << 1),
        PC_FLAG_WAIT_FOR_DSC    = (1 << 2),
        PC_FLAG_DMA_OK          = (1 << 3),
        PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
        PC_FLAG_DMA_ERROR       = (1 << 5),
        PC_FLAG_WRITING         = (1 << 6),
};

/*
 * With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes.
 * This is used for several packet commands (not for READ/WRITE commands).
 */
#define IDE_PC_BUFFER_SIZE 64
#define ATAPI_WAIT_PC (60 * HZ)

struct ide_atapi_pc {
        /* actual packet bytes */
        u8 c[12];
        /* incremented on each retry */
        int retries;
        int error;

        /* bytes to transfer */
        int req_xfer;
        /* bytes actually transferred */
        int xferred;

        /* data buffer */
        u8 *buf;
        int buf_size;

        /* the corresponding request */
        struct request *rq;

        unsigned long flags;

        /*
         * those are more or less driver-specific and some of them are subject
         * to change/removal later.
         */
        u8 pc_buf[IDE_PC_BUFFER_SIZE];

        unsigned long timeout;
};
struct ide_devset;
struct ide_driver;

#ifdef CONFIG_BLK_DEV_IDEACPI
struct ide_acpi_drive_link;
struct ide_acpi_hwif_link;
#endif

struct ide_drive_s;

struct ide_disk_ops {
        int             (*check)(struct ide_drive_s *, const char *);
        int             (*get_capacity)(struct ide_drive_s *);
        u64             (*set_capacity)(struct ide_drive_s *, u64);
        void            (*setup)(struct ide_drive_s *);
        void            (*flush)(struct ide_drive_s *);
        int             (*init_media)(struct ide_drive_s *, struct gendisk *);
        int             (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
                                        int);
        ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
                                      sector_t);
        int             (*ioctl)(struct ide_drive_s *, struct block_device *,
                                 fmode_t, unsigned int, unsigned long);
};

/* ATAPI device flags */
enum {
        IDE_AFLAG_DRQ_INTERRUPT    = (1 << 0),

        /* ide-cd */
        /* Drive cannot eject the disc. */
        IDE_AFLAG_NO_EJECT         = (1 << 1),
        /* Drive is a pre ATAPI 1.2 drive. */
        IDE_AFLAG_PRE_ATAPI12      = (1 << 2),
        /* TOC addresses are in BCD. */
        IDE_AFLAG_TOCADDR_AS_BCD   = (1 << 3),
        /* TOC track numbers are in BCD. */
        IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4),
        /* Saved TOC information is current. */
        IDE_AFLAG_TOC_VALID        = (1 << 6),
        /* We think that the drive door is locked. */
        IDE_AFLAG_DOOR_LOCKED      = (1 << 7),
        /* SET_CD_SPEED command is unsupported. */
        IDE_AFLAG_NO_SPEED_SELECT  = (1 << 8),
        IDE_AFLAG_VERTOS_300_SSD   = (1 << 9),
        IDE_AFLAG_VERTOS_600_ESD   = (1 << 10),
        IDE_AFLAG_SANYO_3CD        = (1 << 11),
        IDE_AFLAG_FULL_CAPS_PAGE   = (1 << 12),
        IDE_AFLAG_PLAY_AUDIO_OK    = (1 << 13),
        IDE_AFLAG_LE_SPEED_FIELDS  = (1 << 14),

        /* ide-floppy */
        /* Avoid commands not supported in Clik drive */
        IDE_AFLAG_CLIK_DRIVE       = (1 << 15),
        /* Requires BH algorithm for packets */
        IDE_AFLAG_ZIP_DRIVE        = (1 << 16),
        /* Supports format progress report */
        IDE_AFLAG_SRFP             = (1 << 17),

        /* ide-tape */
        IDE_AFLAG_IGNORE_DSC       = (1 << 18),
        /* 0 When the tape position is unknown */
        IDE_AFLAG_ADDRESS_VALID    = (1 << 19),
        /* Device already opened */
        IDE_AFLAG_BUSY             = (1 << 20),
        /* Attempt to auto-detect the current user block size */
        IDE_AFLAG_DETECT_BS        = (1 << 21),
        /* Currently on a filemark */
        IDE_AFLAG_FILEMARK         = (1 << 22),
        /* 0 = no tape is loaded, so we don't rewind after ejecting */
        IDE_AFLAG_MEDIUM_PRESENT   = (1 << 23),

        IDE_AFLAG_NO_AUTOCLOSE     = (1 << 24),
};

/* device flags */
enum {
        /* restore settings after device reset */
        IDE_DFLAG_KEEP_SETTINGS    = (1 << 0),
        /* device is using DMA for read/write */
        IDE_DFLAG_USING_DMA        = (1 << 1),
        /* okay to unmask other IRQs */
        IDE_DFLAG_UNMASK           = (1 << 2),
        /* don't attempt flushes */
        IDE_DFLAG_NOFLUSH          = (1 << 3),
        /* DSC overlap */
        IDE_DFLAG_DSC_OVERLAP      = (1 << 4),
        /* give potential excess bandwidth */
        IDE_DFLAG_NICE1            = (1 << 5),
        /* device is physically present */
        IDE_DFLAG_PRESENT          = (1 << 6),
        /* disable Host Protected Area */
        IDE_DFLAG_NOHPA            = (1 << 7),
        /* id read from device (synthetic if not set) */
        IDE_DFLAG_ID_READ          = (1 << 8),
        IDE_DFLAG_NOPROBE          = (1 << 9),
        /* need to do check_media_change() */
        IDE_DFLAG_REMOVABLE        = (1 << 10),
        /* needed for removable devices */
        IDE_DFLAG_ATTACH           = (1 << 11),
        IDE_DFLAG_FORCED_GEOM      = (1 << 12),
        /* disallow setting unmask bit */
        IDE_DFLAG_NO_UNMASK        = (1 << 13),
        /* disallow enabling 32-bit I/O */
        IDE_DFLAG_NO_IO_32BIT      = (1 << 14),
        /* for removable only: door lock/unlock works */
        IDE_DFLAG_DOORLOCKING      = (1 << 15),
        /* disallow DMA */
        IDE_DFLAG_NODMA            = (1 << 16),
        /* power management told us not to do anything, so sleep nicely */
        IDE_DFLAG_BLOCKED          = (1 << 17),
        /* sleeping & sleep field valid */
        IDE_DFLAG_SLEEPING         = (1 << 18),
        IDE_DFLAG_POST_RESET       = (1 << 19),
        IDE_DFLAG_UDMA33_WARNED    = (1 << 20),
        IDE_DFLAG_LBA48            = (1 << 21),
        /* status of write cache */
        IDE_DFLAG_WCACHE           = (1 << 22),
        /* used for ignoring ATA_DF */
        IDE_DFLAG_NOWERR           = (1 << 23),
        /* retrying in PIO */
        IDE_DFLAG_DMA_PIO_RETRY    = (1 << 24),
        IDE_DFLAG_LBA              = (1 << 25),
        /* don't unload heads */
        IDE_DFLAG_NO_UNLOAD        = (1 << 26),
        /* heads unloaded, please don't reset port */
        IDE_DFLAG_PARKED           = (1 << 27),
        IDE_DFLAG_MEDIA_CHANGED    = (1 << 28),
        /* write protect */
        IDE_DFLAG_WP               = (1 << 29),
        IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30),
};
struct ide_drive_s {
        char name[4];          /* drive name, such as "hda" */
        char driver_req[10];   /* requests specific driver */

        struct request_queue *queue; /* request queue */

        struct request *rq;    /* current request */
        void *driver_data;     /* extra driver data */
        u16 *id;               /* identification info */
#ifdef CONFIG_IDE_PROC_FS
        struct proc_dir_entry *proc;            /* /proc/ide/ directory entry */
        const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
#endif
        struct hwif_s *hwif;   /* actually (ide_hwif_t *) */

        const struct ide_disk_ops *disk_ops;

        unsigned long dev_flags;

        unsigned long sleep;   /* sleep until this time */
        unsigned long timeout; /* max time to wait for irq */

        u8 special_flags;      /* special action flags */

        u8 select;             /* basic drive/head select reg value */

        u8 retry_pio;          /* retrying dma capable host in pio */
        u8 waiting_for_dma;    /* dma currently in progress */
        u8 dma;                /* atapi dma flag */

        u8 quirk_list;         /* considered quirky, set for a specific host */
        u8 init_speed;         /* transfer rate set at boot */
        u8 current_speed;      /* current transfer rate set */
        u8 desired_speed;      /* desired transfer rate set */
        u8 dn;                 /* now in widespread use */
        u8 acoustic;           /* acoustic management */
        u8 media;              /* disk, cdrom, tape, floppy, ... */
        u8 ready_stat;         /* min status value for drive ready */
        u8 mult_count;         /* current multiple sector setting */
        u8 mult_req;           /* requested multiple sector setting */
        u8 io_32bit;           /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
        u8 bad_wstat;          /* used for ignoring ATA_DF */
        u8 head;               /* "real" number of heads */
        u8 sect;               /* "real" sectors per track */
        u8 bios_head;          /* BIOS/fdisk/LILO number of heads */
        u8 bios_sect;          /* BIOS/fdisk/LILO sectors per track */

        /* delay this long before sending packet command */
        u8 pc_delay;

        unsigned int bios_cyl;     /* BIOS/fdisk/LILO number of cyls */
        unsigned int cyl;          /* "real" number of cyls */
        unsigned int drive_data;   /* used by set_pio_mode/dev_select() */
        unsigned int failures;     /* current failure count */
        unsigned int max_failures; /* maximum allowed failure count */
        u64 probed_capacity;       /* initial/native media capacity */
        u64 capacity64;            /* total number of sectors */

        int lun;               /* logical unit */
        int crc_count;         /* crc counter to reduce drive speed */

        unsigned long debug_mask; /* debugging levels switch */

#ifdef CONFIG_BLK_DEV_IDEACPI
        struct ide_acpi_drive_link *acpidata;
#endif
        struct list_head list;
        struct device gendev;
        struct completion gendev_rel_comp; /* to deal with device release() */

        /* current packet command */
        struct ide_atapi_pc *pc;

        /* last failed packet command */
        struct ide_atapi_pc *failed_pc;

        /* callback for packet commands */
        int (*pc_callback)(struct ide_drive_s *, int);

        ide_startstop_t (*irq_handler)(struct ide_drive_s *);

        unsigned long atapi_flags;

        struct ide_atapi_pc request_sense_pc;

        /* current sense rq and buffer */
        bool sense_rq_armed;
        struct request sense_rq;
        struct request_sense sense_data;
};

typedef struct ide_drive_s ide_drive_t;

#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)

#define to_ide_drv(obj, cont_type) \
        container_of(obj, struct cont_type, dev)

#define ide_drv_g(disk, cont_type) \
        container_of((disk)->private_data, struct cont_type, driver)
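/*
 * Sketch of how the container-of helpers above are typically used by a
 * sub-driver (names are illustrative, not defined in this header):
 * gendisk->private_data is assumed to point at the 'driver' member of a
 * driver-private object, so ide_drv_g() recovers the enclosing object
 * from a struct gendisk.
 *
 *	struct ide_foo_obj {
 *		ide_drive_t       *drive;
 *		struct ide_driver *driver;
 *		struct gendisk    *disk;
 *	};
 *
 *	static int ide_foo_open(struct block_device *bdev, fmode_t mode)
 *	{
 *		struct ide_foo_obj *foo = ide_drv_g(bdev->bd_disk, ide_foo_obj);
 *		...
 *	}
 */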
struct ide_port_info;

struct ide_tp_ops {
        void (*exec_command)(struct hwif_s *, u8);
        u8   (*read_status)(struct hwif_s *);
        u8   (*read_altstatus)(struct hwif_s *);
        void (*write_devctl)(struct hwif_s *, u8);

        void (*dev_select)(ide_drive_t *);
        void (*tf_load)(ide_drive_t *, struct ide_taskfile *, u8);
        void (*tf_read)(ide_drive_t *, struct ide_taskfile *, u8);

        void (*input_data)(ide_drive_t *, struct ide_cmd *,
                           void *, unsigned int);
        void (*output_data)(ide_drive_t *, struct ide_cmd *,
                            void *, unsigned int);
};

extern const struct ide_tp_ops default_tp_ops;

/**
 * struct ide_port_ops - IDE port operations
 *
 * @init_dev:      host specific initialization of a device
 * @set_pio_mode:  routine to program host for PIO mode
 * @set_dma_mode:  routine to program host for DMA mode
 * @reset_poll:    chipset polling based on hba specifics
 * @pre_reset:     chipset specific changes to default for device-hba resets
 * @resetproc:     routine to reset controller after a disk reset
 * @maskproc:      special host masking for drive selection
 * @quirkproc:     check host's drive quirk list
 * @clear_irq:     clear IRQ
 *
 * @mdma_filter:   filter MDMA modes
 * @udma_filter:   filter UDMA modes
 *
 * @cable_detect:  detect cable type
 */
struct ide_port_ops {
        void (*init_dev)(ide_drive_t *);
        void (*set_pio_mode)(ide_drive_t *, const u8);
        void (*set_dma_mode)(ide_drive_t *, const u8);
        int  (*reset_poll)(ide_drive_t *);
        void (*pre_reset)(ide_drive_t *);
        void (*resetproc)(ide_drive_t *);
        void (*maskproc)(ide_drive_t *, int);
        void (*quirkproc)(ide_drive_t *);
        void (*clear_irq)(ide_drive_t *);

        u8   (*mdma_filter)(ide_drive_t *);
        u8   (*udma_filter)(ide_drive_t *);

        u8   (*cable_detect)(struct hwif_s *);
};

struct ide_dma_ops {
        void (*dma_host_set)(struct ide_drive_s *, int);
        int  (*dma_setup)(struct ide_drive_s *, struct ide_cmd *);
        void (*dma_start)(struct ide_drive_s *);
        int  (*dma_end)(struct ide_drive_s *);
        int  (*dma_test_irq)(struct ide_drive_s *);
        void (*dma_lost_irq)(struct ide_drive_s *);
        /* below ones are optional */
        int  (*dma_check)(struct ide_drive_s *, struct ide_cmd *);
        int  (*dma_timer_expiry)(struct ide_drive_s *);
        void (*dma_clear)(struct ide_drive_s *);
        /*
         * The following method is optional and only required to be
         * implemented for the SFF-8038i compatible controllers.
         */
        u8   (*dma_sff_read_status)(struct hwif_s *);
};

struct ide_host;

typedef struct hwif_s {
        struct hwif_s *mate;         /* other hwif from same PCI chip */
        struct proc_dir_entry *proc; /* /proc/ide/ directory entry */

        struct ide_host *host;

        char name[6];                /* name of interface, eg. "ide0" */

        struct ide_io_ports io_ports;

        unsigned long sata_scr[SATA_NR_PORTS];

        ide_drive_t *devices[MAX_DRIVES + 1];

        u8 major;   /* our major number */
        u8 index;   /* 0 for ide0; 1 for ide1; ... */
        u8 channel; /* for dual-port chips: 0=primary, 1=secondary */

        u32 host_flags;

        u8 pio_mask;

        u8 ultra_mask;
        u8 mwdma_mask;
        u8 swdma_mask;

        u8 cbl;     /* cable type */

        hwif_chipset_t chipset; /* sub-module for tuning.. */

        struct device *dev;

        ide_ack_intr_t *ack_intr;

        void (*rw_disk)(ide_drive_t *, struct request *);

        const struct ide_tp_ops *tp_ops;
        const struct ide_port_ops *port_ops;
        const struct ide_dma_ops *dma_ops;

        /* dma physical region descriptor table (cpu view) */
        unsigned int *dmatable_cpu;
        /* dma physical region descriptor table (dma view) */
        dma_addr_t dmatable_dma;

        /* maximum number of PRD table entries */
        int prd_max_nents;
        /* PRD entry size in bytes */
        int prd_ent_size;

        /* Scatter-gather list used to build the above */
        struct scatterlist *sg_table;
        int sg_max_nents; /* Maximum number of entries in it */

        struct ide_cmd cmd; /* current command */

        int rqsize; /* max sectors per request */
        int irq;    /* our irq number */

        unsigned long dma_base;    /* base addr for dma ports */

        unsigned long config_data; /* for use by chipset-specific code */
        unsigned long select_data; /* for use by chipset-specific code */

        unsigned long extra_base;  /* extra addr for dma ports */
        unsigned extra_ports;      /* number of extra dma ports */

        unsigned present : 1; /* this interface exists */
        unsigned busy : 1;    /* serializes devices on a port */

        struct device gendev;
        struct device *portdev;

        struct completion gendev_rel_comp; /* To deal with device release() */

        void *hwif_data; /* extra hwif data */

#ifdef CONFIG_BLK_DEV_IDEACPI
        struct ide_acpi_hwif_link *acpidata;
#endif

        /* IRQ handler, if active */
        ide_startstop_t (*handler)(ide_drive_t *);

        /* BOOL: polling active & poll_timeout field valid */
        unsigned int polling : 1;

        /* current drive */
        ide_drive_t *cur_dev;

        /* current request */
        struct request *rq;

        /* failsafe timer */
        struct timer_list timer;
        /* timeout value during long polls */
        unsigned long poll_timeout;
        /* queried upon timeouts */
        int (*expiry)(ide_drive_t *);

        int req_gen;
        int req_gen_timer;

        spinlock_t lock;
} ____cacheline_internodealigned_in_smp ide_hwif_t;
#define MAX_HOST_PORTS 4

struct ide_host {
        ide_hwif_t *ports[MAX_HOST_PORTS + 1];
        unsigned int n_ports;
        struct device *dev[2];

        int (*init_chipset)(struct pci_dev *);

        void (*get_lock)(irq_handler_t, void *);
        void (*release_lock)(void);

        irq_handler_t irq_handler;

        unsigned long host_flags;

        int irq_flags;

        void *host_priv;

        ide_hwif_t *cur_port; /* for hosts requiring serialization */

        /* used for hosts requiring serialization */
        volatile unsigned long host_busy;
};

#define IDE_HOST_BUSY 0

/*
 * internal ide interrupt handler type
 */
typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
typedef int (ide_expiry_t)(ide_drive_t *);

/* used by ide-cd, ide-floppy, etc. */
typedef void (xfer_func_t)(ide_drive_t *, struct ide_cmd *, void *, unsigned);

extern struct mutex ide_setting_mtx;

/*
 * configurable drive settings
 */
#define DS_SYNC (1 << 0)

struct ide_devset {
        int (*get)(ide_drive_t *);
        int (*set)(ide_drive_t *, int);
        unsigned int flags;
};

#define __DEVSET(_flags, _get, _set) { \
        .flags = _flags, \
        .get   = _get, \
        .set   = _set, \
}

#define ide_devset_get(name, field) \
static int get_##name(ide_drive_t *drive) \
{ \
        return drive->field; \
}

#define ide_devset_set(name, field) \
static int set_##name(ide_drive_t *drive, int arg) \
{ \
        drive->field = arg; \
        return 0; \
}

#define ide_devset_get_flag(name, flag) \
static int get_##name(ide_drive_t *drive) \
{ \
        return !!(drive->dev_flags & flag); \
}

#define ide_devset_set_flag(name, flag) \
static int set_##name(ide_drive_t *drive, int arg) \
{ \
        if (arg) \
                drive->dev_flags |= flag; \
        else \
                drive->dev_flags &= ~flag; \
        return 0; \
}

#define __IDE_DEVSET(_name, _flags, _get, _set) \
const struct ide_devset ide_devset_##_name = \
        __DEVSET(_flags, _get, _set)

#define IDE_DEVSET(_name, _flags, _get, _set) \
static __IDE_DEVSET(_name, _flags, _get, _set)

#define ide_devset_rw(_name, _func) \
IDE_DEVSET(_name, 0, get_##_func, set_##_func)

#define ide_devset_w(_name, _func) \
IDE_DEVSET(_name, 0, NULL, set_##_func)

#define ide_ext_devset_rw(_name, _func) \
__IDE_DEVSET(_name, 0, get_##_func, set_##_func)

#define ide_ext_devset_rw_sync(_name, _func) \
__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)

#define ide_decl_devset(_name) \
extern const struct ide_devset ide_devset_##_name

ide_decl_devset(io_32bit);
ide_decl_devset(keepsettings);
ide_decl_devset(pio_mode);
ide_decl_devset(unmaskirq);
ide_decl_devset(using_dma);
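/*
 * Example (illustrative): a driver can generate trivial get/set accessors
 * for a device flag and wrap them into a settings descriptor.  The "wcache"
 * name below is only for demonstration.
 *
 *	ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
 *	ide_devset_set_flag(wcache, IDE_DFLAG_WCACHE);
 *	IDE_DEVSET(wcache, DS_SYNC, get_wcache, set_wcache);
 *
 * The resulting ide_devset_wcache can then be executed via
 * ide_devset_execute() or exposed through the ioctl/proc interfaces below.
 */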
#ifdef CONFIG_IDE_PROC_FS
/*
 * /proc/ide interface
 */

#define ide_devset_rw_field(_name, _field) \
ide_devset_get(_name, _field); \
ide_devset_set(_name, _field); \
IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)

#define ide_devset_rw_flag(_name, _field) \
ide_devset_get_flag(_name, _field); \
ide_devset_set_flag(_name, _field); \
IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)

struct ide_proc_devset {
        const char *name;
        const struct ide_devset *setting;
        int min, max;
        int (*mulf)(ide_drive_t *);
        int (*divf)(ide_drive_t *);
};

#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
        .name = __stringify(_name), \
        .setting = &ide_devset_##_name, \
        .min = _min, \
        .max = _max, \
        .mulf = _mulf, \
        .divf = _divf, \
}

#define IDE_PROC_DEVSET(_name, _min, _max) \
__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)

typedef struct {
        const char *name;
        mode_t mode;
        read_proc_t *read_proc;
        write_proc_t *write_proc;
} ide_proc_entry_t;

void proc_ide_create(void);
void proc_ide_destroy(void);
void ide_proc_register_port(ide_hwif_t *);
void ide_proc_port_register_devices(ide_hwif_t *);
void ide_proc_unregister_device(ide_drive_t *);
void ide_proc_unregister_port(ide_hwif_t *);
void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);

read_proc_t proc_ide_read_capacity;
read_proc_t proc_ide_read_geometry;

/*
 * Standard exit stuff:
 */
#define PROC_IDE_READ_RETURN(page,start,off,count,eof,len) \
{ \
        len -= off; \
        if (len < count) { \
                *eof = 1; \
                if (len <= 0) \
                        return 0; \
        } else \
                len = count; \
        *start = page + off; \
        return len; \
}
#else
static inline void proc_ide_create(void) { ; }
static inline void proc_ide_destroy(void) { ; }
static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
static inline void ide_proc_register_driver(ide_drive_t *drive,
                                            struct ide_driver *driver) { ; }
static inline void ide_proc_unregister_driver(ide_drive_t *drive,
                                              struct ide_driver *driver) { ; }
#define PROC_IDE_READ_RETURN(page,start,off,count,eof,len) return 0;
#endif

enum {
        /* enter/exit functions */
        IDE_DBG_FUNC  = (1 << 0),
        /* sense key/asc handling */
        IDE_DBG_SENSE = (1 << 1),
        /* packet commands handling */
        IDE_DBG_PC    = (1 << 2),
        /* request handling */
        IDE_DBG_RQ    = (1 << 3),
        /* driver probing/setup */
        IDE_DBG_PROBE = (1 << 4),
};

/* DRV_NAME has to be defined in the driver before using the macro below */
#define __ide_debug_log(lvl, fmt, args...) \
{ \
        if (unlikely(drive->debug_mask & lvl)) \
                printk(KERN_INFO DRV_NAME ": %s: " fmt "\n", \
                       __func__, ## args); \
}
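/*
 * Example (illustrative): a driver defines DRV_NAME before using the macro
 * and can then emit debug messages conditioned on drive->debug_mask.  Note
 * that the macro expects a local variable named "drive" to be in scope; the
 * "rq" and "block" locals below are hypothetical.
 *
 *	#define DRV_NAME "ide-foo"
 *
 *	__ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, block: %llu",
 *			rq->cmd[0], (unsigned long long)block);
 */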
/*
 * Power Management state machine (rq->pm->pm_step).
 *
 * For each step, the core calls ide_start_power_step() first.
 * This can return:
 *	- ide_stopped : In this case, the core calls us back again unless
 *	  the step has been set to ide_power_state_completed.
 *	- ide_started : In this case, the channel is left busy until an
 *	  async event (interrupt) occurs.
 * Typically, ide_start_power_step() will issue a taskfile request with
 * do_rw_taskfile().
 *
 * Upon reception of the interrupt, the core will call ide_complete_power_step()
 * with the error code if any. This routine should update the step value
 * and return. It should not start a new request. The core will call
 * ide_start_power_step() for the new step value, unless the step has been
 * set to IDE_PM_COMPLETED.
 */
enum {
        IDE_PM_START_SUSPEND,
        IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND,
        IDE_PM_STANDBY,

        IDE_PM_START_RESUME,
        IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME,
        IDE_PM_IDLE,
        IDE_PM_RESTORE_DMA,

        IDE_PM_COMPLETED,
};
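/*
 * Rough suspend-path sketch (informational only, not normative): the core
 * walks rq->pm->pm_step through the values above, for example
 *
 *	IDE_PM_FLUSH_CACHE -> issue ATA_CMD_FLUSH_CACHE{,_EXT}
 *	IDE_PM_STANDBY     -> issue ATA_CMD_STANDBYNOW1
 *	IDE_PM_COMPLETED   -> ide_complete_pm_rq()
 *
 * with ide_start_power_step() issuing each taskfile via do_rw_taskfile()
 * and ide_complete_power_step() advancing pm_step when a command finishes.
 */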
int generic_ide_suspend(struct device *, pm_message_t);
int generic_ide_resume(struct device *);

void ide_complete_power_step(ide_drive_t *, struct request *);
ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
void ide_complete_pm_rq(ide_drive_t *, struct request *);
void ide_check_pm_state(ide_drive_t *, struct request *);

/*
 * Subdrivers support.
 *
 * The gendriver.owner field should be set to the module owner of this driver.
 * The gendriver.name field should be set to the name of this driver.
 */
struct ide_driver {
        const char *version;
        ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
        struct device_driver gen_driver;
        int  (*probe)(ide_drive_t *);
        void (*remove)(ide_drive_t *);
        void (*resume)(ide_drive_t *);
        void (*shutdown)(ide_drive_t *);
#ifdef CONFIG_IDE_PROC_FS
        ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
        const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
#endif
};

#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)

int ide_device_get(ide_drive_t *);
void ide_device_put(ide_drive_t *);

struct ide_ioctl_devset {
        unsigned int get_ioctl;
        unsigned int set_ioctl;
        const struct ide_devset *setting;
};

int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
                      unsigned long, const struct ide_ioctl_devset *);

int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);

extern int ide_vlb_clk;
extern int ide_pci_clk;

unsigned int ide_rq_bytes(struct request *);
int ide_end_rq(ide_drive_t *, struct request *, int, unsigned int);
void ide_kill_rq(ide_drive_t *, struct request *);

void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);
void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int);

void ide_execute_command(ide_drive_t *, struct ide_cmd *, ide_handler_t *,
                         unsigned int);

void ide_pad_transfer(ide_drive_t *, int, int);

ide_startstop_t ide_error(ide_drive_t *, const char *, u8);

void ide_fix_driveid(u16 *);
extern void ide_fixstring(u8 *, const int, const int);

int ide_busy_sleep(ide_drive_t *, unsigned long, int);
int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);

ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *);
ide_startstop_t ide_do_devset(ide_drive_t *, struct request *);

extern ide_startstop_t ide_do_reset(ide_drive_t *);

extern int ide_devset_execute(ide_drive_t *drive,
                              const struct ide_devset *setting, int arg);

void ide_complete_cmd(ide_drive_t *, struct ide_cmd *, u8, u8);
int ide_complete_rq(ide_drive_t *, int, unsigned int);

void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd);
void ide_tf_dump(const char *, struct ide_cmd *);

void ide_exec_command(ide_hwif_t *, u8);
u8 ide_read_status(ide_hwif_t *);
u8 ide_read_altstatus(ide_hwif_t *);
void ide_write_devctl(ide_hwif_t *, u8);

void ide_dev_select(ide_drive_t *);
void ide_tf_load(ide_drive_t *, struct ide_taskfile *, u8);
void ide_tf_read(ide_drive_t *, struct ide_taskfile *, u8);

void ide_input_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);
void ide_output_data(ide_drive_t *, struct ide_cmd *, void *, unsigned int);

void SELECT_MASK(ide_drive_t *, int);

u8 ide_read_error(ide_drive_t *);
void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);

int ide_check_atapi_device(ide_drive_t *, const char *);

void ide_init_pc(struct ide_atapi_pc *);

/* Disk head parking */
extern wait_queue_head_t ide_park_wq;
ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
                      char *buf);
ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
                       const char *buf, size_t len);

/*
 * Special requests for ide-tape block device strategy routine.
 *
 * In order to service a character device command, we add special requests to
 * the tail of our block device request queue and wait for their completion.
 */
enum {
        REQ_IDETAPE_PC1   = (1 << 0), /* packet command (first stage) */
        REQ_IDETAPE_PC2   = (1 << 1), /* packet command (second stage) */
        REQ_IDETAPE_READ  = (1 << 2),
        REQ_IDETAPE_WRITE = (1 << 3),
};

int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *);

int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
void ide_retry_pc(ide_drive_t *drive);

void ide_prep_sense(ide_drive_t *drive, struct request *rq);
int ide_queue_sense_rq(ide_drive_t *drive, void *special);

int ide_cd_expiry(ide_drive_t *);
int ide_cd_get_xferlen(struct request *);

ide_startstop_t ide_issue_pc(ide_drive_t *, struct ide_cmd *);

ide_startstop_t do_rw_taskfile(ide_drive_t *, struct ide_cmd *);

void ide_pio_bytes(ide_drive_t *, struct ide_cmd *, unsigned int, unsigned int);

void ide_finish_cmd(ide_drive_t *, struct ide_cmd *, u8);

int ide_raw_taskfile(ide_drive_t *, struct ide_cmd *, u8 *, u16);
int ide_no_data_taskfile(ide_drive_t *, struct ide_cmd *);

int ide_taskfile_ioctl(ide_drive_t *, unsigned long);

int ide_dev_read_id(ide_drive_t *, u8, u16 *);

extern int ide_driveid_update(ide_drive_t *);
extern int ide_config_drive_speed(ide_drive_t *, u8);
extern u8 eighty_ninty_three(ide_drive_t *);
extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);

extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);

extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);

extern void ide_timer_expiry(unsigned long);
extern irqreturn_t ide_intr(int irq, void *dev_id);
extern void do_ide_request(struct request_queue *);
void ide_init_disk(struct gendisk *, ide_drive_t *);

#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
#else
#define ide_pci_register_driver(d) pci_register_driver(d)
#endif
static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
                return 1;
        return 0;
}

void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
                         struct ide_hw *, struct ide_hw **);
void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);

#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
int ide_pci_set_master(struct pci_dev *, const char *);
unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
#else
static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
                                     const struct ide_port_info *d)
{
        return -EINVAL;
}
#endif

struct ide_pci_enablebit {
        u8 reg;  /* byte pci reg holding the enable-bit */
        u8 mask; /* mask to isolate the enable-bit */
        u8 val;  /* value of masked reg when "enabled" */
};

enum {
        /* Uses ISA control ports not PCI ones. */
        IDE_HFLAG_ISA_PORTS             = (1 << 0),
        /* single port device */
        IDE_HFLAG_SINGLE                = (1 << 1),
        /* don't use legacy PIO blacklist */
        IDE_HFLAG_PIO_NO_BLACKLIST      = (1 << 2),
        /* set for the second port of QD65xx */
        IDE_HFLAG_QD_2ND_PORT           = (1 << 3),
        /* use PIO8/9 for prefetch off/on */
        IDE_HFLAG_ABUSE_PREFETCH        = (1 << 4),
        /* use PIO6/7 for fast-devsel off/on */
        IDE_HFLAG_ABUSE_FAST_DEVSEL     = (1 << 5),
        /* use 100-102 and 200-202 PIO values to set DMA modes */
        IDE_HFLAG_ABUSE_DMA_MODES       = (1 << 6),
        /*
         * keep DMA setting when programming PIO mode, may be used only
         * for hosts which have separate PIO and DMA timings (ie. PMAC)
         */
        IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7),
        /* program host for the transfer mode after programming device */
        IDE_HFLAG_POST_SET_MODE         = (1 << 8),
        /* don't program host/device for the transfer mode ("smart" hosts) */
        IDE_HFLAG_NO_SET_MODE           = (1 << 9),
        /* trust BIOS for programming chipset/device for DMA */
        IDE_HFLAG_TRUST_BIOS_FOR_DMA    = (1 << 10),
        /* host is CS5510/CS5520 */
        IDE_HFLAG_CS5520                = (1 << 11),
        /* ATAPI DMA is unsupported */
        IDE_HFLAG_NO_ATAPI_DMA          = (1 << 12),
        /* set if host is a "non-bootable" controller */
        IDE_HFLAG_NON_BOOTABLE          = (1 << 13),
        /* host doesn't support DMA */
        IDE_HFLAG_NO_DMA                = (1 << 14),
        /* check if host is PCI IDE device before allowing DMA */
        IDE_HFLAG_NO_AUTODMA            = (1 << 15),
        /* host uses MMIO */
        IDE_HFLAG_MMIO                  = (1 << 16),
        /* no LBA48 */
        IDE_HFLAG_NO_LBA48              = (1 << 17),
        /* no LBA48 DMA */
        IDE_HFLAG_NO_LBA48_DMA          = (1 << 18),
        /* data FIFO is cleared by an error */
        IDE_HFLAG_ERROR_STOPS_FIFO      = (1 << 19),
        /* serialize ports */
        IDE_HFLAG_SERIALIZE             = (1 << 20),
        /* host is DTC2278 */
        IDE_HFLAG_DTC2278               = (1 << 21),
        /* 4 devices on a single set of I/O ports */
        IDE_HFLAG_4DRIVES               = (1 << 22),
        /* host is TRM290 */
        IDE_HFLAG_TRM290                = (1 << 23),
        /* use 32-bit I/O ops */
        IDE_HFLAG_IO_32BIT              = (1 << 24),
        /* unmask IRQs */
        IDE_HFLAG_UNMASK_IRQS           = (1 << 25),
        IDE_HFLAG_BROKEN_ALTSTATUS      = (1 << 26),
        /* serialize ports if DMA is possible (for sl82c105) */
        IDE_HFLAG_SERIALIZE_DMA         = (1 << 27),
        /* force host out of "simplex" mode */
        IDE_HFLAG_CLEAR_SIMPLEX         = (1 << 28),
        /* DSC overlap is unsupported */
        IDE_HFLAG_NO_DSC                = (1 << 29),
        /* never use 32-bit I/O ops */
        IDE_HFLAG_NO_IO_32BIT           = (1 << 30),
        /* never unmask IRQs */
        IDE_HFLAG_NO_UNMASK_IRQS        = (1 << 31),
};
#ifdef CONFIG_BLK_DEV_OFFBOARD
# define IDE_HFLAG_OFF_BOARD 0
#else
# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
#endif

struct ide_port_info {
        char *name;

        int (*init_chipset)(struct pci_dev *);

        void (*get_lock)(irq_handler_t, void *);
        void (*release_lock)(void);

        void (*init_iops)(ide_hwif_t *);
        void (*init_hwif)(ide_hwif_t *);
        int  (*init_dma)(ide_hwif_t *,
                         const struct ide_port_info *);

        const struct ide_tp_ops *tp_ops;
        const struct ide_port_ops *port_ops;
        const struct ide_dma_ops *dma_ops;

        struct ide_pci_enablebit enablebits[2];

        hwif_chipset_t chipset;

        u16 max_sectors; /* if < than the default one */

        u32 host_flags;

        int irq_flags;

        u8 pio_mask;
        u8 swdma_mask;
        u8 mwdma_mask;
        u8 udma_mask;
};

int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
                     const struct ide_port_info *, void *);
void ide_pci_remove(struct pci_dev *);

#ifdef CONFIG_PM
int ide_pci_suspend(struct pci_dev *, pm_message_t);
int ide_pci_resume(struct pci_dev *);
#else
#define ide_pci_suspend NULL
#define ide_pci_resume NULL
#endif

void ide_map_sg(ide_drive_t *, struct ide_cmd *);
void ide_init_sg_cmd(struct ide_cmd *, unsigned int);

#define BAD_DMA_DRIVE  0
#define GOOD_DMA_DRIVE 1

struct drive_list_entry {
        const char *id_model;
        const char *id_firmware;
};

int ide_in_drive_list(u16 *, const struct drive_list_entry *);

#ifdef CONFIG_BLK_DEV_IDEDMA
int ide_dma_good_drive(ide_drive_t *);
int __ide_dma_bad_drive(ide_drive_t *);

int ide_id_dma_bug(ide_drive_t *);

u8 ide_find_dma_mode(ide_drive_t *, u8);

static inline u8 ide_max_dma_mode(ide_drive_t *drive)
{
        return ide_find_dma_mode(drive, XFER_UDMA_6);
}

void ide_dma_off_quietly(ide_drive_t *);
void ide_dma_off(ide_drive_t *);
void ide_dma_on(ide_drive_t *);

int ide_set_dma(ide_drive_t *);
void ide_check_dma_crc(ide_drive_t *);
ide_startstop_t ide_dma_intr(ide_drive_t *);

int ide_allocate_dma_engine(ide_hwif_t *);
void ide_release_dma_engine(ide_hwif_t *);

int ide_dma_prepare(ide_drive_t *, struct ide_cmd *);
void ide_dma_unmap_sg(ide_drive_t *, struct ide_cmd *);

#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
int config_drive_for_dma(ide_drive_t *);
int ide_build_dmatable(ide_drive_t *, struct ide_cmd *);
void ide_dma_host_set(ide_drive_t *, int);
int ide_dma_setup(ide_drive_t *, struct ide_cmd *);
extern void ide_dma_start(ide_drive_t *);
int ide_dma_end(ide_drive_t *);
int ide_dma_test_irq(ide_drive_t *);
int ide_dma_sff_timer_expiry(ide_drive_t *);
u8 ide_dma_sff_read_status(ide_hwif_t *);
extern const struct ide_dma_ops sff_dma_ops;
#else
static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */

void ide_dma_lost_irq(ide_drive_t *);
ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int);

#else
static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; }
static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
static inline void ide_dma_off(ide_drive_t *drive) { ; }
static inline void ide_dma_on(ide_drive_t *drive) { ; }
static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
static inline ide_startstop_t ide_dma_intr(ide_drive_t *drive) { return ide_stopped; }
static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; }
static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
static inline int ide_dma_prepare(ide_drive_t *drive,
                                  struct ide_cmd *cmd) { return 1; }
static inline void ide_dma_unmap_sg(ide_drive_t *drive,
                                    struct ide_cmd *cmd) { ; }
#endif /* CONFIG_BLK_DEV_IDEDMA */
#ifdef CONFIG_BLK_DEV_IDEACPI
int ide_acpi_init(void);
extern int ide_acpi_exec_tfs(ide_drive_t *drive);
extern void ide_acpi_get_timing(ide_hwif_t *hwif);
extern void ide_acpi_push_timing(ide_hwif_t *hwif);
void ide_acpi_init_port(ide_hwif_t *);
void ide_acpi_port_init_devices(ide_hwif_t *);
extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
#else
static inline int ide_acpi_init(void) { return 0; }
static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
#endif

void ide_register_region(struct gendisk *);
void ide_unregister_region(struct gendisk *);

void ide_undecoded_slave(ide_drive_t *);

void ide_port_apply_params(ide_hwif_t *);
int ide_sysfs_register_port(ide_hwif_t *);

struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
                                unsigned int);
void ide_host_free(struct ide_host *);
int ide_host_register(struct ide_host *, const struct ide_port_info *,
                      struct ide_hw **);
int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
                 struct ide_host **);
void ide_host_remove(struct ide_host *);
int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
void ide_port_unregister_devices(ide_hwif_t *);
void ide_port_scan(ide_hwif_t *);

static inline void *ide_get_hwifdata(ide_hwif_t *hwif)
{
        return hwif->hwif_data;
}

static inline void ide_set_hwifdata(ide_hwif_t *hwif, void *data)
{
        hwif->hwif_data = data;
}

extern void ide_toggle_bounce(ide_drive_t *drive, int on);

u64 ide_get_lba_addr(struct ide_cmd *, int);
u8 ide_dump_status(ide_drive_t *, const char *, u8);

struct ide_timing {
        u8  mode;
        u8  setup;   /* t1 */
        u16 act8b;   /* t2 for 8-bit io */
        u16 rec8b;   /* t2i for 8-bit io */
        u16 cyc8b;   /* t0 for 8-bit io */
        u16 active;  /* t2 or tD */
        u16 recover; /* t2i or tK */
        u16 cycle;   /* t0 */
        u16 udma;    /* t2CYCTYP/2 */
};

enum {
        IDE_TIMING_SETUP   = (1 << 0),
        IDE_TIMING_ACT8B   = (1 << 1),
        IDE_TIMING_REC8B   = (1 << 2),
        IDE_TIMING_CYC8B   = (1 << 3),
        IDE_TIMING_8BIT    = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
                             IDE_TIMING_CYC8B,
        IDE_TIMING_ACTIVE  = (1 << 4),
        IDE_TIMING_RECOVER = (1 << 5),
        IDE_TIMING_CYCLE   = (1 << 6),
        IDE_TIMING_UDMA    = (1 << 7),
        IDE_TIMING_ALL     = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
                             IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
                             IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
};

struct ide_timing *ide_timing_find_mode(u8);
u16 ide_pio_cycle_time(ide_drive_t *, u8);
void ide_timing_merge(struct ide_timing *, struct ide_timing *,
                      struct ide_timing *, unsigned int);
int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);

#ifdef CONFIG_IDE_XFER_MODE
int ide_scan_pio_blacklist(char *);
const char *ide_xfer_verbose(u8);
u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8);
int ide_set_pio_mode(ide_drive_t *, u8);
int ide_set_dma_mode(ide_drive_t *, u8);
void ide_set_pio(ide_drive_t *, u8);
int ide_set_xfer_rate(ide_drive_t *, u8);
#else
static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; }
static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; }
#endif

static inline void ide_set_max_pio(ide_drive_t *drive)
{
        ide_set_pio(drive, 255);
}

char *ide_media_string(ide_drive_t *);

extern struct device_attribute ide_dev_attrs[];
extern struct bus_type ide_bus_type;
extern struct class *ide_port_class;

static inline void ide_dump_identify(u8 *id)
{
        print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
}

static inline int hwif_to_node(ide_hwif_t *hwif)
{
        return hwif->dev ? dev_to_node(hwif->dev) : -1;
}

static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
{
        ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];

        return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
}

#define ide_port_for_each_dev(i, dev, port) \
        for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)

#define ide_port_for_each_present_dev(i, dev, port) \
        for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \
                if ((dev)->dev_flags & IDE_DFLAG_PRESENT)

#define ide_host_for_each_port(i, port, host) \
        for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
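/*
 * Usage sketch for the iteration helpers above (illustrative): walk every
 * present device on a port, e.g. while applying a host-specific quirk.
 * "do_something" is a hypothetical helper.
 *
 *	ide_drive_t *drive;
 *	int i;
 *
 *	ide_port_for_each_present_dev(i, drive, hwif) {
 *		if (drive->media == ide_disk)
 *			do_something(drive);
 *	}
 */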
#endif /* _IDE_H */