/*
 * linux/drivers/ide/ide-iops.c	Version 0.37	Mar 05, 2003
 *
 * Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2003		Red Hat <alan@redhat.com>
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/bitops.h>
#include <linux/nmi.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
/*
 * Conventional PIO operations for ATA devices
 */
static u8 ide_inb (unsigned long port)
{
	return (u8) inb(port);
}

static u16 ide_inw (unsigned long port)
{
	return (u16) inw(port);
}

static void ide_insw (unsigned long port, void *addr, u32 count)
{
	insw(port, addr, count);
}

static void ide_insl (unsigned long port, void *addr, u32 count)
{
	insl(port, addr, count);
}

static void ide_outb (u8 val, unsigned long port)
{
	outb(val, port);
}

static void ide_outbsync (ide_drive_t *drive, u8 addr, unsigned long port)
{
	outb(addr, port);
}

static void ide_outw (u16 val, unsigned long port)
{
	outw(val, port);
}

static void ide_outsw (unsigned long port, void *addr, u32 count)
{
	outsw(port, addr, count);
}

static void ide_outsl (unsigned long port, void *addr, u32 count)
{
	outsl(port, addr, count);
}

void default_hwif_iops (ide_hwif_t *hwif)
{
	hwif->OUTB = ide_outb;
	hwif->OUTBSYNC = ide_outbsync;
	hwif->OUTW = ide_outw;
	hwif->OUTSW = ide_outsw;
	hwif->OUTSL = ide_outsl;
	hwif->INB = ide_inb;
	hwif->INW = ide_inw;
	hwif->INSW = ide_insw;
	hwif->INSL = ide_insl;
}
/*
 * MMIO operations, typically used for SATA controllers
 */
static u8 ide_mm_inb (unsigned long port)
{
	return (u8) readb((void __iomem *) port);
}

static u16 ide_mm_inw (unsigned long port)
{
	return (u16) readw((void __iomem *) port);
}

static void ide_mm_insw (unsigned long port, void *addr, u32 count)
{
	__ide_mm_insw((void __iomem *) port, addr, count);
}

static void ide_mm_insl (unsigned long port, void *addr, u32 count)
{
	__ide_mm_insl((void __iomem *) port, addr, count);
}

static void ide_mm_outb (u8 value, unsigned long port)
{
	writeb(value, (void __iomem *) port);
}

static void ide_mm_outbsync (ide_drive_t *drive, u8 value, unsigned long port)
{
	writeb(value, (void __iomem *) port);
}

static void ide_mm_outw (u16 value, unsigned long port)
{
	writew(value, (void __iomem *) port);
}

static void ide_mm_outsw (unsigned long port, void *addr, u32 count)
{
	__ide_mm_outsw((void __iomem *) port, addr, count);
}

static void ide_mm_outsl (unsigned long port, void *addr, u32 count)
{
	__ide_mm_outsl((void __iomem *) port, addr, count);
}

void default_hwif_mmiops (ide_hwif_t *hwif)
{
	hwif->OUTB = ide_mm_outb;
	/* Most systems will need to override OUTBSYNC, alas this
	   one is controller specific. */
	hwif->OUTBSYNC = ide_mm_outbsync;
	hwif->OUTW = ide_mm_outw;
	hwif->OUTSW = ide_mm_outsw;
	hwif->OUTSL = ide_mm_outsl;
	hwif->INB = ide_mm_inb;
	hwif->INW = ide_mm_inw;
	hwif->INSW = ide_mm_insw;
	hwif->INSL = ide_mm_insl;
}

EXPORT_SYMBOL(default_hwif_mmiops);
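
#if 0
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * MMIO-based host driver would typically install the default accessors from
 * its init_hwif hook and then override the controller-specific ones, such
 * as the OUTBSYNC noted above.  All "example_" names are made up, and the
 * posted-write flush is an assumption about such a controller, not a fact
 * about any particular chip.
 */
static void example_outbsync(ide_drive_t *drive, u8 value, unsigned long port)
{
	writeb(value, (void __iomem *) port);
	(void) readb((void __iomem *) port);	/* flush the posted write */
}

static void example_init_hwif(ide_hwif_t *hwif)
{
	default_hwif_mmiops(hwif);
	hwif->OUTBSYNC = example_outbsync;
}
#endif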
u32 ide_read_24 (ide_drive_t *drive)
{
	u8 hcyl = HWIF(drive)->INB(IDE_HCYL_REG);
	u8 lcyl = HWIF(drive)->INB(IDE_LCYL_REG);
	u8 sect = HWIF(drive)->INB(IDE_SECTOR_REG);
	return (hcyl<<16)|(lcyl<<8)|sect;
}

void SELECT_DRIVE (ide_drive_t *drive)
{
	if (HWIF(drive)->selectproc)
		HWIF(drive)->selectproc(drive);
	HWIF(drive)->OUTB(drive->select.all, IDE_SELECT_REG);
}

EXPORT_SYMBOL(SELECT_DRIVE);

void SELECT_INTERRUPT (ide_drive_t *drive)
{
	if (HWIF(drive)->intrproc)
		HWIF(drive)->intrproc(drive);
	else
		HWIF(drive)->OUTB(drive->ctl|2, IDE_CONTROL_REG);
}

void SELECT_MASK (ide_drive_t *drive, int mask)
{
	if (HWIF(drive)->maskproc)
		HWIF(drive)->maskproc(drive, mask);
}

void QUIRK_LIST (ide_drive_t *drive)
{
	if (HWIF(drive)->quirkproc)
		drive->quirk_list = HWIF(drive)->quirkproc(drive);
}
/*
 * Some localbus EIDE interfaces require a special access sequence
 * when using 32-bit I/O instructions to transfer data.  We call this
 * the "vlb_sync" sequence, which consists of three successive reads
 * of the sector count register location, with interrupts disabled
 * to ensure that the reads all happen together.
 */
static void ata_vlb_sync(ide_drive_t *drive, unsigned long port)
{
	(void) HWIF(drive)->INB(port);
	(void) HWIF(drive)->INB(port);
	(void) HWIF(drive)->INB(port);
}
/*
 * This is used for most PIO data transfers *from* the IDE interface
 */
static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 io_32bit = drive->io_32bit;

	if (io_32bit) {
		if (io_32bit & 2) {
			unsigned long flags;
			local_irq_save(flags);
			ata_vlb_sync(drive, IDE_NSECTOR_REG);
			hwif->INSL(IDE_DATA_REG, buffer, wcount);
			local_irq_restore(flags);
		} else
			hwif->INSL(IDE_DATA_REG, buffer, wcount);
	} else {
		hwif->INSW(IDE_DATA_REG, buffer, wcount<<1);
	}
}

/*
 * This is used for most PIO data transfers *to* the IDE interface
 */
static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 io_32bit = drive->io_32bit;

	if (io_32bit) {
		if (io_32bit & 2) {
			unsigned long flags;
			local_irq_save(flags);
			ata_vlb_sync(drive, IDE_NSECTOR_REG);
			hwif->OUTSL(IDE_DATA_REG, buffer, wcount);
			local_irq_restore(flags);
		} else
			hwif->OUTSL(IDE_DATA_REG, buffer, wcount);
	} else {
		hwif->OUTSW(IDE_DATA_REG, buffer, wcount<<1);
	}
}

/*
 * The following routines are mainly used by the ATAPI drivers.
 *
 * These routines will round up any request for an odd number of bytes,
 * so if an odd bytecount is specified, be sure that there's at least one
 * extra byte allocated for the buffer.
 */
static void atapi_input_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
{
	ide_hwif_t *hwif = HWIF(drive);

	++bytecount;
#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
	if (MACH_IS_ATARI || MACH_IS_Q40) {
		/* Atari has a byte-swapped IDE interface */
		insw_swapw(IDE_DATA_REG, buffer, bytecount / 2);
		return;
	}
#endif /* CONFIG_ATARI || CONFIG_Q40 */
	hwif->ata_input_data(drive, buffer, bytecount / 4);
	if ((bytecount & 0x03) >= 2)
		hwif->INSW(IDE_DATA_REG, ((u8 *)buffer)+(bytecount & ~0x03), 1);
}

static void atapi_output_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
{
	ide_hwif_t *hwif = HWIF(drive);

	++bytecount;
#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
	if (MACH_IS_ATARI || MACH_IS_Q40) {
		/* Atari has a byte-swapped IDE interface */
		outsw_swapw(IDE_DATA_REG, buffer, bytecount / 2);
		return;
	}
#endif /* CONFIG_ATARI || CONFIG_Q40 */
	hwif->ata_output_data(drive, buffer, bytecount / 4);
	if ((bytecount & 0x03) >= 2)
		hwif->OUTSW(IDE_DATA_REG, ((u8*)buffer)+(bytecount & ~0x03), 1);
}
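
/*
 * Worked example of the rounding above (illustrative, not from the original
 * source): for bytecount == 510 the increment makes it 511, so 511/4 == 127
 * dwords (508 bytes) go through ata_input_data()/ata_output_data(); since
 * 511 & 0x03 == 3 >= 2, one extra 16-bit word is transferred at offset
 * 511 & ~0x03 == 508, for 510 bytes total.  An odd bytecount such as 511
 * becomes 512 and moves entirely as 128 dwords, i.e. one byte more than
 * requested, which is why the buffer needs that spare byte.
 */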
void default_hwif_transport(ide_hwif_t *hwif)
{
	hwif->ata_input_data = ata_input_data;
	hwif->ata_output_data = ata_output_data;
	hwif->atapi_input_bytes = atapi_input_bytes;
	hwif->atapi_output_bytes = atapi_output_bytes;
}
/*
 * Beginning of Taskfile OPCODE Library and feature sets.
 */
void ide_fix_driveid (struct hd_driveid *id)
{
#ifndef __LITTLE_ENDIAN
# ifdef __BIG_ENDIAN
	int i;
	u16 *stringcast;

	id->config = __le16_to_cpu(id->config);
	id->cyls = __le16_to_cpu(id->cyls);
	id->reserved2 = __le16_to_cpu(id->reserved2);
	id->heads = __le16_to_cpu(id->heads);
	id->track_bytes = __le16_to_cpu(id->track_bytes);
	id->sector_bytes = __le16_to_cpu(id->sector_bytes);
	id->sectors = __le16_to_cpu(id->sectors);
	id->vendor0 = __le16_to_cpu(id->vendor0);
	id->vendor1 = __le16_to_cpu(id->vendor1);
	id->vendor2 = __le16_to_cpu(id->vendor2);
	stringcast = (u16 *)&id->serial_no[0];
	for (i = 0; i < (20/2); i++)
		stringcast[i] = __le16_to_cpu(stringcast[i]);
	id->buf_type = __le16_to_cpu(id->buf_type);
	id->buf_size = __le16_to_cpu(id->buf_size);
	id->ecc_bytes = __le16_to_cpu(id->ecc_bytes);
	stringcast = (u16 *)&id->fw_rev[0];
	for (i = 0; i < (8/2); i++)
		stringcast[i] = __le16_to_cpu(stringcast[i]);
	stringcast = (u16 *)&id->model[0];
	for (i = 0; i < (40/2); i++)
		stringcast[i] = __le16_to_cpu(stringcast[i]);
	id->dword_io = __le16_to_cpu(id->dword_io);
	id->reserved50 = __le16_to_cpu(id->reserved50);
	id->field_valid = __le16_to_cpu(id->field_valid);
	id->cur_cyls = __le16_to_cpu(id->cur_cyls);
	id->cur_heads = __le16_to_cpu(id->cur_heads);
	id->cur_sectors = __le16_to_cpu(id->cur_sectors);
	id->cur_capacity0 = __le16_to_cpu(id->cur_capacity0);
	id->cur_capacity1 = __le16_to_cpu(id->cur_capacity1);
	id->lba_capacity = __le32_to_cpu(id->lba_capacity);
	id->dma_1word = __le16_to_cpu(id->dma_1word);
	id->dma_mword = __le16_to_cpu(id->dma_mword);
	id->eide_pio_modes = __le16_to_cpu(id->eide_pio_modes);
	id->eide_dma_min = __le16_to_cpu(id->eide_dma_min);
	id->eide_dma_time = __le16_to_cpu(id->eide_dma_time);
	id->eide_pio = __le16_to_cpu(id->eide_pio);
	id->eide_pio_iordy = __le16_to_cpu(id->eide_pio_iordy);
	for (i = 0; i < 2; ++i)
		id->words69_70[i] = __le16_to_cpu(id->words69_70[i]);
	for (i = 0; i < 4; ++i)
		id->words71_74[i] = __le16_to_cpu(id->words71_74[i]);
	id->queue_depth = __le16_to_cpu(id->queue_depth);
	for (i = 0; i < 4; ++i)
		id->words76_79[i] = __le16_to_cpu(id->words76_79[i]);
	id->major_rev_num = __le16_to_cpu(id->major_rev_num);
	id->minor_rev_num = __le16_to_cpu(id->minor_rev_num);
	id->command_set_1 = __le16_to_cpu(id->command_set_1);
	id->command_set_2 = __le16_to_cpu(id->command_set_2);
	id->cfsse = __le16_to_cpu(id->cfsse);
	id->cfs_enable_1 = __le16_to_cpu(id->cfs_enable_1);
	id->cfs_enable_2 = __le16_to_cpu(id->cfs_enable_2);
	id->csf_default = __le16_to_cpu(id->csf_default);
	id->dma_ultra = __le16_to_cpu(id->dma_ultra);
	id->trseuc = __le16_to_cpu(id->trseuc);
	id->trsEuc = __le16_to_cpu(id->trsEuc);
	id->CurAPMvalues = __le16_to_cpu(id->CurAPMvalues);
	id->mprc = __le16_to_cpu(id->mprc);
	id->hw_config = __le16_to_cpu(id->hw_config);
	id->acoustic = __le16_to_cpu(id->acoustic);
	id->msrqs = __le16_to_cpu(id->msrqs);
	id->sxfert = __le16_to_cpu(id->sxfert);
	id->sal = __le16_to_cpu(id->sal);
	id->spg = __le32_to_cpu(id->spg);
	id->lba_capacity_2 = __le64_to_cpu(id->lba_capacity_2);
	for (i = 0; i < 22; i++)
		id->words104_125[i] = __le16_to_cpu(id->words104_125[i]);
	id->last_lun = __le16_to_cpu(id->last_lun);
	id->word127 = __le16_to_cpu(id->word127);
	id->dlf = __le16_to_cpu(id->dlf);
	id->csfo = __le16_to_cpu(id->csfo);
	for (i = 0; i < 26; i++)
		id->words130_155[i] = __le16_to_cpu(id->words130_155[i]);
	id->word156 = __le16_to_cpu(id->word156);
	for (i = 0; i < 3; i++)
		id->words157_159[i] = __le16_to_cpu(id->words157_159[i]);
	id->cfa_power = __le16_to_cpu(id->cfa_power);
	for (i = 0; i < 14; i++)
		id->words161_175[i] = __le16_to_cpu(id->words161_175[i]);
	for (i = 0; i < 31; i++)
		id->words176_205[i] = __le16_to_cpu(id->words176_205[i]);
	for (i = 0; i < 48; i++)
		id->words206_254[i] = __le16_to_cpu(id->words206_254[i]);
	id->integrity_word = __le16_to_cpu(id->integrity_word);
# else
#  error "Please fix <asm/byteorder.h>"
# endif
#endif
}

/* FIXME: exported for use by the USB storage (isd200.c) code only */
EXPORT_SYMBOL(ide_fix_driveid);
void ide_fixstring (u8 *s, const int bytecount, const int byteswap)
{
	u8 *p = s, *end = &s[bytecount & ~1]; /* bytecount must be even */

	if (byteswap) {
		/* convert from big-endian to host byte order */
		for (p = end ; p != s;) {
			unsigned short *pp = (unsigned short *) (p -= 2);
			*pp = ntohs(*pp);
		}
	}
	/* strip leading blanks */
	while (s != end && *s == ' ')
		++s;
	/* compress internal blanks and strip trailing blanks */
	while (s != end && *s) {
		if (*s++ != ' ' || (s != end && *s && *s != ' '))
			*p++ = *(s-1);
	}
	/* wipe out trailing garbage */
	while (p != end)
		*p++ = '\0';
}

EXPORT_SYMBOL(ide_fixstring);
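
/*
 * Illustrative example (not part of the original file): IDENTIFY strings
 * pack two ASCII characters per 16-bit word with the first character in
 * the high byte, so when a little-endian host reads them without swapping,
 * adjacent character pairs appear reversed -- "QEMU" shows up as "EQUM".
 * A driver would call ide_fixstring(id->model, sizeof(id->model), 1) to
 * swap each word back, compress runs of blanks and NUL-pad the tail.
 */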
/*
 * Needed for PCI irq sharing
 */
int drive_is_ready (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat = 0;

	if (drive->waiting_for_dma)
		return hwif->ide_dma_test_irq(drive);

#if 0
	/* need to guarantee 400ns since last command was issued */
	udelay(1);
#endif

#ifdef CONFIG_IDEPCI_SHARE_IRQ
	/*
	 * We do a passive status test under shared PCI interrupts on
	 * cards that truly share the ATA side interrupt, but may also share
	 * an interrupt with another pci card/device.  We make no assumptions
	 * about possible isa-pnp and pci-pnp issues yet.
	 */
	if (IDE_CONTROL_REG)
		stat = hwif->INB(IDE_ALTSTATUS_REG);
	else
#endif /* CONFIG_IDEPCI_SHARE_IRQ */
		/* Note: this may clear a pending IRQ!! */
		stat = hwif->INB(IDE_STATUS_REG);

	if (stat & BUSY_STAT)
		/* drive busy: definitely not interrupting */
		return 0;

	/* drive ready: *might* be interrupting */
	return 1;
}

EXPORT_SYMBOL(drive_is_ready);
/*
 * Global for All, and taken from ide-pmac.c.  Can be called
 * with spinlock held & IRQs disabled, so don't schedule!
 */
int wait_for_ready (ide_drive_t *drive, int timeout)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat = 0;

	while(--timeout) {
		stat = hwif->INB(IDE_STATUS_REG);
		if (!(stat & BUSY_STAT)) {
			if (drive->ready_stat == 0)
				break;
			else if ((stat & drive->ready_stat)||(stat & ERR_STAT))
				break;
		}
		mdelay(1);
	}
	if ((stat & ERR_STAT) || timeout <= 0) {
		if (stat & ERR_STAT) {
			printk(KERN_ERR "%s: wait_for_ready, "
				"error status: %x\n", drive->name, stat);
		}
		return 1;
	}
	return 0;
}
/*
 * This routine busy-waits for the drive status to be not "busy".
 * It then checks the status for all of the "good" bits and none
 * of the "bad" bits, and if all is okay it returns 0.  All other
 * cases return 1 after invoking ide_error() -- the caller should just return.
 *
 * This routine should get fixed to not hog the CPU during extra long waits.
 * That could be done by busy-waiting for the first jiffy or two, and then
 * setting a timer to wake up at half-second intervals thereafter,
 * until the timeout expires.
 */
int ide_wait_stat (ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;
	int i;
	unsigned long flags;

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		*startstop = ide_stopped;
		return 1;
	}

	udelay(1);	/* spec allows drive 400ns to assert "BUSY" */
	if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
		local_irq_set(flags);
		timeout += jiffies;
		while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
			if (time_after(jiffies, timeout)) {
				/*
				 * One last read after the timeout in case
				 * heavy interrupt load made us not make any
				 * progress during the timeout.
				 */
				stat = hwif->INB(IDE_STATUS_REG);
				if (!(stat & BUSY_STAT))
					break;

				local_irq_restore(flags);
				*startstop = ide_error(drive, "status timeout", stat);
				return 1;
			}
		}
		local_irq_restore(flags);
	}
	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), good, bad))
			return 0;
	}
	*startstop = ide_error(drive, "status error", stat);
	return 1;
}

EXPORT_SYMBOL(ide_wait_stat);
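
#if 0
/*
 * Illustrative sketch only (not in the original file): a hypothetical
 * request-start path would use ide_wait_stat() roughly like this,
 * returning whatever ide_error() decided when the status never settles.
 * "example_do_cmd" is a made-up name.
 */
static ide_startstop_t example_do_cmd(ide_drive_t *drive)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, DRIVE_READY,
			  BUSY_STAT | DRQ_STAT | ERR_STAT, WAIT_READY))
		return startstop;	/* ide_error() already ran */

	/* ... issue the actual command here ... */
	return ide_started;
}
#endif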
/*
 * All hosts that use the 80-conductor (80c) ribbon must call this.
 * The name is derived from the upper byte of word 93 and the 80c ribbon.
 */
u8 eighty_ninty_three (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct hd_driveid *id = drive->id;

	if (hwif->udma_four == 0)
		goto no_80w;

	/* Check for SATA but only if we are ATA5 or higher */
	if (id->hw_config == 0 && (id->major_rev_num & 0x7FE0))
		return 1;

	/*
	 * FIXME:
	 * - change master/slave IDENTIFY order
	 * - force bit13 (80c cable present) check
	 *   (unless the slave device is pre-ATA3)
	 */
#ifndef CONFIG_IDEDMA_IVB
	if (id->hw_config & 0x4000)
#else
	if (id->hw_config & 0x6000)
#endif
		return 1;

no_80w:
	if (drive->udma33_warned == 1)
		return 0;

	printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
			    "limiting max speed to UDMA33\n",
			    drive->name, hwif->udma_four ? "drive" : "host");

	drive->udma33_warned = 1;

	return 0;
}

int ide_ata66_check (ide_drive_t *drive, ide_task_t *args)
{
	if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
	    (args->tfRegister[IDE_SECTOR_OFFSET] > XFER_UDMA_2) &&
	    (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER)) {
		if (eighty_ninty_three(drive) == 0) {
			printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot "
					    "be set\n", drive->name);
			return 1;
		}
	}

	return 0;
}
/*
 * Backside of HDIO_DRIVE_CMD call of SETFEATURES_XFER.
 * 1 : safe to update drive->id DMA registers.
 * 0 : not safe; do not update.
 */
int set_transfer (ide_drive_t *drive, ide_task_t *args)
{
	if ((args->tfRegister[IDE_COMMAND_OFFSET] == WIN_SETFEATURES) &&
	    (args->tfRegister[IDE_SECTOR_OFFSET] >= XFER_SW_DMA_0) &&
	    (args->tfRegister[IDE_FEATURE_OFFSET] == SETFEATURES_XFER) &&
	    (drive->id->dma_ultra ||
	     drive->id->dma_mword ||
	     drive->id->dma_1word))
		return 1;

	return 0;
}

#ifdef CONFIG_BLK_DEV_IDEDMA
static u8 ide_auto_reduce_xfer (ide_drive_t *drive)
{
	if (!drive->crc_count)
		return drive->current_speed;
	drive->crc_count = 0;

	switch(drive->current_speed) {
		case XFER_UDMA_7:	return XFER_UDMA_6;
		case XFER_UDMA_6:	return XFER_UDMA_5;
		case XFER_UDMA_5:	return XFER_UDMA_4;
		case XFER_UDMA_4:	return XFER_UDMA_3;
		case XFER_UDMA_3:	return XFER_UDMA_2;
		case XFER_UDMA_2:	return XFER_UDMA_1;
		case XFER_UDMA_1:	return XFER_UDMA_0;
			/*
			 * We do not fall back to the non-Ultra DMA modes
			 * here: without CRC counts available we force the
			 * system to PIO and make the user explicitly invoke
			 * the ATA-1/ATA-2 DMA modes.
			 */
		case XFER_UDMA_0:
		default:		return XFER_PIO_4;
	}
}
#endif /* CONFIG_BLK_DEV_IDEDMA */
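
/*
 * Illustrative example (not from the original source): a drive running at
 * XFER_UDMA_5 that has accumulated CRC errors (drive->crc_count != 0) is
 * stepped down one notch to XFER_UDMA_4 by ide_auto_reduce_xfer() and the
 * error count is cleared; once it reaches XFER_UDMA_0, any further CRC
 * errors drop it all the way to XFER_PIO_4 as described above.
 */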
/*
 * Update drive->id by re-issuing an IDENTIFY, mainly to pick up a
 * possible DMA mode change.
 */
int ide_driveid_update (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	struct hd_driveid *id;
#if 0
	id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
	if (!id)
		return 0;

	taskfile_lib_get_identify(drive, (char *)&id);

	ide_fix_driveid(id);
	if (id) {
		drive->id->dma_ultra = id->dma_ultra;
		drive->id->dma_mword = id->dma_mword;
		drive->id->dma_1word = id->dma_1word;
		/* anything more ? */
		kfree(id);
	}
	return 1;
#else
	/*
	 * Re-read drive->id for possible DMA mode
	 * change (copied from ide-probe.c)
	 */
	unsigned long timeout, flags;

	SELECT_MASK(drive, 1);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl,IDE_CONTROL_REG);
	msleep(50);
	hwif->OUTB(WIN_IDENTIFY, IDE_COMMAND_REG);
	timeout = jiffies + WAIT_WORSTCASE;
	do {
		if (time_after(jiffies, timeout)) {
			SELECT_MASK(drive, 0);
			return 0;	/* drive timed-out */
		}
		msleep(50);	/* give drive a breather */
	} while (hwif->INB(IDE_ALTSTATUS_REG) & BUSY_STAT);
	msleep(50);	/* wait for IRQ and DRQ_STAT */
	if (!OK_STAT(hwif->INB(IDE_STATUS_REG),DRQ_STAT,BAD_R_STAT)) {
		SELECT_MASK(drive, 0);
		printk("%s: CHECK for good STATUS\n", drive->name);
		return 0;
	}
	local_irq_save(flags);
	SELECT_MASK(drive, 0);
	id = kmalloc(SECTOR_WORDS*4, GFP_ATOMIC);
	if (!id) {
		local_irq_restore(flags);
		return 0;
	}
	ata_input_data(drive, id, SECTOR_WORDS);
	(void) hwif->INB(IDE_STATUS_REG);	/* clear drive IRQ */
	local_irq_enable();
	local_irq_restore(flags);
	ide_fix_driveid(id);
	if (id) {
		drive->id->dma_ultra = id->dma_ultra;
		drive->id->dma_mword = id->dma_mword;
		drive->id->dma_1word = id->dma_1word;
		/* anything more ? */
		kfree(id);
	}
	return 1;
#endif
}
/*
 * Similar to ide_wait_stat(), except it never calls ide_error() internally.
 * This is a kludge to handle the new ide_config_drive_speed() function,
 * and should not otherwise be used anywhere.  Eventually, the tuneprocs
 * should be updated to return ide_startstop_t, in which case we can get
 * rid of this abomination again. :) -ml
 *
 * It is gone.
 *
 * const char *msg == consider adding for verbose errors.
 */
int ide_config_drive_speed (ide_drive_t *drive, u8 speed)
{
	ide_hwif_t *hwif = HWIF(drive);
	int i, error = 1;
	u8 stat;

//	while (HWGROUP(drive)->busy)
//		msleep(50);

#ifdef CONFIG_BLK_DEV_IDEDMA
	if (hwif->ide_dma_check)	/* check if host supports DMA */
		hwif->dma_host_off(drive);
#endif

	/*
	 * Don't use ide_wait_cmd here - it will
	 * attempt to set_geometry and recalibrate,
	 * but for some reason these don't work at
	 * this point (lost interrupt).
	 */
	/*
	 * Select the drive, and issue the SETFEATURES command
	 */
	disable_irq_nosync(hwif->irq);

	/*
	 * FIXME: we race against the running IRQ here if
	 * this is called from non IRQ context. If we use
	 * disable_irq() we hang on the error path. Work
	 * is needed.
	 */

	udelay(1);
	SELECT_DRIVE(drive);
	SELECT_MASK(drive, 0);
	udelay(1);
	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl | 2, IDE_CONTROL_REG);
	hwif->OUTB(speed, IDE_NSECTOR_REG);
	hwif->OUTB(SETFEATURES_XFER, IDE_FEATURE_REG);
	hwif->OUTB(WIN_SETFEATURES, IDE_COMMAND_REG);
	if ((IDE_CONTROL_REG) && (drive->quirk_list == 2))
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	udelay(1);
	/*
	 * Wait for drive to become non-BUSY
	 */
	if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
		unsigned long flags, timeout;
		local_irq_set(flags);
		timeout = jiffies + WAIT_CMD;
		while ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) {
			if (time_after(jiffies, timeout))
				break;
		}
		local_irq_restore(flags);
	}

	/*
	 * Allow status to settle, then read it again.
	 * A few rare drives vastly violate the 400ns spec here,
	 * so we'll wait up to 10usec for a "good" status
	 * rather than expensively fail things immediately.
	 * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
	 */
	for (i = 0; i < 10; i++) {
		udelay(1);
		if (OK_STAT((stat = hwif->INB(IDE_STATUS_REG)), DRIVE_READY, BUSY_STAT|DRQ_STAT|ERR_STAT)) {
			error = 0;
			break;
		}
	}

	SELECT_MASK(drive, 0);

	enable_irq(hwif->irq);

	if (error) {
		(void) ide_dump_status(drive, "set_drive_speed_status", stat);
		return error;
	}

	drive->id->dma_ultra &= ~0xFF00;
	drive->id->dma_mword &= ~0x0F00;
	drive->id->dma_1word &= ~0x0F00;

#ifdef CONFIG_BLK_DEV_IDEDMA
	if (speed >= XFER_SW_DMA_0)
		hwif->dma_host_on(drive);
	else if (hwif->ide_dma_check)	/* check if host supports DMA */
		hwif->dma_off_quietly(drive);
#endif

	switch(speed) {
		case XFER_UDMA_7:   drive->id->dma_ultra |= 0x8080; break;
		case XFER_UDMA_6:   drive->id->dma_ultra |= 0x4040; break;
		case XFER_UDMA_5:   drive->id->dma_ultra |= 0x2020; break;
		case XFER_UDMA_4:   drive->id->dma_ultra |= 0x1010; break;
		case XFER_UDMA_3:   drive->id->dma_ultra |= 0x0808; break;
		case XFER_UDMA_2:   drive->id->dma_ultra |= 0x0404; break;
		case XFER_UDMA_1:   drive->id->dma_ultra |= 0x0202; break;
		case XFER_UDMA_0:   drive->id->dma_ultra |= 0x0101; break;
		case XFER_MW_DMA_2: drive->id->dma_mword |= 0x0404; break;
		case XFER_MW_DMA_1: drive->id->dma_mword |= 0x0202; break;
		case XFER_MW_DMA_0: drive->id->dma_mword |= 0x0101; break;
		case XFER_SW_DMA_2: drive->id->dma_1word |= 0x0404; break;
		case XFER_SW_DMA_1: drive->id->dma_1word |= 0x0202; break;
		case XFER_SW_DMA_0: drive->id->dma_1word |= 0x0101; break;
		default: break;
	}
	if (!drive->init_speed)
		drive->init_speed = speed;
	drive->current_speed = speed;
	return error;
}

EXPORT_SYMBOL(ide_config_drive_speed);
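
#if 0
/*
 * Illustrative sketch only (not in the original file): a hypothetical host
 * speedproc would program its controller timing registers first and then
 * ask the drive to switch with ide_config_drive_speed(); a non-zero return
 * means the drive never acknowledged the SETFEATURES command.
 * "example_speedproc" is a made-up name.
 */
static int example_speedproc(ide_drive_t *drive, u8 speed)
{
	/* ... program chipset timings for "speed" here ... */
	return ide_config_drive_speed(drive, speed);
}
#endif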
/*
 * This should get invoked any time we exit the driver to
 * wait for an interrupt response from a drive.  handler() points
 * at the appropriate code to handle the next interrupt, and a
 * timer is started to prevent us from waiting forever in case
 * something goes wrong (see the ide_timer_expiry() handler later on).
 *
 * See also ide_execute_command
 */
static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
		      unsigned int timeout, ide_expiry_t *expiry)
{
	ide_hwgroup_t *hwgroup = HWGROUP(drive);

	if (hwgroup->handler != NULL) {
		printk(KERN_CRIT "%s: ide_set_handler: handler not null; "
			"old=%p, new=%p\n",
			drive->name, hwgroup->handler, handler);
	}
	hwgroup->handler = handler;
	hwgroup->expiry = expiry;
	hwgroup->timer.expires = jiffies + timeout;
	hwgroup->req_gen_timer = hwgroup->req_gen;
	add_timer(&hwgroup->timer);
}

void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
		      unsigned int timeout, ide_expiry_t *expiry)
{
	unsigned long flags;
	spin_lock_irqsave(&ide_lock, flags);
	__ide_set_handler(drive, handler, timeout, expiry);
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_set_handler);
/**
 *	ide_execute_command	-	execute an IDE command
 *	@drive: IDE drive to issue the command against
 *	@command: command byte to write
 *	@handler: handler for next phase
 *	@timeout: timeout for command
 *	@expiry: handler to run on timeout
 *
 *	Helper function to issue an IDE command. This handles the
 *	atomicity requirements, command timing and ensures that the
 *	handler and IRQ setup do not race. All IDE command kick off
 *	should go via this function or do equivalent locking.
 */
void ide_execute_command(ide_drive_t *drive, task_ioreg_t cmd, ide_handler_t *handler, unsigned timeout, ide_expiry_t *expiry)
{
	unsigned long flags;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	ide_hwif_t *hwif = HWIF(drive);

	spin_lock_irqsave(&ide_lock, flags);

	BUG_ON(hwgroup->handler);
	hwgroup->handler = handler;
	hwgroup->expiry = expiry;
	hwgroup->timer.expires = jiffies + timeout;
	hwgroup->req_gen_timer = hwgroup->req_gen;
	add_timer(&hwgroup->timer);
	hwif->OUTBSYNC(drive, cmd, IDE_COMMAND_REG);
	/*
	 * The drive takes 400ns to respond; we must avoid the IRQ being
	 * serviced before that.
	 *
	 * FIXME: we could skip this delay with care on non-shared devices.
	 */
	ndelay(400);
	spin_unlock_irqrestore(&ide_lock, flags);
}

EXPORT_SYMBOL(ide_execute_command);
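
#if 0
/*
 * Illustrative sketch only (not in the original file): issuing a simple
 * non-data command through ide_execute_command() with a hypothetical
 * completion handler that runs from the IDE interrupt path.  The
 * "example_" names are made up.
 */
static ide_startstop_t example_intr(ide_drive_t *drive)
{
	u8 stat = HWIF(drive)->INB(IDE_STATUS_REG);

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "example_intr", stat);
	return ide_stopped;
}

static ide_startstop_t example_issue(ide_drive_t *drive)
{
	ide_execute_command(drive, WIN_IDLEIMMEDIATE, &example_intr,
			    WAIT_CMD, NULL);
	return ide_started;
}
#endif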
/* needed below */
static ide_startstop_t do_reset1 (ide_drive_t *, int);

/*
 * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
 * during an atapi drive reset operation. If the drive has not yet responded,
 * and we have not yet hit our maximum waiting time, then the timer is restarted
 * for another 50ms.
 */
static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
{
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	SELECT_DRIVE(drive);
	udelay (10);

	if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
		printk("%s: ATAPI reset complete\n", drive->name);
	} else {
		if (time_before(jiffies, hwgroup->poll_timeout)) {
			BUG_ON(HWGROUP(drive)->handler != NULL);
			ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
			/* continue polling */
			return ide_started;
		}
		/* end of polling */
		hwgroup->polling = 0;
		printk("%s: ATAPI reset timed-out, status=0x%02x\n",
				drive->name, stat);
		/* do it the old fashioned way */
		return do_reset1(drive, 1);
	}
	/* done polling */
	hwgroup->polling = 0;
	hwgroup->resetting = 0;
	return ide_stopped;
}
/*
 * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
 * during an ide reset operation. If the drives have not yet responded,
 * and we have not yet hit our maximum waiting time, then the timer is restarted
 * for another 50ms.
 */
static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
{
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	ide_hwif_t *hwif = HWIF(drive);
	u8 tmp;

	if (hwif->reset_poll != NULL) {
		if (hwif->reset_poll(drive)) {
			printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
				hwif->name, drive->name);
			return ide_stopped;
		}
	}

	if (!OK_STAT(tmp = hwif->INB(IDE_STATUS_REG), 0, BUSY_STAT)) {
		if (time_before(jiffies, hwgroup->poll_timeout)) {
			BUG_ON(HWGROUP(drive)->handler != NULL);
			ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
			/* continue polling */
			return ide_started;
		}
		printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
		drive->failures++;
	} else {
		printk("%s: reset: ", hwif->name);
		if ((tmp = hwif->INB(IDE_ERROR_REG)) == 1) {
			printk("success\n");
			drive->failures = 0;
		} else {
			drive->failures++;
			printk("master: ");
			switch (tmp & 0x7f) {
				case 1: printk("passed");
					break;
				case 2: printk("formatter device error");
					break;
				case 3: printk("sector buffer error");
					break;
				case 4: printk("ECC circuitry error");
					break;
				case 5: printk("controlling MPU error");
					break;
				default: printk("error (0x%02x?)", tmp);
			}
			if (tmp & 0x80)
				printk("; slave: failed");
			printk("\n");
		}
	}
	hwgroup->polling = 0;	/* done polling */
	hwgroup->resetting = 0;	/* done reset attempt */
	return ide_stopped;
}
static void check_dma_crc(ide_drive_t *drive)
{
#ifdef CONFIG_BLK_DEV_IDEDMA
	if (drive->crc_count) {
		drive->hwif->dma_off_quietly(drive);
		ide_set_xfer_rate(drive, ide_auto_reduce_xfer(drive));
		if (drive->current_speed >= XFER_SW_DMA_0)
			(void) HWIF(drive)->ide_dma_on(drive);
	} else
		ide_dma_off(drive);
#endif
}

static void ide_disk_pre_reset(ide_drive_t *drive)
{
	int legacy = (drive->id->cfs_enable_2 & 0x0400) ? 0 : 1;

	drive->special.all = 0;
	drive->special.b.set_geometry = legacy;
	drive->special.b.recalibrate = legacy;
	if (OK_TO_RESET_CONTROLLER)
		drive->mult_count = 0;
	if (!drive->keep_settings && !drive->using_dma)
		drive->mult_req = 0;
	if (drive->mult_req != drive->mult_count)
		drive->special.b.set_multmode = 1;
}
static void pre_reset(ide_drive_t *drive)
{
	if (drive->media == ide_disk)
		ide_disk_pre_reset(drive);
	else
		drive->post_reset = 1;

	if (!drive->keep_settings) {
		if (drive->using_dma) {
			check_dma_crc(drive);
		} else {
			drive->unmask = 0;
			drive->io_32bit = 0;
		}
		return;
	}
	if (drive->using_dma)
		check_dma_crc(drive);

	if (HWIF(drive)->pre_reset != NULL)
		HWIF(drive)->pre_reset(drive);

	if (drive->current_speed != 0xff)
		drive->desired_speed = drive->current_speed;
	drive->current_speed = 0xff;
}
/*
 * do_reset1() attempts to recover a confused drive by resetting it.
 * Unfortunately, resetting a disk drive actually resets all devices on
 * the same interface, so it can really be thought of as resetting the
 * interface rather than resetting the drive.
 *
 * ATAPI devices have their own reset mechanism which allows them to be
 * individually reset without clobbering other devices on the same interface.
 *
 * Unfortunately, the IDE interface does not generate an interrupt to let
 * us know when the reset operation has finished, so we must poll for this.
 * Equally poor, though, is the fact that this may take a very long time
 * to complete (up to 30 seconds worstcase).  So, instead of busy-waiting
 * here for it, we set a timer to poll at 50ms intervals.
 */
static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
{
	unsigned int unit;
	unsigned long flags;
	ide_hwif_t *hwif;
	ide_hwgroup_t *hwgroup;

	spin_lock_irqsave(&ide_lock, flags);
	hwif = HWIF(drive);
	hwgroup = HWGROUP(drive);

	/* We must not reset with running handlers */
	BUG_ON(hwgroup->handler != NULL);

	/* For an ATAPI device, first try an ATAPI SRST. */
	if (drive->media != ide_disk && !do_not_try_atapi) {
		hwgroup->resetting = 1;
		pre_reset(drive);
		SELECT_DRIVE(drive);
		udelay (20);
		hwif->OUTBSYNC(drive, WIN_SRST, IDE_COMMAND_REG);
		ndelay(400);
		hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
		hwgroup->polling = 1;
		__ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
		spin_unlock_irqrestore(&ide_lock, flags);
		return ide_started;
	}

	/*
	 * First, reset any device state data we were maintaining
	 * for any of the drives on this interface.
	 */
	for (unit = 0; unit < MAX_DRIVES; ++unit)
		pre_reset(&hwif->drives[unit]);

#if OK_TO_RESET_CONTROLLER
	if (!IDE_CONTROL_REG) {
		spin_unlock_irqrestore(&ide_lock, flags);
		return ide_stopped;
	}

	hwgroup->resetting = 1;
	/*
	 * Note that we also set nIEN while resetting the device,
	 * to mask unwanted interrupts from the interface during the reset.
	 * However, due to the design of PC hardware, this will cause an
	 * immediate interrupt due to the edge transition it produces.
	 * This single interrupt gives us a "fast poll" for drives that
	 * recover from reset very quickly, saving us the first 50ms wait time.
	 */
	/* set SRST and nIEN */
	hwif->OUTBSYNC(drive, drive->ctl|6,IDE_CONTROL_REG);
	/* more than enough time */
	udelay(10);
	if (drive->quirk_list == 2) {
		/* clear SRST and nIEN */
		hwif->OUTBSYNC(drive, drive->ctl, IDE_CONTROL_REG);
	} else {
		/* clear SRST, leave nIEN */
		hwif->OUTBSYNC(drive, drive->ctl|2, IDE_CONTROL_REG);
	}
	/* more than enough time */
	udelay(10);
	hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
	hwgroup->polling = 1;
	__ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
	/*
	 * Some weird controllers like to reset themselves to a strange
	 * state when the disks are reset this way.  At least, the Winbond
	 * 553 documentation says so.
	 */
	if (hwif->resetproc != NULL) {
		hwif->resetproc(drive);
	}

#endif	/* OK_TO_RESET_CONTROLLER */

	spin_unlock_irqrestore(&ide_lock, flags);
	return ide_started;
}

/*
 * ide_do_reset() is the entry point to the drive/interface reset code.
 */
ide_startstop_t ide_do_reset (ide_drive_t *drive)
{
	return do_reset1(drive, 0);
}

EXPORT_SYMBOL(ide_do_reset);
/*
 * ide_wait_not_busy() waits for the currently selected device on the hwif
 * to report a non-busy status, see comments in probe_hwif().
 */
int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
{
	u8 stat = 0;

	while(timeout--) {
		/*
		 * Turn this into a schedule() sleep once I'm sure
		 * about locking issues (2.5 work ?).
		 */
		mdelay(1);
		stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
		if ((stat & BUSY_STAT) == 0)
			return 0;
		/*
		 * Assume a value of 0xff means nothing is connected to
		 * the interface and it doesn't implement the pull-down
		 * resistor on D7.
		 */
		if (stat == 0xff)
			return -ENODEV;
		touch_softlockup_watchdog();
		touch_nmi_watchdog();
	}
	return -EBUSY;
}

EXPORT_SYMBOL_GPL(ide_wait_not_busy);
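
#if 0
/*
 * Illustrative sketch only (not in the original file): probe-time code
 * would typically give the selected device a couple of seconds to clear
 * BSY before talking to it, treating 0xff as "nothing attached".  The
 * 2000-iteration timeout (~2s in 1ms steps) and the "example_" name are
 * assumptions for illustration.
 */
static int example_probe_wait(ide_hwif_t *hwif)
{
	int rc = ide_wait_not_busy(hwif, 2000);

	if (rc == -ENODEV)
		printk(KERN_DEBUG "%s: no device\n", hwif->name);
	return rc;
}
#endif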