  1. /*
  2. * libata-eh.c - libata error handling
  3. *
  4. * Maintained by: Jeff Garzik <jgarzik@pobox.com>
  5. * Please ALWAYS copy linux-ide@vger.kernel.org
  6. * on emails.
  7. *
  8. * Copyright 2006 Tejun Heo <htejun@gmail.com>
  9. *
  10. *
  11. * This program is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU General Public License as
  13. * published by the Free Software Foundation; either version 2, or
  14. * (at your option) any later version.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  19. * General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; see the file COPYING. If not, write to
  23. * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
  24. * USA.
  25. *
  26. *
  27. * libata documentation is available via 'make {ps|pdf}docs',
  28. * as Documentation/DocBook/libata.*
  29. *
  30. * Hardware documentation available from http://www.t13.org/ and
  31. * http://www.sata-io.org/
  32. *
  33. */
  34. #include <linux/config.h>
  35. #include <linux/kernel.h>
  36. #include <scsi/scsi.h>
  37. #include <scsi/scsi_host.h>
  38. #include <scsi/scsi_eh.h>
  39. #include <scsi/scsi_device.h>
  40. #include <scsi/scsi_cmnd.h>
  41. #include "scsi_transport_api.h"
  42. #include <linux/libata.h>
  43. #include "libata.h"
  44. static void __ata_port_freeze(struct ata_port *ap);
  45. static void ata_eh_finish(struct ata_port *ap);
  46. static void ata_eh_handle_port_suspend(struct ata_port *ap);
  47. static void ata_eh_handle_port_resume(struct ata_port *ap);
  48. static void ata_ering_record(struct ata_ering *ering, int is_io,
  49. unsigned int err_mask)
  50. {
  51. struct ata_ering_entry *ent;
  52. WARN_ON(!err_mask);
  53. ering->cursor++;
  54. ering->cursor %= ATA_ERING_SIZE;
  55. ent = &ering->ring[ering->cursor];
  56. ent->is_io = is_io;
  57. ent->err_mask = err_mask;
  58. ent->timestamp = get_jiffies_64();
  59. }
  60. static struct ata_ering_entry * ata_ering_top(struct ata_ering *ering)
  61. {
  62. struct ata_ering_entry *ent = &ering->ring[ering->cursor];
  63. if (!ent->err_mask)
  64. return NULL;
  65. return ent;
  66. }
  67. static int ata_ering_map(struct ata_ering *ering,
  68. int (*map_fn)(struct ata_ering_entry *, void *),
  69. void *arg)
  70. {
  71. int idx, rc = 0;
  72. struct ata_ering_entry *ent;
  73. idx = ering->cursor;
  74. do {
  75. ent = &ering->ring[idx];
  76. if (!ent->err_mask)
  77. break;
  78. rc = map_fn(ent, arg);
  79. if (rc)
  80. break;
  81. idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
  82. } while (idx != ering->cursor);
  83. return rc;
  84. }
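/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * ata_ering_map() walks the ring from the newest entry backwards and stops
 * as soon as the callback returns non-zero.  A hypothetical callback that
 * counts timeout entries could look like this.
 */
#if 0	/* example only */
static int count_timeouts_cb(struct ata_ering_entry *ent, void *arg)
{
	int *nr_timeouts = arg;

	if (ent->err_mask & AC_ERR_TIMEOUT)
		(*nr_timeouts)++;
	return 0;	/* 0 means keep walking the ring */
}

/* usage: int n = 0; ata_ering_map(&dev->ering, count_timeouts_cb, &n); */
#endif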
  85. static unsigned int ata_eh_dev_action(struct ata_device *dev)
  86. {
  87. struct ata_eh_context *ehc = &dev->ap->eh_context;
  88. return ehc->i.action | ehc->i.dev_action[dev->devno];
  89. }
  90. static void ata_eh_clear_action(struct ata_device *dev,
  91. struct ata_eh_info *ehi, unsigned int action)
  92. {
  93. int i;
  94. if (!dev) {
  95. ehi->action &= ~action;
  96. for (i = 0; i < ATA_MAX_DEVICES; i++)
  97. ehi->dev_action[i] &= ~action;
  98. } else {
  99. /* doesn't make sense for port-wide EH actions */
  100. WARN_ON(!(action & ATA_EH_PERDEV_MASK));
  101. /* break ehi->action into ehi->dev_action */
  102. if (ehi->action & action) {
  103. for (i = 0; i < ATA_MAX_DEVICES; i++)
  104. ehi->dev_action[i] |= ehi->action & action;
  105. ehi->action &= ~action;
  106. }
  107. /* turn off the specified per-dev action */
  108. ehi->dev_action[dev->devno] &= ~action;
  109. }
  110. }
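/*
 * Worked example (editor's addition): suppose ehi->action has
 * ATA_EH_REVALIDATE set and ata_eh_clear_action() is called for devno 0
 * with that action.  The port-wide bit is first copied into every
 * dev_action[] slot, then cleared from ehi->action and from
 * dev_action[0] only, so the other devices still see the pending
 * revalidation while device 0 does not.
 */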
  111. /**
  112. * ata_scsi_timed_out - SCSI layer time out callback
  113. * @cmd: timed out SCSI command
  114. *
  115. * Handles SCSI layer timeout. We race with normal completion of
  116. * the qc for @cmd. If the qc is already gone, we lose and let
  117. * the scsi command finish (EH_HANDLED). Otherwise, the qc has
  118. * timed out and EH should be invoked. Prevent ata_qc_complete()
  119. * from finishing it by setting EH_SCHEDULED and return
  120. * EH_NOT_HANDLED.
  121. *
  122. * TODO: kill this function once old EH is gone.
  123. *
  124. * LOCKING:
  125. * Called from timer context
  126. *
  127. * RETURNS:
  128. * EH_HANDLED or EH_NOT_HANDLED
  129. */
  130. enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
  131. {
  132. struct Scsi_Host *host = cmd->device->host;
  133. struct ata_port *ap = ata_shost_to_port(host);
  134. unsigned long flags;
  135. struct ata_queued_cmd *qc;
  136. enum scsi_eh_timer_return ret;
  137. DPRINTK("ENTER\n");
  138. if (ap->ops->error_handler) {
  139. ret = EH_NOT_HANDLED;
  140. goto out;
  141. }
  142. ret = EH_HANDLED;
  143. spin_lock_irqsave(ap->lock, flags);
  144. qc = ata_qc_from_tag(ap, ap->active_tag);
  145. if (qc) {
  146. WARN_ON(qc->scsicmd != cmd);
  147. qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
  148. qc->err_mask |= AC_ERR_TIMEOUT;
  149. ret = EH_NOT_HANDLED;
  150. }
  151. spin_unlock_irqrestore(ap->lock, flags);
  152. out:
  153. DPRINTK("EXIT, ret=%d\n", ret);
  154. return ret;
  155. }
  156. /**
  157. * ata_scsi_error - SCSI layer error handler callback
  158. * @host: SCSI host on which error occurred
  159. *
  160. * Handles SCSI-layer-thrown error events.
  161. *
  162. * LOCKING:
  163. * Inherited from SCSI layer (none, can sleep)
  164. *
  165. * RETURNS:
  166. * Zero.
  167. */
  168. void ata_scsi_error(struct Scsi_Host *host)
  169. {
  170. struct ata_port *ap = ata_shost_to_port(host);
  171. int i, repeat_cnt = ATA_EH_MAX_REPEAT;
  172. unsigned long flags;
  173. DPRINTK("ENTER\n");
  174. /* synchronize with port task */
  175. ata_port_flush_task(ap);
  176. /* synchronize with host_set lock and sort out timeouts */
  177. /* For new EH, all qcs are finished in one of three ways -
  178. * normal completion, error completion, and SCSI timeout.
  179. * Both completions can race against SCSI timeout. When normal
  180. * completion wins, the qc never reaches EH. When error
  181. * completion wins, the qc has ATA_QCFLAG_FAILED set.
  182. *
  183. * When SCSI timeout wins, things are a bit more complex.
  184. * Normal or error completion can occur after the timeout but
  185. * before this point. In such cases, both types of
  186. * completions are honored. A scmd is determined to have
  187. * timed out iff its associated qc is active and not failed.
  188. */
  189. if (ap->ops->error_handler) {
  190. struct scsi_cmnd *scmd, *tmp;
  191. int nr_timedout = 0;
  192. spin_lock_irqsave(ap->lock, flags);
  193. list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
  194. struct ata_queued_cmd *qc;
  195. for (i = 0; i < ATA_MAX_QUEUE; i++) {
  196. qc = __ata_qc_from_tag(ap, i);
  197. if (qc->flags & ATA_QCFLAG_ACTIVE &&
  198. qc->scsicmd == scmd)
  199. break;
  200. }
  201. if (i < ATA_MAX_QUEUE) {
  202. /* the scmd has an associated qc */
  203. if (!(qc->flags & ATA_QCFLAG_FAILED)) {
  204. /* which hasn't failed yet, timeout */
  205. qc->err_mask |= AC_ERR_TIMEOUT;
  206. qc->flags |= ATA_QCFLAG_FAILED;
  207. nr_timedout++;
  208. }
  209. } else {
  210. /* Normal completion occurred after
  211. * SCSI timeout but before this point.
  212. * Successfully complete it.
  213. */
  214. scmd->retries = scmd->allowed;
  215. scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
  216. }
  217. }
  218. /* If we have timed out qcs, they belong to EH from
  219. * this point but the state of the controller is
  220. * unknown. Freeze the port to make sure the IRQ
  221. * handler doesn't diddle with those qcs. This must
  222. * be done atomically w.r.t. setting QCFLAG_FAILED.
  223. */
  224. if (nr_timedout)
  225. __ata_port_freeze(ap);
  226. spin_unlock_irqrestore(ap->lock, flags);
  227. } else
  228. spin_unlock_wait(ap->lock);
  229. repeat:
  230. /* invoke error handler */
  231. if (ap->ops->error_handler) {
  232. /* process port resume request */
  233. ata_eh_handle_port_resume(ap);
  234. /* fetch & clear EH info */
  235. spin_lock_irqsave(ap->lock, flags);
  236. memset(&ap->eh_context, 0, sizeof(ap->eh_context));
  237. ap->eh_context.i = ap->eh_info;
  238. memset(&ap->eh_info, 0, sizeof(ap->eh_info));
  239. ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
  240. ap->pflags &= ~ATA_PFLAG_EH_PENDING;
  241. spin_unlock_irqrestore(ap->lock, flags);
  242. /* invoke EH, skip if unloading or suspended */
  243. if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
  244. ap->ops->error_handler(ap);
  245. else
  246. ata_eh_finish(ap);
  247. /* process port suspend request */
  248. ata_eh_handle_port_suspend(ap);
  249. /* Exception might have happened after ->error_handler
  250. * recovered the port but before this point. Repeat
  251. * EH in such case.
  252. */
  253. spin_lock_irqsave(ap->lock, flags);
  254. if (ap->pflags & ATA_PFLAG_EH_PENDING) {
  255. if (--repeat_cnt) {
  256. ata_port_printk(ap, KERN_INFO,
  257. "EH pending after completion, "
  258. "repeating EH (cnt=%d)\n", repeat_cnt);
  259. spin_unlock_irqrestore(ap->lock, flags);
  260. goto repeat;
  261. }
  262. ata_port_printk(ap, KERN_ERR, "EH pending after %d "
  263. "tries, giving up\n", ATA_EH_MAX_REPEAT);
  264. }
  265. /* this run is complete, make sure EH info is clear */
  266. memset(&ap->eh_info, 0, sizeof(ap->eh_info));
  267. /* Clear host_eh_scheduled while holding ap->lock such
  268. * that if exception occurs after this point but
  269. * before EH completion, SCSI midlayer will
  270. * re-initiate EH.
  271. */
  272. host->host_eh_scheduled = 0;
  273. spin_unlock_irqrestore(ap->lock, flags);
  274. } else {
  275. WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
  276. ap->ops->eng_timeout(ap);
  277. }
  278. /* finish or retry handled scmd's and clean up */
  279. WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
  280. scsi_eh_flush_done_q(&ap->eh_done_q);
  281. /* clean up */
  282. spin_lock_irqsave(ap->lock, flags);
  283. if (ap->pflags & ATA_PFLAG_LOADING)
  284. ap->pflags &= ~ATA_PFLAG_LOADING;
  285. else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
  286. queue_work(ata_aux_wq, &ap->hotplug_task);
  287. if (ap->pflags & ATA_PFLAG_RECOVERED)
  288. ata_port_printk(ap, KERN_INFO, "EH complete\n");
  289. ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);
  290. /* tell wait_eh that we're done */
  291. ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
  292. wake_up_all(&ap->eh_wait_q);
  293. spin_unlock_irqrestore(ap->lock, flags);
  294. DPRINTK("EXIT\n");
  295. }
  296. /**
  297. * ata_port_wait_eh - Wait for the currently pending EH to complete
  298. * @ap: Port to wait EH for
  299. *
  300. * Wait until the currently pending EH is complete.
  301. *
  302. * LOCKING:
  303. * Kernel thread context (may sleep).
  304. */
  305. void ata_port_wait_eh(struct ata_port *ap)
  306. {
  307. unsigned long flags;
  308. DEFINE_WAIT(wait);
  309. retry:
  310. spin_lock_irqsave(ap->lock, flags);
  311. while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
  312. prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
  313. spin_unlock_irqrestore(ap->lock, flags);
  314. schedule();
  315. spin_lock_irqsave(ap->lock, flags);
  316. }
  317. finish_wait(&ap->eh_wait_q, &wait);
  318. spin_unlock_irqrestore(ap->lock, flags);
  319. /* make sure SCSI EH is complete */
  320. if (scsi_host_in_recovery(ap->host)) {
  321. msleep(10);
  322. goto retry;
  323. }
  324. }
  325. /**
  326. * ata_qc_timeout - Handle timeout of queued command
  327. * @qc: Command that timed out
  328. *
  329. * Some part of the kernel (currently, only the SCSI layer)
  330. * has noticed that the active command on port @ap has not
  331. * completed after a specified length of time. Handle this
  332. * condition by disabling DMA (if necessary) and completing
  333. * transactions, with error if necessary.
  334. *
  335. * This also handles the case of the "lost interrupt", where
  336. * for some reason (possibly hardware bug, possibly driver bug)
  337. * an interrupt was not delivered to the driver, even though the
  338. * transaction completed successfully.
  339. *
  340. * TODO: kill this function once old EH is gone.
  341. *
  342. * LOCKING:
  343. * Inherited from SCSI layer (none, can sleep)
  344. */
  345. static void ata_qc_timeout(struct ata_queued_cmd *qc)
  346. {
  347. struct ata_port *ap = qc->ap;
  348. u8 host_stat = 0, drv_stat;
  349. unsigned long flags;
  350. DPRINTK("ENTER\n");
  351. ap->hsm_task_state = HSM_ST_IDLE;
  352. spin_lock_irqsave(ap->lock, flags);
  353. switch (qc->tf.protocol) {
  354. case ATA_PROT_DMA:
  355. case ATA_PROT_ATAPI_DMA:
  356. host_stat = ap->ops->bmdma_status(ap);
  357. /* before we do anything else, clear DMA-Start bit */
  358. ap->ops->bmdma_stop(qc);
  359. /* fall through */
  360. default:
  361. ata_altstatus(ap);
  362. drv_stat = ata_chk_status(ap);
  363. /* ack bmdma irq events */
  364. ap->ops->irq_clear(ap);
  365. ata_dev_printk(qc->dev, KERN_ERR, "command 0x%x timeout, "
  366. "stat 0x%x host_stat 0x%x\n",
  367. qc->tf.command, drv_stat, host_stat);
  368. /* complete taskfile transaction */
  369. qc->err_mask |= AC_ERR_TIMEOUT;
  370. break;
  371. }
  372. spin_unlock_irqrestore(ap->lock, flags);
  373. ata_eh_qc_complete(qc);
  374. DPRINTK("EXIT\n");
  375. }
  376. /**
  377. * ata_eng_timeout - Handle timeout of queued command
  378. * @ap: Port on which timed-out command is active
  379. *
  380. * Some part of the kernel (currently, only the SCSI layer)
  381. * has noticed that the active command on port @ap has not
  382. * completed after a specified length of time. Handle this
  383. * condition by disabling DMA (if necessary) and completing
  384. * transactions, with error if necessary.
  385. *
  386. * This also handles the case of the "lost interrupt", where
  387. * for some reason (possibly hardware bug, possibly driver bug)
  388. * an interrupt was not delivered to the driver, even though the
  389. * transaction completed successfully.
  390. *
  391. * TODO: kill this function once old EH is gone.
  392. *
  393. * LOCKING:
  394. * Inherited from SCSI layer (none, can sleep)
  395. */
  396. void ata_eng_timeout(struct ata_port *ap)
  397. {
  398. DPRINTK("ENTER\n");
  399. ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
  400. DPRINTK("EXIT\n");
  401. }
  402. /**
  403. * ata_qc_schedule_eh - schedule qc for error handling
  404. * @qc: command to schedule error handling for
  405. *
  406. * Schedule error handling for @qc. EH will kick in as soon as
  407. * other commands are drained.
  408. *
  409. * LOCKING:
  410. * spin_lock_irqsave(host_set lock)
  411. */
  412. void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
  413. {
  414. struct ata_port *ap = qc->ap;
  415. WARN_ON(!ap->ops->error_handler);
  416. qc->flags |= ATA_QCFLAG_FAILED;
  417. qc->ap->pflags |= ATA_PFLAG_EH_PENDING;
  418. /* The following will fail if timeout has already expired.
  419. * ata_scsi_error() takes care of such scmds on EH entry.
  420. * Note that ATA_QCFLAG_FAILED is unconditionally set after
  421. * this function completes.
  422. */
  423. scsi_req_abort_cmd(qc->scsicmd);
  424. }
  425. /**
  426. * ata_port_schedule_eh - schedule error handling without a qc
  427. * @ap: ATA port to schedule EH for
  428. *
  429. * Schedule error handling for @ap. EH will kick in as soon as
  430. * all commands are drained.
  431. *
  432. * LOCKING:
  433. * spin_lock_irqsave(host_set lock)
  434. */
  435. void ata_port_schedule_eh(struct ata_port *ap)
  436. {
  437. WARN_ON(!ap->ops->error_handler);
  438. ap->pflags |= ATA_PFLAG_EH_PENDING;
  439. scsi_schedule_eh(ap->host);
  440. DPRINTK("port EH scheduled\n");
  441. }
  442. /**
  443. * ata_port_abort - abort all qc's on the port
  444. * @ap: ATA port to abort qc's for
  445. *
  446. * Abort all active qc's of @ap and schedule EH.
  447. *
  448. * LOCKING:
  449. * spin_lock_irqsave(host_set lock)
  450. *
  451. * RETURNS:
  452. * Number of aborted qc's.
  453. */
  454. int ata_port_abort(struct ata_port *ap)
  455. {
  456. int tag, nr_aborted = 0;
  457. WARN_ON(!ap->ops->error_handler);
  458. for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
  459. struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
  460. if (qc) {
  461. qc->flags |= ATA_QCFLAG_FAILED;
  462. ata_qc_complete(qc);
  463. nr_aborted++;
  464. }
  465. }
  466. if (!nr_aborted)
  467. ata_port_schedule_eh(ap);
  468. return nr_aborted;
  469. }
  470. /**
  471. * __ata_port_freeze - freeze port
  472. * @ap: ATA port to freeze
  473. *
  474. * This function is called when HSM violation or some other
  475. * condition disrupts normal operation of the port. Frozen port
  476. * is not allowed to perform any operation until the port is
  477. * thawed, which usually follows a successful reset.
  478. *
  479. * ap->ops->freeze() callback can be used for freezing the port
  480. * hardware-wise (e.g. mask interrupt and stop DMA engine). If a
  481. * port cannot be frozen hardware-wise, the interrupt handler
  482. * must ack and clear interrupts unconditionally while the port
  483. * is frozen.
  484. *
  485. * LOCKING:
  486. * spin_lock_irqsave(host_set lock)
  487. */
  488. static void __ata_port_freeze(struct ata_port *ap)
  489. {
  490. WARN_ON(!ap->ops->error_handler);
  491. if (ap->ops->freeze)
  492. ap->ops->freeze(ap);
  493. ap->pflags |= ATA_PFLAG_FROZEN;
  494. DPRINTK("ata%u port frozen\n", ap->id);
  495. }
  496. /**
  497. * ata_port_freeze - abort & freeze port
  498. * @ap: ATA port to freeze
  499. *
  500. * Abort and freeze @ap.
  501. *
  502. * LOCKING:
  503. * spin_lock_irqsave(host_set lock)
  504. *
  505. * RETURNS:
  506. * Number of aborted commands.
  507. */
  508. int ata_port_freeze(struct ata_port *ap)
  509. {
  510. int nr_aborted;
  511. WARN_ON(!ap->ops->error_handler);
  512. nr_aborted = ata_port_abort(ap);
  513. __ata_port_freeze(ap);
  514. return nr_aborted;
  515. }
  516. /**
  517. * ata_eh_freeze_port - EH helper to freeze port
  518. * @ap: ATA port to freeze
  519. *
  520. * Freeze @ap.
  521. *
  522. * LOCKING:
  523. * None.
  524. */
  525. void ata_eh_freeze_port(struct ata_port *ap)
  526. {
  527. unsigned long flags;
  528. if (!ap->ops->error_handler)
  529. return;
  530. spin_lock_irqsave(ap->lock, flags);
  531. __ata_port_freeze(ap);
  532. spin_unlock_irqrestore(ap->lock, flags);
  533. }
  534. /**
  535. * ata_eh_thaw_port - EH helper to thaw port
  536. * @ap: ATA port to thaw
  537. *
  538. * Thaw frozen port @ap.
  539. *
  540. * LOCKING:
  541. * None.
  542. */
  543. void ata_eh_thaw_port(struct ata_port *ap)
  544. {
  545. unsigned long flags;
  546. if (!ap->ops->error_handler)
  547. return;
  548. spin_lock_irqsave(ap->lock, flags);
  549. ap->pflags &= ~ATA_PFLAG_FROZEN;
  550. if (ap->ops->thaw)
  551. ap->ops->thaw(ap);
  552. spin_unlock_irqrestore(ap->lock, flags);
  553. DPRINTK("ata%u port thawed\n", ap->id);
  554. }
  555. static void ata_eh_scsidone(struct scsi_cmnd *scmd)
  556. {
  557. /* nada */
  558. }
  559. static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
  560. {
  561. struct ata_port *ap = qc->ap;
  562. struct scsi_cmnd *scmd = qc->scsicmd;
  563. unsigned long flags;
  564. spin_lock_irqsave(ap->lock, flags);
  565. qc->scsidone = ata_eh_scsidone;
  566. __ata_qc_complete(qc);
  567. WARN_ON(ata_tag_valid(qc->tag));
  568. spin_unlock_irqrestore(ap->lock, flags);
  569. scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
  570. }
  571. /**
  572. * ata_eh_qc_complete - Complete an active ATA command from EH
  573. * @qc: Command to complete
  574. *
  575. * Indicate to the mid and upper layers that an ATA command has
  576. * completed. To be used from EH.
  577. */
  578. void ata_eh_qc_complete(struct ata_queued_cmd *qc)
  579. {
  580. struct scsi_cmnd *scmd = qc->scsicmd;
  581. scmd->retries = scmd->allowed;
  582. __ata_eh_qc_complete(qc);
  583. }
  584. /**
  585. * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
  586. * @qc: Command to retry
  587. *
  588. * Indicate to the mid and upper layers that an ATA command
  589. * should be retried. To be used from EH.
  590. *
  591. * SCSI midlayer limits the number of retries to scmd->allowed.
  592. * scmd->retries is decremented for commands which get retried
  593. * due to unrelated failures (qc->err_mask is zero).
  594. */
  595. void ata_eh_qc_retry(struct ata_queued_cmd *qc)
  596. {
  597. struct scsi_cmnd *scmd = qc->scsicmd;
  598. if (!qc->err_mask && scmd->retries)
  599. scmd->retries--;
  600. __ata_eh_qc_complete(qc);
  601. }
  602. /**
  603. * ata_eh_detach_dev - detach ATA device
  604. * @dev: ATA device to detach
  605. *
  606. * Detach @dev.
  607. *
  608. * LOCKING:
  609. * None.
  610. */
  611. static void ata_eh_detach_dev(struct ata_device *dev)
  612. {
  613. struct ata_port *ap = dev->ap;
  614. unsigned long flags;
  615. ata_dev_disable(dev);
  616. spin_lock_irqsave(ap->lock, flags);
  617. dev->flags &= ~ATA_DFLAG_DETACH;
  618. if (ata_scsi_offline_dev(dev)) {
  619. dev->flags |= ATA_DFLAG_DETACHED;
  620. ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
  621. }
  622. /* clear per-dev EH actions */
  623. ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
  624. ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);
  625. spin_unlock_irqrestore(ap->lock, flags);
  626. }
  627. /**
  628. * ata_eh_about_to_do - about to perform eh_action
  629. * @ap: target ATA port
  630. * @dev: target ATA dev for per-dev action (can be NULL)
  631. * @action: action about to be performed
  632. *
  633. * Called just before performing EH actions to clear related bits
  634. * in @ap->eh_info such that eh actions are not unnecessarily
  635. * repeated.
  636. *
  637. * LOCKING:
  638. * None.
  639. */
  640. static void ata_eh_about_to_do(struct ata_port *ap, struct ata_device *dev,
  641. unsigned int action)
  642. {
  643. unsigned long flags;
  644. spin_lock_irqsave(ap->lock, flags);
  645. ata_eh_clear_action(dev, &ap->eh_info, action);
  646. if (!(ap->eh_context.i.flags & ATA_EHI_QUIET))
  647. ap->pflags |= ATA_PFLAG_RECOVERED;
  648. spin_unlock_irqrestore(ap->lock, flags);
  649. }
  650. /**
  651. * ata_eh_done - EH action complete
  652. * @ap: target ATA port
  653. * @dev: target ATA dev for per-dev action (can be NULL)
  654. * @action: action just completed
  655. *
  656. * Called right after performing EH actions to clear related bits
  657. * in @ap->eh_context.
  658. *
  659. * LOCKING:
  660. * None.
  661. */
  662. static void ata_eh_done(struct ata_port *ap, struct ata_device *dev,
  663. unsigned int action)
  664. {
  665. ata_eh_clear_action(dev, &ap->eh_context.i, action);
  666. }
  667. /**
  668. * ata_err_string - convert err_mask to descriptive string
  669. * @err_mask: error mask to convert to string
  670. *
  671. * Convert @err_mask to descriptive string. Errors are
  672. * prioritized according to severity and only the most severe
  673. * error is reported.
  674. *
  675. * LOCKING:
  676. * None.
  677. *
  678. * RETURNS:
  679. * Descriptive string for @err_mask
  680. */
  681. static const char * ata_err_string(unsigned int err_mask)
  682. {
  683. if (err_mask & AC_ERR_HOST_BUS)
  684. return "host bus error";
  685. if (err_mask & AC_ERR_ATA_BUS)
  686. return "ATA bus error";
  687. if (err_mask & AC_ERR_TIMEOUT)
  688. return "timeout";
  689. if (err_mask & AC_ERR_HSM)
  690. return "HSM violation";
  691. if (err_mask & AC_ERR_SYSTEM)
  692. return "internal error";
  693. if (err_mask & AC_ERR_MEDIA)
  694. return "media error";
  695. if (err_mask & AC_ERR_INVALID)
  696. return "invalid argument";
  697. if (err_mask & AC_ERR_DEV)
  698. return "device error";
  699. return "unknown error";
  700. }
  701. /**
  702. * ata_read_log_page - read a specific log page
  703. * @dev: target device
  704. * @page: page to read
  705. * @buf: buffer to store read page
  706. * @sectors: number of sectors to read
  707. *
  708. * Read log page using READ_LOG_EXT command.
  709. *
  710. * LOCKING:
  711. * Kernel thread context (may sleep).
  712. *
  713. * RETURNS:
  714. * 0 on success, AC_ERR_* mask otherwise.
  715. */
  716. static unsigned int ata_read_log_page(struct ata_device *dev,
  717. u8 page, void *buf, unsigned int sectors)
  718. {
  719. struct ata_taskfile tf;
  720. unsigned int err_mask;
  721. DPRINTK("read log page - page %d\n", page);
  722. ata_tf_init(dev, &tf);
  723. tf.command = ATA_CMD_READ_LOG_EXT;
  724. tf.lbal = page;
  725. tf.nsect = sectors;
  726. tf.hob_nsect = sectors >> 8;
  727. tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
  728. tf.protocol = ATA_PROT_PIO;
  729. err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
  730. buf, sectors * ATA_SECT_SIZE);
  731. DPRINTK("EXIT, err_mask=%x\n", err_mask);
  732. return err_mask;
  733. }
  734. /**
  735. * ata_eh_read_log_10h - Read log page 10h for NCQ error details
  736. * @dev: Device to read log page 10h from
  737. * @tag: Resulting tag of the failed command
  738. * @tf: Resulting taskfile registers of the failed command
  739. *
  740. * Read log page 10h to obtain NCQ error details and clear error
  741. * condition.
  742. *
  743. * LOCKING:
  744. * Kernel thread context (may sleep).
  745. *
  746. * RETURNS:
  747. * 0 on success, -errno otherwise.
  748. */
  749. static int ata_eh_read_log_10h(struct ata_device *dev,
  750. int *tag, struct ata_taskfile *tf)
  751. {
  752. u8 *buf = dev->ap->sector_buf;
  753. unsigned int err_mask;
  754. u8 csum;
  755. int i;
  756. err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
  757. if (err_mask)
  758. return -EIO;
  759. csum = 0;
  760. for (i = 0; i < ATA_SECT_SIZE; i++)
  761. csum += buf[i];
  762. if (csum)
  763. ata_dev_printk(dev, KERN_WARNING,
  764. "invalid checksum 0x%x on log page 10h\n", csum);
  765. if (buf[0] & 0x80)
  766. return -ENOENT;
  767. *tag = buf[0] & 0x1f;
  768. tf->command = buf[2];
  769. tf->feature = buf[3];
  770. tf->lbal = buf[4];
  771. tf->lbam = buf[5];
  772. tf->lbah = buf[6];
  773. tf->device = buf[7];
  774. tf->hob_lbal = buf[8];
  775. tf->hob_lbam = buf[9];
  776. tf->hob_lbah = buf[10];
  777. tf->nsect = buf[12];
  778. tf->hob_nsect = buf[13];
  779. return 0;
  780. }
  781. /**
  782. * atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
  783. * @dev: device to perform REQUEST_SENSE to
  784. * @sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
  785. *
  786. * Perform ATAPI REQUEST_SENSE after the device reported CHECK
  787. * CONDITION. This is an EH helper function.
  788. *
  789. * LOCKING:
  790. * Kernel thread context (may sleep).
  791. *
  792. * RETURNS:
  793. * 0 on success, AC_ERR_* mask on failure
  794. */
  795. static unsigned int atapi_eh_request_sense(struct ata_device *dev,
  796. unsigned char *sense_buf)
  797. {
  798. struct ata_port *ap = dev->ap;
  799. struct ata_taskfile tf;
  800. u8 cdb[ATAPI_CDB_LEN];
  801. DPRINTK("ATAPI request sense\n");
  802. ata_tf_init(dev, &tf);
  803. /* FIXME: is this needed? */
  804. memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
  805. /* XXX: why tf_read here? */
  806. ap->ops->tf_read(ap, &tf);
  807. /* fill these in, for the case where they are -not- overwritten */
  808. sense_buf[0] = 0x70;
  809. sense_buf[2] = tf.feature >> 4;
  810. memset(cdb, 0, ATAPI_CDB_LEN);
  811. cdb[0] = REQUEST_SENSE;
  812. cdb[4] = SCSI_SENSE_BUFFERSIZE;
  813. tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
  814. tf.command = ATA_CMD_PACKET;
  815. /* is it pointless to prefer PIO for "safety reasons"? */
  816. if (ap->flags & ATA_FLAG_PIO_DMA) {
  817. tf.protocol = ATA_PROT_ATAPI_DMA;
  818. tf.feature |= ATAPI_PKT_DMA;
  819. } else {
  820. tf.protocol = ATA_PROT_ATAPI;
  821. tf.lbam = (8 * 1024) & 0xff;
  822. tf.lbah = (8 * 1024) >> 8;
  823. }
  824. return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
  825. sense_buf, SCSI_SENSE_BUFFERSIZE);
  826. }
  827. /**
  828. * ata_eh_analyze_serror - analyze SError for a failed port
  829. * @ap: ATA port to analyze SError for
  830. *
  831. * Analyze SError if available and further determine cause of
  832. * failure.
  833. *
  834. * LOCKING:
  835. * None.
  836. */
  837. static void ata_eh_analyze_serror(struct ata_port *ap)
  838. {
  839. struct ata_eh_context *ehc = &ap->eh_context;
  840. u32 serror = ehc->i.serror;
  841. unsigned int err_mask = 0, action = 0;
  842. if (serror & SERR_PERSISTENT) {
  843. err_mask |= AC_ERR_ATA_BUS;
  844. action |= ATA_EH_HARDRESET;
  845. }
  846. if (serror &
  847. (SERR_DATA_RECOVERED | SERR_COMM_RECOVERED | SERR_DATA)) {
  848. err_mask |= AC_ERR_ATA_BUS;
  849. action |= ATA_EH_SOFTRESET;
  850. }
  851. if (serror & SERR_PROTOCOL) {
  852. err_mask |= AC_ERR_HSM;
  853. action |= ATA_EH_SOFTRESET;
  854. }
  855. if (serror & SERR_INTERNAL) {
  856. err_mask |= AC_ERR_SYSTEM;
  857. action |= ATA_EH_SOFTRESET;
  858. }
  859. if (serror & (SERR_PHYRDY_CHG | SERR_DEV_XCHG))
  860. ata_ehi_hotplugged(&ehc->i);
  861. ehc->i.err_mask |= err_mask;
  862. ehc->i.action |= action;
  863. }
  864. /**
  865. * ata_eh_analyze_ncq_error - analyze NCQ error
  866. * @ap: ATA port to analyze NCQ error for
  867. *
  868. * Read log page 10h, determine the offending qc and acquire
  869. * error status TF. For NCQ device errors, all LLDDs have to do
  870. * is set AC_ERR_DEV in ehi->err_mask. This function takes
  871. * care of the rest.
  872. *
  873. * LOCKING:
  874. * Kernel thread context (may sleep).
  875. */
  876. static void ata_eh_analyze_ncq_error(struct ata_port *ap)
  877. {
  878. struct ata_eh_context *ehc = &ap->eh_context;
  879. struct ata_device *dev = ap->device;
  880. struct ata_queued_cmd *qc;
  881. struct ata_taskfile tf;
  882. int tag, rc;
  883. /* if frozen, we can't do much */
  884. if (ap->pflags & ATA_PFLAG_FROZEN)
  885. return;
  886. /* is it NCQ device error? */
  887. if (!ap->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
  888. return;
  889. /* has LLDD analyzed already? */
  890. for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
  891. qc = __ata_qc_from_tag(ap, tag);
  892. if (!(qc->flags & ATA_QCFLAG_FAILED))
  893. continue;
  894. if (qc->err_mask)
  895. return;
  896. }
  897. /* okay, this error is ours */
  898. rc = ata_eh_read_log_10h(dev, &tag, &tf);
  899. if (rc) {
  900. ata_port_printk(ap, KERN_ERR, "failed to read log page 10h "
  901. "(errno=%d)\n", rc);
  902. return;
  903. }
  904. if (!(ap->sactive & (1 << tag))) {
  905. ata_port_printk(ap, KERN_ERR, "log page 10h reported "
  906. "inactive tag %d\n", tag);
  907. return;
  908. }
  909. /* we've got the perpetrator, condemn it */
  910. qc = __ata_qc_from_tag(ap, tag);
  911. memcpy(&qc->result_tf, &tf, sizeof(tf));
  912. qc->err_mask |= AC_ERR_DEV;
  913. ehc->i.err_mask &= ~AC_ERR_DEV;
  914. }
  915. /**
  916. * ata_eh_analyze_tf - analyze taskfile of a failed qc
  917. * @qc: qc to analyze
  918. * @tf: Taskfile registers to analyze
  919. *
  920. * Analyze taskfile of @qc and further determine cause of
  921. * failure. This function also requests ATAPI sense data if
  922. * available.
  923. *
  924. * LOCKING:
  925. * Kernel thread context (may sleep).
  926. *
  927. * RETURNS:
  928. * Determined recovery action
  929. */
  930. static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
  931. const struct ata_taskfile *tf)
  932. {
  933. unsigned int tmp, action = 0;
  934. u8 stat = tf->command, err = tf->feature;
  935. if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
  936. qc->err_mask |= AC_ERR_HSM;
  937. return ATA_EH_SOFTRESET;
  938. }
  939. if (!(qc->err_mask & AC_ERR_DEV))
  940. return 0;
  941. switch (qc->dev->class) {
  942. case ATA_DEV_ATA:
  943. if (err & ATA_ICRC)
  944. qc->err_mask |= AC_ERR_ATA_BUS;
  945. if (err & ATA_UNC)
  946. qc->err_mask |= AC_ERR_MEDIA;
  947. if (err & ATA_IDNF)
  948. qc->err_mask |= AC_ERR_INVALID;
  949. break;
  950. case ATA_DEV_ATAPI:
  951. tmp = atapi_eh_request_sense(qc->dev,
  952. qc->scsicmd->sense_buffer);
  953. if (!tmp) {
  954. /* ATA_QCFLAG_SENSE_VALID is used to tell
  955. * atapi_qc_complete() that sense data is
  956. * already valid.
  957. *
  958. * TODO: interpret sense data and set
  959. * appropriate err_mask.
  960. */
  961. qc->flags |= ATA_QCFLAG_SENSE_VALID;
  962. } else
  963. qc->err_mask |= tmp;
  964. }
  965. if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
  966. action |= ATA_EH_SOFTRESET;
  967. return action;
  968. }
  969. static int ata_eh_categorize_ering_entry(struct ata_ering_entry *ent)
  970. {
  971. if (ent->err_mask & (AC_ERR_ATA_BUS | AC_ERR_TIMEOUT))
  972. return 1;
  973. if (ent->is_io) {
  974. if (ent->err_mask & AC_ERR_HSM)
  975. return 1;
  976. if ((ent->err_mask &
  977. (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
  978. return 2;
  979. }
  980. return 0;
  981. }
  982. struct speed_down_needed_arg {
  983. u64 since;
  984. int nr_errors[3];
  985. };
  986. static int speed_down_needed_cb(struct ata_ering_entry *ent, void *void_arg)
  987. {
  988. struct speed_down_needed_arg *arg = void_arg;
  989. if (ent->timestamp < arg->since)
  990. return -1;
  991. arg->nr_errors[ata_eh_categorize_ering_entry(ent)]++;
  992. return 0;
  993. }
  994. /**
  995. * ata_eh_speed_down_needed - Determine whether speed down is necessary
  996. * @dev: Device of interest
  997. *
  998. * This function examines error ring of @dev and determines
  999. * whether speed down is necessary. Speed down is necessary if
  1000. * there have been more than 3 Cat-1 errors or 10 Cat-2
  1001. * errors during the last 15 minutes.
  1002. *
  1003. * Cat-1 errors are ATA_BUS and TIMEOUT for any command, and HSM
  1004. * violations for known supported commands.
  1005. *
  1006. * Cat-2 errors are unclassified DEV errors for known supported
  1007. * commands.
  1008. *
  1009. * LOCKING:
  1010. * Inherited from caller.
  1011. *
  1012. * RETURNS:
  1013. * 1 if speed down is necessary, 0 otherwise
  1014. */
  1015. static int ata_eh_speed_down_needed(struct ata_device *dev)
  1016. {
  1017. const u64 interval = 15LLU * 60 * HZ;
  1018. static const int err_limits[3] = { -1, 3, 10 };
  1019. struct speed_down_needed_arg arg;
  1020. struct ata_ering_entry *ent;
  1021. int err_cat;
  1022. u64 j64;
  1023. ent = ata_ering_top(&dev->ering);
  1024. if (!ent)
  1025. return 0;
  1026. err_cat = ata_eh_categorize_ering_entry(ent);
  1027. if (err_cat == 0)
  1028. return 0;
  1029. memset(&arg, 0, sizeof(arg));
  1030. j64 = get_jiffies_64();
  1031. if (j64 >= interval)
  1032. arg.since = j64 - interval;
  1033. else
  1034. arg.since = 0;
  1035. ata_ering_map(&dev->ering, speed_down_needed_cb, &arg);
  1036. return arg.nr_errors[err_cat] > err_limits[err_cat];
  1037. }
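/*
 * Worked example (editor's addition): with err_limits[] = { -1, 3, 10 },
 * a device whose newest ering entry is Cat-1 asks for a speed down only
 * after more than three Cat-1 errors within the last 15 * 60 * HZ
 * jiffies; a Cat-2 top entry needs more than ten Cat-2 errors in the
 * same window, and a Cat-0 top entry never triggers a speed down.
 */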
  1038. /**
  1039. * ata_eh_speed_down - record error and speed down if necessary
  1040. * @dev: Failed device
  1041. * @is_io: Did the device fail during normal IO?
  1042. * @err_mask: err_mask of the error
  1043. *
  1044. * Record error and examine error history to determine whether
  1045. * adjusting transmission speed is necessary. It also sets
  1046. * transmission limits appropriately if such adjustment is
  1047. * necessary.
  1048. *
  1049. * LOCKING:
  1050. * Kernel thread context (may sleep).
  1051. *
  1052. * RETURNS:
  1053. * 0 on success, -errno otherwise
  1054. */
  1055. static int ata_eh_speed_down(struct ata_device *dev, int is_io,
  1056. unsigned int err_mask)
  1057. {
  1058. if (!err_mask)
  1059. return 0;
  1060. /* record error and determine whether speed down is necessary */
  1061. ata_ering_record(&dev->ering, is_io, err_mask);
  1062. if (!ata_eh_speed_down_needed(dev))
  1063. return 0;
  1064. /* speed down SATA link speed if possible */
  1065. if (sata_down_spd_limit(dev->ap) == 0)
  1066. return ATA_EH_HARDRESET;
  1067. /* lower transfer mode */
  1068. if (ata_down_xfermask_limit(dev, 0) == 0)
  1069. return ATA_EH_SOFTRESET;
  1070. ata_dev_printk(dev, KERN_ERR,
  1071. "speed down requested but no transfer mode left\n");
  1072. return 0;
  1073. }
  1074. /**
  1075. * ata_eh_autopsy - analyze error and determine recovery action
  1076. * @ap: ATA port to perform autopsy on
  1077. *
  1078. * Analyze why @ap failed and determine which recovery action is
  1079. * needed. This function also sets more detailed AC_ERR_* values
  1080. * and fills sense data for ATAPI CHECK CONDITION.
  1081. *
  1082. * LOCKING:
  1083. * Kernel thread context (may sleep).
  1084. */
  1085. static void ata_eh_autopsy(struct ata_port *ap)
  1086. {
  1087. struct ata_eh_context *ehc = &ap->eh_context;
  1088. unsigned int action = ehc->i.action;
  1089. struct ata_device *failed_dev = NULL;
  1090. unsigned int all_err_mask = 0;
  1091. int tag, is_io = 0;
  1092. u32 serror;
  1093. int rc;
  1094. DPRINTK("ENTER\n");
  1095. if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
  1096. return;
  1097. /* obtain and analyze SError */
  1098. rc = sata_scr_read(ap, SCR_ERROR, &serror);
  1099. if (rc == 0) {
  1100. ehc->i.serror |= serror;
  1101. ata_eh_analyze_serror(ap);
  1102. } else if (rc != -EOPNOTSUPP)
  1103. action |= ATA_EH_HARDRESET;
  1104. /* analyze NCQ failure */
  1105. ata_eh_analyze_ncq_error(ap);
  1106. /* any real error trumps AC_ERR_OTHER */
  1107. if (ehc->i.err_mask & ~AC_ERR_OTHER)
  1108. ehc->i.err_mask &= ~AC_ERR_OTHER;
  1109. all_err_mask |= ehc->i.err_mask;
  1110. for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
  1111. struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
  1112. if (!(qc->flags & ATA_QCFLAG_FAILED))
  1113. continue;
  1114. /* inherit upper level err_mask */
  1115. qc->err_mask |= ehc->i.err_mask;
  1116. /* analyze TF */
  1117. action |= ata_eh_analyze_tf(qc, &qc->result_tf);
  1118. /* DEV errors are probably spurious in case of ATA_BUS error */
  1119. if (qc->err_mask & AC_ERR_ATA_BUS)
  1120. qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
  1121. AC_ERR_INVALID);
  1122. /* any real error trumps unknown error */
  1123. if (qc->err_mask & ~AC_ERR_OTHER)
  1124. qc->err_mask &= ~AC_ERR_OTHER;
  1125. /* SENSE_VALID trumps dev/unknown error and revalidation */
  1126. if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
  1127. qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);
  1128. action &= ~ATA_EH_REVALIDATE;
  1129. }
  1130. /* accumulate error info */
  1131. failed_dev = qc->dev;
  1132. all_err_mask |= qc->err_mask;
  1133. if (qc->flags & ATA_QCFLAG_IO)
  1134. is_io = 1;
  1135. }
  1136. /* enforce default EH actions */
  1137. if (ap->pflags & ATA_PFLAG_FROZEN ||
  1138. all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
  1139. action |= ATA_EH_SOFTRESET;
  1140. else if (all_err_mask)
  1141. action |= ATA_EH_REVALIDATE;
  1142. /* if we have offending qcs and the associated failed device */
  1143. if (failed_dev) {
  1144. /* speed down */
  1145. action |= ata_eh_speed_down(failed_dev, is_io, all_err_mask);
  1146. /* perform per-dev EH action only on the offending device */
  1147. ehc->i.dev_action[failed_dev->devno] |=
  1148. action & ATA_EH_PERDEV_MASK;
  1149. action &= ~ATA_EH_PERDEV_MASK;
  1150. }
  1151. /* record autopsy result */
  1152. ehc->i.dev = failed_dev;
  1153. ehc->i.action |= action;
  1154. DPRINTK("EXIT\n");
  1155. }
  1156. /**
  1157. * ata_eh_report - report error handling to user
  1158. * @ap: ATA port EH is going on
  1159. *
  1160. * Report EH to user.
  1161. *
  1162. * LOCKING:
  1163. * None.
  1164. */
  1165. static void ata_eh_report(struct ata_port *ap)
  1166. {
  1167. struct ata_eh_context *ehc = &ap->eh_context;
  1168. const char *frozen, *desc;
  1169. int tag, nr_failed = 0;
  1170. desc = NULL;
  1171. if (ehc->i.desc[0] != '\0')
  1172. desc = ehc->i.desc;
  1173. for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
  1174. struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
  1175. if (!(qc->flags & ATA_QCFLAG_FAILED))
  1176. continue;
  1177. if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
  1178. continue;
  1179. nr_failed++;
  1180. }
  1181. if (!nr_failed && !ehc->i.err_mask)
  1182. return;
  1183. frozen = "";
  1184. if (ap->pflags & ATA_PFLAG_FROZEN)
  1185. frozen = " frozen";
  1186. if (ehc->i.dev) {
  1187. ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
  1188. "SAct 0x%x SErr 0x%x action 0x%x%s\n",
  1189. ehc->i.err_mask, ap->sactive, ehc->i.serror,
  1190. ehc->i.action, frozen);
  1191. if (desc)
  1192. ata_dev_printk(ehc->i.dev, KERN_ERR, "(%s)\n", desc);
  1193. } else {
  1194. ata_port_printk(ap, KERN_ERR, "exception Emask 0x%x "
  1195. "SAct 0x%x SErr 0x%x action 0x%x%s\n",
  1196. ehc->i.err_mask, ap->sactive, ehc->i.serror,
  1197. ehc->i.action, frozen);
  1198. if (desc)
  1199. ata_port_printk(ap, KERN_ERR, "(%s)\n", desc);
  1200. }
  1201. for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
  1202. struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
  1203. if (!(qc->flags & ATA_QCFLAG_FAILED) || !qc->err_mask)
  1204. continue;
  1205. ata_dev_printk(qc->dev, KERN_ERR, "tag %d cmd 0x%x "
  1206. "Emask 0x%x stat 0x%x err 0x%x (%s)\n",
  1207. qc->tag, qc->tf.command, qc->err_mask,
  1208. qc->result_tf.command, qc->result_tf.feature,
  1209. ata_err_string(qc->err_mask));
  1210. }
  1211. }
  1212. static int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset,
  1213. unsigned int *classes)
  1214. {
  1215. int i, rc;
  1216. for (i = 0; i < ATA_MAX_DEVICES; i++)
  1217. classes[i] = ATA_DEV_UNKNOWN;
  1218. rc = reset(ap, classes);
  1219. if (rc)
  1220. return rc;
  1221. /* If any class isn't ATA_DEV_UNKNOWN, classification is
  1222. * considered complete and all remaining ATA_DEV_UNKNOWN
  1223. * entries are converted to ATA_DEV_NONE.
  1224. */
  1225. for (i = 0; i < ATA_MAX_DEVICES; i++)
  1226. if (classes[i] != ATA_DEV_UNKNOWN)
  1227. break;
  1228. if (i < ATA_MAX_DEVICES)
  1229. for (i = 0; i < ATA_MAX_DEVICES; i++)
  1230. if (classes[i] == ATA_DEV_UNKNOWN)
  1231. classes[i] = ATA_DEV_NONE;
  1232. return 0;
  1233. }
  1234. static int ata_eh_followup_srst_needed(int rc, int classify,
  1235. const unsigned int *classes)
  1236. {
  1237. if (rc == -EAGAIN)
  1238. return 1;
  1239. if (rc != 0)
  1240. return 0;
  1241. if (classify && classes[0] == ATA_DEV_UNKNOWN)
  1242. return 1;
  1243. return 0;
  1244. }
  1245. static int ata_eh_reset(struct ata_port *ap, int classify,
  1246. ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
  1247. ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
  1248. {
  1249. struct ata_eh_context *ehc = &ap->eh_context;
  1250. unsigned int *classes = ehc->classes;
  1251. int tries = ATA_EH_RESET_TRIES;
  1252. int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
  1253. unsigned int action;
  1254. ata_reset_fn_t reset;
  1255. int i, did_followup_srst, rc;
  1256. /* Determine which reset to use and record in ehc->i.action.
  1257. * prereset() may examine and modify it.
  1258. */
  1259. action = ehc->i.action;
  1260. ehc->i.action &= ~ATA_EH_RESET_MASK;
  1261. if (softreset && (!hardreset || (!sata_set_spd_needed(ap) &&
  1262. !(action & ATA_EH_HARDRESET))))
  1263. ehc->i.action |= ATA_EH_SOFTRESET;
  1264. else
  1265. ehc->i.action |= ATA_EH_HARDRESET;
  1266. if (prereset) {
  1267. rc = prereset(ap);
  1268. if (rc) {
  1269. ata_port_printk(ap, KERN_ERR,
  1270. "prereset failed (errno=%d)\n", rc);
  1271. return rc;
  1272. }
  1273. }
  1274. /* prereset() might have modified ehc->i.action */
  1275. if (ehc->i.action & ATA_EH_HARDRESET)
  1276. reset = hardreset;
  1277. else if (ehc->i.action & ATA_EH_SOFTRESET)
  1278. reset = softreset;
  1279. else {
  1280. /* prereset told us not to reset, bang classes and return */
  1281. for (i = 0; i < ATA_MAX_DEVICES; i++)
  1282. classes[i] = ATA_DEV_NONE;
  1283. return 0;
  1284. }
  1285. /* did prereset() screw up? if so, fix up to avoid oopsing */
  1286. if (!reset) {
  1287. ata_port_printk(ap, KERN_ERR, "BUG: prereset() requested "
  1288. "invalid reset type\n");
  1289. if (softreset)
  1290. reset = softreset;
  1291. else
  1292. reset = hardreset;
  1293. }
  1294. retry:
  1295. /* shut up during boot probing */
  1296. if (verbose)
  1297. ata_port_printk(ap, KERN_INFO, "%s resetting port\n",
  1298. reset == softreset ? "soft" : "hard");
  1299. /* reset */
  1300. ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
  1301. ehc->i.flags |= ATA_EHI_DID_RESET;
  1302. rc = ata_do_reset(ap, reset, classes);
  1303. did_followup_srst = 0;
  1304. if (reset == hardreset &&
  1305. ata_eh_followup_srst_needed(rc, classify, classes)) {
  1306. /* okay, let's do follow-up softreset */
  1307. did_followup_srst = 1;
  1308. reset = softreset;
  1309. if (!reset) {
  1310. ata_port_printk(ap, KERN_ERR,
  1311. "follow-up softreset required "
  1312. "but no softreset avaliable\n");
  1313. return -EINVAL;
  1314. }
  1315. ata_eh_about_to_do(ap, NULL, ATA_EH_RESET_MASK);
  1316. rc = ata_do_reset(ap, reset, classes);
  1317. if (rc == 0 && classify &&
  1318. classes[0] == ATA_DEV_UNKNOWN) {
  1319. ata_port_printk(ap, KERN_ERR,
  1320. "classification failed\n");
  1321. return -EINVAL;
  1322. }
  1323. }
  1324. if (rc && --tries) {
  1325. const char *type;
  1326. if (reset == softreset) {
  1327. if (did_followup_srst)
  1328. type = "follow-up soft";
  1329. else
  1330. type = "soft";
  1331. } else
  1332. type = "hard";
  1333. ata_port_printk(ap, KERN_WARNING,
  1334. "%sreset failed, retrying in 5 secs\n", type);
  1335. ssleep(5);
  1336. if (reset == hardreset)
  1337. sata_down_spd_limit(ap);
  1338. if (hardreset)
  1339. reset = hardreset;
  1340. goto retry;
  1341. }
  1342. if (rc == 0) {
  1343. /* After the reset, the device state is PIO 0 and the
  1344. * controller state is undefined. Record the mode.
  1345. */
  1346. for (i = 0; i < ATA_MAX_DEVICES; i++)
  1347. ap->device[i].pio_mode = XFER_PIO_0;
  1348. if (postreset)
  1349. postreset(ap, classes);
  1350. /* reset successful, schedule revalidation */
  1351. ata_eh_done(ap, NULL, ATA_EH_RESET_MASK);
  1352. ehc->i.action |= ATA_EH_REVALIDATE;
  1353. }
  1354. return rc;
  1355. }
  1356. static int ata_eh_revalidate_and_attach(struct ata_port *ap,
  1357. struct ata_device **r_failed_dev)
  1358. {
  1359. struct ata_eh_context *ehc = &ap->eh_context;
  1360. struct ata_device *dev;
  1361. unsigned long flags;
  1362. int i, rc = 0;
  1363. DPRINTK("ENTER\n");
  1364. for (i = 0; i < ATA_MAX_DEVICES; i++) {
  1365. unsigned int action;
  1366. dev = &ap->device[i];
  1367. action = ata_eh_dev_action(dev);
  1368. if (action & ATA_EH_REVALIDATE && ata_dev_ready(dev)) {
  1369. if (ata_port_offline(ap)) {
  1370. rc = -EIO;
  1371. break;
  1372. }
  1373. ata_eh_about_to_do(ap, dev, ATA_EH_REVALIDATE);
  1374. rc = ata_dev_revalidate(dev,
  1375. ehc->i.flags & ATA_EHI_DID_RESET);
  1376. if (rc)
  1377. break;
  1378. ata_eh_done(ap, dev, ATA_EH_REVALIDATE);
  1379. /* schedule the scsi_rescan_device() here */
  1380. queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
  1381. } else if (dev->class == ATA_DEV_UNKNOWN &&
  1382. ehc->tries[dev->devno] &&
  1383. ata_class_enabled(ehc->classes[dev->devno])) {
  1384. dev->class = ehc->classes[dev->devno];
  1385. rc = ata_dev_read_id(dev, &dev->class, 1, dev->id);
  1386. if (rc == 0)
  1387. rc = ata_dev_configure(dev, 1);
  1388. if (rc) {
  1389. dev->class = ATA_DEV_UNKNOWN;
  1390. break;
  1391. }
  1392. spin_lock_irqsave(ap->lock, flags);
  1393. ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
  1394. spin_unlock_irqrestore(ap->lock, flags);
  1395. }
  1396. }
  1397. if (rc)
  1398. *r_failed_dev = dev;
  1399. DPRINTK("EXIT\n");
  1400. return rc;
  1401. }
  1402. /**
  1403. * ata_eh_suspend - handle suspend EH action
  1404. * @ap: target host port
  1405. * @r_failed_dev: result parameter to indicate failing device
  1406. *
  1407. * Handle suspend EH action. Disk devices are spun down and
  1408. * other types of devices are just marked suspended. Once
  1409. * suspended, no EH action to the device is allowed until it is
  1410. * resumed.
  1411. *
  1412. * LOCKING:
  1413. * Kernel thread context (may sleep).
  1414. *
  1415. * RETURNS:
  1416. * 0 on success, -errno otherwise
  1417. */
  1418. static int ata_eh_suspend(struct ata_port *ap, struct ata_device **r_failed_dev)
  1419. {
  1420. struct ata_device *dev;
  1421. int i, rc = 0;
  1422. DPRINTK("ENTER\n");
  1423. for (i = 0; i < ATA_MAX_DEVICES; i++) {
  1424. unsigned long flags;
  1425. unsigned int action, err_mask;
  1426. dev = &ap->device[i];
  1427. action = ata_eh_dev_action(dev);
  1428. if (!ata_dev_enabled(dev) || !(action & ATA_EH_SUSPEND))
  1429. continue;
  1430. WARN_ON(dev->flags & ATA_DFLAG_SUSPENDED);
  1431. ata_eh_about_to_do(ap, dev, ATA_EH_SUSPEND);
  1432. if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
  1433. /* flush cache */
  1434. rc = ata_flush_cache(dev);
  1435. if (rc)
  1436. break;
  1437. /* spin down */
  1438. err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
  1439. if (err_mask) {
  1440. ata_dev_printk(dev, KERN_ERR, "failed to "
  1441. "spin down (err_mask=0x%x)\n",
  1442. err_mask);
  1443. rc = -EIO;
  1444. break;
  1445. }
  1446. }
  1447. spin_lock_irqsave(ap->lock, flags);
  1448. dev->flags |= ATA_DFLAG_SUSPENDED;
  1449. spin_unlock_irqrestore(ap->lock, flags);
  1450. ata_eh_done(ap, dev, ATA_EH_SUSPEND);
  1451. }
  1452. if (rc)
  1453. *r_failed_dev = dev;
  1454. DPRINTK("EXIT\n");
  1455. return 0;
  1456. }
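
/*
 * Illustrative sketch, not part of libata: a hypothetical helper showing
 * how a power-management path could queue the ATA_EH_SUSPEND action that
 * ata_eh_suspend() above consumes.  The helper name and call site are made
 * up; the real request paths live in libata-core.c/libata-scsi.c and
 * differ in detail.
 */
static void example_request_dev_suspend(struct ata_port *ap,
					struct ata_device *dev)
{
	unsigned long flags;

	/* eh_info is protected by the port lock; record the per-device
	 * action and wake up the EH thread.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ap->eh_info.dev_action[dev->devno] |= ATA_EH_SUSPEND;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}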
/**
 *	ata_eh_prep_resume - prep for resume EH action
 *	@ap: target host port
 *
 *	Clear SUSPENDED in preparation for scheduled resume actions.
 *	This allows other parts of EH to access the devices being
 *	resumed.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_prep_resume(struct ata_port *ap)
{
	struct ata_device *dev;
	unsigned long flags;
	int i;

	DPRINTK("ENTER\n");

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int action;

		dev = &ap->device[i];
		action = ata_eh_dev_action(dev);

		if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
			continue;

		spin_lock_irqsave(ap->lock, flags);
		dev->flags &= ~ATA_DFLAG_SUSPENDED;
		spin_unlock_irqrestore(ap->lock, flags);
	}

	DPRINTK("EXIT\n");
}
/**
 *	ata_eh_resume - handle resume EH action
 *	@ap: target host port
 *	@r_failed_dev: result parameter to indicate failing device
 *
 *	Handle resume EH action.  Target devices are already reset and
 *	revalidated.  Spinning up is the only operation left.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
static int ata_eh_resume(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0;

	DPRINTK("ENTER\n");

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int action, err_mask;

		dev = &ap->device[i];
		action = ata_eh_dev_action(dev);

		if (!ata_dev_enabled(dev) || !(action & ATA_EH_RESUME))
			continue;

		ata_eh_about_to_do(ap, dev, ATA_EH_RESUME);

		if (dev->class == ATA_DEV_ATA && !(action & ATA_EH_PM_FREEZE)) {
			err_mask = ata_do_simple_cmd(dev,
						     ATA_CMD_IDLEIMMEDIATE);
			if (err_mask) {
				ata_dev_printk(dev, KERN_ERR, "failed to "
					       "spin up (err_mask=0x%x)\n",
					       err_mask);
				rc = -EIO;
				break;
			}
		}

		ata_eh_done(ap, dev, ATA_EH_RESUME);
	}

	if (rc)
		*r_failed_dev = dev;

	DPRINTK("EXIT\n");
	return rc;
}
static int ata_port_nr_enabled(struct ata_port *ap)
{
	int i, cnt = 0;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			cnt++;
	return cnt;
}

static int ata_port_nr_vacant(struct ata_port *ap)
{
	int i, cnt = 0;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ap->device[i].class == ATA_DEV_UNKNOWN)
			cnt++;
	return cnt;
}
static int ata_eh_skip_recovery(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	int i;

	/* skip if all possible devices are suspended */
	for (i = 0; i < ata_port_max_devices(ap); i++) {
		struct ata_device *dev = &ap->device[i];

		if (ata_dev_absent(dev) || ata_dev_ready(dev))
			break;
	}

	if (i == ata_port_max_devices(ap))
		return 1;

	/* always thaw frozen port and recover failed devices */
	if (ap->pflags & ATA_PFLAG_FROZEN || ata_port_nr_enabled(ap))
		return 0;

	/* skip if class codes for all vacant slots are ATA_DEV_NONE */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];

		if (dev->class == ATA_DEV_UNKNOWN &&
		    ehc->classes[dev->devno] != ATA_DEV_NONE)
			return 0;
	}

	return 1;
}
/**
 *	ata_eh_recover - recover host port after error
 *	@ap: host port to recover
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	This is the alpha and omega, eum and yang, heart and soul of
 *	libata exception handling.  On entry, actions required to
 *	recover the port and hotplug requests are recorded in
 *	eh_context.  This function executes all the operations with
 *	appropriate retries and fallbacks to resurrect failed
 *	devices, detach goners and greet newcomers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
			  ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
			  ata_postreset_fn_t postreset)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	struct ata_device *dev;
	int down_xfermask, i, rc;

	DPRINTK("ENTER\n");

	/* prep for recovery */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;

		/* process hotplug request */
		if (dev->flags & ATA_DFLAG_DETACH)
			ata_eh_detach_dev(dev);

		if (!ata_dev_enabled(dev) &&
		    ((ehc->i.probe_mask & (1 << dev->devno)) &&
		     !(ehc->did_probe_mask & (1 << dev->devno)))) {
			ata_eh_detach_dev(dev);
			ata_dev_init(dev);
			ehc->did_probe_mask |= (1 << dev->devno);
			ehc->i.action |= ATA_EH_SOFTRESET;
		}
	}

 retry:
	down_xfermask = 0;
	rc = 0;

	/* if UNLOADING, finish immediately */
	if (ap->pflags & ATA_PFLAG_UNLOADING)
		goto out;

	/* prep for resume */
	ata_eh_prep_resume(ap);

	/* skip EH if possible. */
	if (ata_eh_skip_recovery(ap))
		ehc->i.action = 0;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ehc->classes[i] = ATA_DEV_UNKNOWN;

	/* reset */
	if (ehc->i.action & ATA_EH_RESET_MASK) {
		ata_eh_freeze_port(ap);

		rc = ata_eh_reset(ap, ata_port_nr_vacant(ap), prereset,
				  softreset, hardreset, postreset);
		if (rc) {
			ata_port_printk(ap, KERN_ERR,
					"reset failed, giving up\n");
			goto out;
		}

		ata_eh_thaw_port(ap);
	}

	/* revalidate existing devices and attach new ones */
	rc = ata_eh_revalidate_and_attach(ap, &dev);
	if (rc)
		goto dev_fail;

	/* resume devices */
	rc = ata_eh_resume(ap, &dev);
	if (rc)
		goto dev_fail;

	/* configure transfer mode if the port has been reset */
	if (ehc->i.flags & ATA_EHI_DID_RESET) {
		rc = ata_set_mode(ap, &dev);
		if (rc) {
			down_xfermask = 1;
			goto dev_fail;
		}
	}

	/* suspend devices */
	rc = ata_eh_suspend(ap, &dev);
	if (rc)
		goto dev_fail;

	goto out;

 dev_fail:
	switch (rc) {
	case -ENODEV:
		/* device missing, schedule probing */
		ehc->i.probe_mask |= (1 << dev->devno);
		/* fall through */
	case -EINVAL:
		ehc->tries[dev->devno] = 0;
		break;
	case -EIO:
		sata_down_spd_limit(ap);
		/* fall through */
	default:
		ehc->tries[dev->devno]--;
		if (down_xfermask &&
		    ata_down_xfermask_limit(dev, ehc->tries[dev->devno] == 1))
			ehc->tries[dev->devno] = 0;
	}

	if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
		/* disable device if it has used up all its chances */
		ata_dev_disable(dev);

		/* detach if offline */
		if (ata_port_offline(ap))
			ata_eh_detach_dev(dev);

		/* probe if requested */
		if ((ehc->i.probe_mask & (1 << dev->devno)) &&
		    !(ehc->did_probe_mask & (1 << dev->devno))) {
			ata_eh_detach_dev(dev);
			ata_dev_init(dev);

			ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
			ehc->did_probe_mask |= (1 << dev->devno);
			ehc->i.action |= ATA_EH_SOFTRESET;
		}
	} else {
		/* soft didn't work?  be haaaaard */
		if (ehc->i.flags & ATA_EHI_DID_RESET)
			ehc->i.action |= ATA_EH_HARDRESET;
		else
			ehc->i.action |= ATA_EH_SOFTRESET;
	}

	if (ata_port_nr_enabled(ap)) {
		ata_port_printk(ap, KERN_WARNING, "failed to recover some "
				"devices, retrying in 5 secs\n");
		ssleep(5);
	} else {
		/* no device left, repeat fast */
		msleep(500);
	}

	goto retry;

 out:
	if (rc) {
		for (i = 0; i < ATA_MAX_DEVICES; i++)
			ata_dev_disable(&ap->device[i]);
	}

	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
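
/*
 * Illustrative sketch, not part of libata: hypothetical driver-side code
 * showing how the work consumed by ata_eh_recover() above is typically
 * produced.  An interrupt handler that hits a fatal error records it in
 * the port's eh_info and freezes the port; the EH thread then runs
 * autopsy, reporting and recovery.  The caller is assumed to hold
 * ap->lock, as interrupt handlers do.
 */
static void example_handle_fatal_irq_error(struct ata_port *ap)
{
	struct ata_eh_info *ehi = &ap->eh_info;

	/* describe the failure and the recovery action we want */
	ehi->err_mask |= AC_ERR_HOST_BUS;
	ehi->action |= ATA_EH_SOFTRESET;

	/* freeze the port; this aborts in-flight qcs and schedules EH */
	ata_port_freeze(ap);
}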
/**
 *	ata_eh_finish - finish up EH
 *	@ap: host port to finish EH for
 *
 *	Recovery is complete.  Clean up EH states and retry or finish
 *	failed qcs.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_finish(struct ata_port *ap)
{
	int tag;

	/* retry or finish qcs */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask) {
			/* FIXME: Once EH migration is complete,
			 * generate sense data in this function,
			 * considering both err_mask and tf.
			 */
			if (qc->err_mask & AC_ERR_INVALID)
				ata_eh_qc_complete(qc);
			else
				ata_eh_qc_retry(qc);
		} else {
			if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
				ata_eh_qc_complete(qc);
			} else {
				/* feed zero TF to sense generation */
				memset(&qc->result_tf, 0, sizeof(qc->result_tf));
				ata_eh_qc_retry(qc);
			}
		}
	}
}
/**
 *	ata_do_eh - do standard error handling
 *	@ap: host port to handle error for
 *	@prereset: prereset method (can be NULL)
 *	@softreset: softreset method (can be NULL)
 *	@hardreset: hardreset method (can be NULL)
 *	@postreset: postreset method (can be NULL)
 *
 *	Perform standard error handling sequence.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
	       ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
	       ata_postreset_fn_t postreset)
{
	ata_eh_autopsy(ap);
	ata_eh_report(ap);
	ata_eh_recover(ap, prereset, softreset, hardreset, postreset);
	ata_eh_finish(ap);
}
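
/*
 * Illustrative sketch, not part of this file: a hypothetical LLD's
 * ->error_handler usually just forwards its reset methods to ata_do_eh().
 * The ata_std_prereset/softreset/postreset and sata_std_hardreset helpers
 * named here are the generic libata methods; a real driver may substitute
 * its own or pass NULL for methods it does not support.
 */
static void example_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, ata_std_prereset, ata_std_softreset,
		  sata_std_hardreset, ata_std_postreset);
}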
/**
 *	ata_eh_handle_port_suspend - perform port suspend operation
 *	@ap: port to suspend
 *
 *	Suspend @ap.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{
	unsigned long flags;
	int rc = 0;

	/* are we suspending? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event == PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);

	/* suspend */
	ata_eh_freeze_port(ap);

	if (ap->ops->port_suspend)
		rc = ap->ops->port_suspend(ap, ap->pm_mesg);

	/* report result */
	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_PM_PENDING;
	if (rc == 0)
		ap->pflags |= ATA_PFLAG_SUSPENDED;
	else
		ata_port_schedule_eh(ap);

	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return;
}
/**
 *	ata_eh_handle_port_resume - perform port resume operation
 *	@ap: port to resume
 *
 *	Resume @ap.
 *
 *	This function also waits up to one second until all devices
 *	hanging off this port request the resume EH action.  This is
 *	to prevent invoking EH, and thus reset, multiple times on
 *	resume.
 *
 *	On DPM resume, where some devices might not be resumed
 *	together, this may delay port resume by up to one second, but
 *	such DPM resumes are rare and a one-second delay isn't too
 *	bad.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_handle_port_resume(struct ata_port *ap)
{
	unsigned long timeout;
	unsigned long flags;
	int i, rc = 0;

	/* are we resuming? */
	spin_lock_irqsave(ap->lock, flags);
	if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
	    ap->pm_mesg.event != PM_EVENT_ON) {
		spin_unlock_irqrestore(ap->lock, flags);
		return;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* spurious? */
	if (!(ap->pflags & ATA_PFLAG_SUSPENDED))
		goto done;

	if (ap->ops->port_resume)
		rc = ap->ops->port_resume(ap);

	/* give devices time to request EH */
	timeout = jiffies + HZ; /* 1s max */
	while (1) {
		for (i = 0; i < ATA_MAX_DEVICES; i++) {
			struct ata_device *dev = &ap->device[i];
			unsigned int action = ata_eh_dev_action(dev);

			if ((dev->flags & ATA_DFLAG_SUSPENDED) &&
			    !(action & ATA_EH_RESUME))
				break;
		}

		if (i == ATA_MAX_DEVICES || time_after(jiffies, timeout))
			break;
		msleep(10);
	}

 done:
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
	if (ap->pm_result) {
		*ap->pm_result = rc;
		ap->pm_result = NULL;
	}
	spin_unlock_irqrestore(ap->lock, flags);
}
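
/*
 * Illustrative sketch, not part of this file: hypothetical ->port_suspend
 * and ->port_resume hooks matching the calls made above.  They only need
 * to handle controller-side state; per-device spin-down, spin-up and
 * revalidation are handled by the EH actions earlier in this file.
 */
static int example_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	/* e.g. mask controller interrupts and save controller registers */
	return 0;
}

static int example_port_resume(struct ata_port *ap)
{
	/* e.g. restore controller registers and re-enable interrupts */
	return 0;
}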