/*
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 * Copyright 2001-2012 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */

#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>

#include <linux/atomic.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/** Overview:
 *  EEH, or "Extended Error Handling" is a PCI bridge technology for
 *  dealing with PCI bus errors that can't be dealt with within the
 *  usual PCI framework, except by check-stopping the CPU.  Systems
 *  that are designed for high-availability/reliability cannot afford
 *  to crash due to a "mere" PCI error, thus the need for EEH.
 *  An EEH-capable bridge operates by converting a detected error
 *  into a "slot freeze", taking the PCI adapter off-line, making
 *  the slot behave, from the OS's point of view, as if the slot
 *  were "empty": all reads return 0xff's and all writes are silently
 *  ignored.  EEH slot isolation events can be triggered by parity
 *  errors on the address or data busses (e.g. during posted writes),
 *  which in turn might be caused by low voltage on the bus, dust,
 *  vibration, humidity, radioactivity or plain-old failed hardware.
 *
 *  Note, however, that one of the leading causes of EEH slot
 *  freeze events is buggy device drivers, buggy device microcode,
 *  or buggy device hardware.  This is because any attempt by the
 *  device to bus-master data to a memory address that is not
 *  assigned to the device will trigger a slot freeze.  (The idea
 *  is to prevent devices-gone-wild from corrupting system memory.)
 *  Buggy hardware/drivers will have a miserable time co-existing
 *  with EEH.
 *
 *  Ideally, a PCI device driver, when suspecting that an isolation
 *  event has occurred (e.g. by reading 0xff's), will then ask EEH
 *  whether this is the case, and then take appropriate steps to
 *  reset the PCI slot, the PCI device, and then resume operations.
 *  However, until that day, the checking is done here, with the
 *  eeh_check_failure() routine embedded in the MMIO macros.  If
 *  the slot is found to be isolated, an "EEH Event" is synthesized
 *  and sent out for processing.
 */
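
/*
 * Illustrative sketch (not part of the original flow): a device driver
 * that suspects a slot freeze after reading all-1s could ask EEH about it
 * explicitly.  The device, register offset and private-data names below
 * are hypothetical; on powerpc the check normally happens automatically
 * inside the MMIO accessors via eeh_check_failure().
 *
 *	u32 val = readl(priv->regs + FOO_STATUS);	// hypothetical register
 *	if (val == ~0U) {
 *		// All-1s may mean a frozen slot rather than real data;
 *		// eeh_dn_check_failure() queries firmware and queues an
 *		// EEH event if the slot really is isolated.
 *		struct device_node *dn = pci_device_to_OF_node(priv->pdev);
 *		if (eeh_dn_check_failure(dn, priv->pdev))
 *			return -EIO;	// recovery proceeds via EEH
 *	}
 */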

/* If a device driver keeps reading an MMIO register in an interrupt
 * handler after a slot isolation event, it might be broken.
 * This sets the threshold for how many read attempts we allow
 * before printing an error message.
 */
#define EEH_MAX_FAILS	2100000

/* Time to wait for a PCI slot to report status, in milliseconds */
#define PCI_BUS_RESET_WAIT_MSEC (60*1000)

/* RTAS tokens */
static int ibm_configure_bridge;
static int ibm_configure_pe;

/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;

int eeh_subsystem_enabled;
EXPORT_SYMBOL(eeh_subsystem_enabled);

/* Lock to avoid races due to multiple reports of an error */
static DEFINE_RAW_SPINLOCK(confirm_error_lock);

/* Buffer for reporting pci register dumps. It's here in BSS, and
 * not dynamically alloced, so that it ends up in RMO where RTAS
 * can access it.
 */
#define EEH_PCI_REGS_LOG_LEN 4096
static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];

/* System monitoring statistics */
static unsigned long no_device;
static unsigned long no_dn;
static unsigned long no_cfg_addr;
static unsigned long ignored_check;
static unsigned long total_mmio_ffs;
static unsigned long false_positives;
static unsigned long slot_resets;

/* class_code carries the base class in bits 23:16 */
#define IS_BRIDGE(class_code) (((class_code)>>16) == PCI_BASE_CLASS_BRIDGE)

/**
 * eeh_gather_pci_data - Copy assorted PCI config space registers to buf
 * @pdn: device to report data for
 * @buf: pointer to buffer in which to log
 * @len: amount of room in buffer
 *
 * This routine captures assorted PCI configuration space data,
 * and puts them into a buffer for RTAS error logging.
 */
static size_t eeh_gather_pci_data(struct pci_dn *pdn, char *buf, size_t len)
{
	struct pci_dev *dev = pdn->pcidev;
	u32 cfg;
	int cap, i;
	int n = 0;

	n += scnprintf(buf+n, len-n, "%s\n", pdn->node->full_name);
	printk(KERN_WARNING "EEH: of node=%s\n", pdn->node->full_name);

	rtas_read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
	n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
	printk(KERN_WARNING "EEH: PCI device/vendor: %08x\n", cfg);

	rtas_read_config(pdn, PCI_COMMAND, 4, &cfg);
	n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
	printk(KERN_WARNING "EEH: PCI cmd/status register: %08x\n", cfg);

	if (!dev) {
		printk(KERN_WARNING "EEH: no PCI device for this of node\n");
		return n;
	}

	/* Gather bridge-specific registers */
	if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
		rtas_read_config(pdn, PCI_SEC_STATUS, 2, &cfg);
		n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
		printk(KERN_WARNING "EEH: Bridge secondary status: %04x\n", cfg);

		rtas_read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg);
		n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
		printk(KERN_WARNING "EEH: Bridge control: %04x\n", cfg);
	}

	/* Dump out the PCI-X command and status regs */
	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (cap) {
		rtas_read_config(pdn, cap, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
		printk(KERN_WARNING "EEH: PCI-X cmd: %08x\n", cfg);

		rtas_read_config(pdn, cap+4, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
		printk(KERN_WARNING "EEH: PCI-X status: %08x\n", cfg);
	}

	/* If PCI-E capable, dump PCI-E cap 10, and the AER */
	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (cap) {
		n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
		printk(KERN_WARNING
		       "EEH: PCI-E capabilities and status follow:\n");

		for (i=0; i<=8; i++) {
			rtas_read_config(pdn, cap+4*i, 4, &cfg);
			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
			printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg);
		}

		cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
		if (cap) {
			n += scnprintf(buf+n, len-n, "pci-e AER:\n");
			printk(KERN_WARNING
			       "EEH: PCI-E AER capability register set follows:\n");

			for (i=0; i<14; i++) {
				rtas_read_config(pdn, cap+4*i, 4, &cfg);
				n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
				printk(KERN_WARNING "EEH: PCI-E AER %02x: %08x\n", i, cfg);
			}
		}
	}

	/* Gather status on devices under the bridge */
	if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
		struct device_node *dn;

		for_each_child_of_node(pdn->node, dn) {
			pdn = PCI_DN(dn);
			if (pdn)
				n += eeh_gather_pci_data(pdn, buf+n, len-n);
		}
	}

	return n;
}

/**
 * eeh_slot_error_detail - Generate combined log including driver log and error log
 * @pdn: device node
 * @severity: temporary or permanent error log
 *
 * This routine should be called to generate the combined log, which
 * is comprised of driver log and error log. The driver log is figured
 * out from the config space of the corresponding PCI device, while
 * the error log is fetched through platform dependent function call.
 */
void eeh_slot_error_detail(struct pci_dn *pdn, int severity)
{
	size_t loglen = 0;

	pci_regs_buf[0] = 0;

	eeh_pci_enable(pdn, EEH_OPT_THAW_MMIO);
	eeh_configure_bridge(pdn);
	eeh_restore_bars(pdn);
	loglen = eeh_gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);

	eeh_ops->get_log(pdn->node, severity, pci_regs_buf, loglen);
}

/**
 * eeh_token_to_phys - Convert EEH address token to phys address
 * @token: I/O token, should be address in the form 0xA....
 *
 * This routine should be called to convert virtual I/O address
 * to physical one.
 */
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
	pte_t *ptep;
	unsigned long pa;

	ptep = find_linux_pte(init_mm.pgd, token);
	if (!ptep)
		return token;
	pa = pte_pfn(*ptep) << PAGE_SHIFT;

	return pa | (token & (PAGE_SIZE-1));
}

/**
 * eeh_find_device_pe - Retrieve the PE for the given device
 * @dn: device node
 *
 * Return the PE under which this device lies
 */
struct device_node *eeh_find_device_pe(struct device_node *dn)
{
	while (dn->parent && PCI_DN(dn->parent) &&
	       (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
		dn = dn->parent;
	}

	return dn;
}

/**
 * __eeh_mark_slot - Mark all child devices as failed
 * @parent: parent device
 * @mode_flag: failure flag
 *
 * Mark all devices that are children of this device as failed.
 * Mark the device driver too, so that it can see the failure
 * immediately; this is critical, since some drivers poll
 * status registers in interrupts ... If a driver is polling,
 * and the slot is frozen, then the driver can deadlock in
 * an interrupt context, which is bad.
 */
static void __eeh_mark_slot(struct device_node *parent, int mode_flag)
{
	struct device_node *dn;

	for_each_child_of_node(parent, dn) {
		if (PCI_DN(dn)) {
			/* Mark the pci device driver too */
			struct pci_dev *dev = PCI_DN(dn)->pcidev;

			PCI_DN(dn)->eeh_mode |= mode_flag;

			if (dev && dev->driver)
				dev->error_state = pci_channel_io_frozen;

			__eeh_mark_slot(dn, mode_flag);
		}
	}
}

/**
 * eeh_mark_slot - Mark the indicated device and its children as failed
 * @dn: parent device
 * @mode_flag: failure flag
 *
 * Mark the indicated device and its child devices as failed.
 * The device drivers are marked as failed as well.
 */
void eeh_mark_slot(struct device_node *dn, int mode_flag)
{
	struct pci_dev *dev;

	dn = eeh_find_device_pe(dn);

	/* Back up one, since config addrs might be shared */
	if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
		dn = dn->parent;

	PCI_DN(dn)->eeh_mode |= mode_flag;

	/* Mark the pci device too */
	dev = PCI_DN(dn)->pcidev;
	if (dev)
		dev->error_state = pci_channel_io_frozen;

	__eeh_mark_slot(dn, mode_flag);
}

/**
 * __eeh_clear_slot - Clear failure flag for the child devices
 * @parent: parent device
 * @mode_flag: flag to be cleared
 *
 * Clear failure flag for the child devices.
 */
static void __eeh_clear_slot(struct device_node *parent, int mode_flag)
{
	struct device_node *dn;

	for_each_child_of_node(parent, dn) {
		if (PCI_DN(dn)) {
			PCI_DN(dn)->eeh_mode &= ~mode_flag;
			PCI_DN(dn)->eeh_check_count = 0;
			__eeh_clear_slot(dn, mode_flag);
		}
	}
}

/**
 * eeh_clear_slot - Clear failure flag for the indicated device and its children
 * @dn: parent device
 * @mode_flag: flag to be cleared
 *
 * Clear failure flag for the indicated device and its children.
 */
void eeh_clear_slot(struct device_node *dn, int mode_flag)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&confirm_error_lock, flags);

	dn = eeh_find_device_pe(dn);

	/* Back up one, since config addrs might be shared */
	if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
		dn = dn->parent;

	PCI_DN(dn)->eeh_mode &= ~mode_flag;
	PCI_DN(dn)->eeh_check_count = 0;
	__eeh_clear_slot(dn, mode_flag);
	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
}

/**
 * eeh_dn_check_failure - Check if all 1's data is due to EEH slot freeze
 * @dn: device node
 * @dev: pci device, if known
 *
 * Check for an EEH failure for the given device node.  Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze.  This routine
 * will query firmware for the EEH status.
 *
 * Returns 0 if there has not been an EEH error; otherwise returns
 * a non-zero value and queues up a slot isolation event notification.
 *
 * It is safe to call this routine in an interrupt context.
 */
int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
{
	int ret;
	unsigned long flags;
	struct pci_dn *pdn;
	int rc = 0;
	const char *location;

	total_mmio_ffs++;

	if (!eeh_subsystem_enabled)
		return 0;

	if (!dn) {
		no_dn++;
		return 0;
	}
	dn = eeh_find_device_pe(dn);
	pdn = PCI_DN(dn);

	/* Access to IO BARs might get this far and still not want checking. */
	if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
	    pdn->eeh_mode & EEH_MODE_NOCHECK) {
		ignored_check++;
		pr_debug("EEH: Ignored check (%x) for %s %s\n",
			 pdn->eeh_mode, eeh_pci_name(dev), dn->full_name);
		return 0;
	}

	if (!pdn->eeh_config_addr && !pdn->eeh_pe_config_addr) {
		no_cfg_addr++;
		return 0;
	}

	/* If we already have a pending isolation event for this
	 * slot, we know it's bad already, we don't need to check.
	 * Do this checking under a lock; as multiple PCI devices
	 * in one slot might report errors simultaneously, and we
	 * only want one error recovery routine running.
	 */
	raw_spin_lock_irqsave(&confirm_error_lock, flags);
	rc = 1;
	if (pdn->eeh_mode & EEH_MODE_ISOLATED) {
		pdn->eeh_check_count++;
		if (pdn->eeh_check_count % EEH_MAX_FAILS == 0) {
			location = of_get_property(dn, "ibm,loc-code", NULL);
			printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
				"location=%s driver=%s pci addr=%s\n",
				pdn->eeh_check_count, location,
				eeh_driver_name(dev), eeh_pci_name(dev));
			printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
				eeh_driver_name(dev));
			dump_stack();
		}
		goto dn_unlock;
	}

	/*
	 * Now test for an EEH failure.  This is VERY expensive.
	 * Note that the eeh_config_addr may be a parent device
	 * in the case of a device behind a bridge, or it may be
	 * function zero of a multi-function device.
	 * In any case they must share a common PHB.
	 */
	ret = eeh_ops->get_state(pdn->node, NULL);

	/* Note that config-io to empty slots may fail;
	 * they are empty when they don't have children.
	 * We will punt under the following conditions: failure to get
	 * the PE's state, EEH not supported, a permanently unavailable
	 * state, or the PE being in a good state.
	 */
	if ((ret < 0) ||
	    (ret == EEH_STATE_NOT_SUPPORT) ||
	    (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
	    (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
		false_positives++;
		pdn->eeh_false_positives++;
		rc = 0;
		goto dn_unlock;
	}

	slot_resets++;

	/* Avoid repeated reports of this failure, including problems
	 * with other functions on this device, and functions under
	 * bridges.
	 */
	eeh_mark_slot(dn, EEH_MODE_ISOLATED);
	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);

	eeh_send_failure_event(dn, dev);

	/* Most EEH events are due to device driver bugs.  Having
	 * a stack trace will help the device-driver authors figure
	 * out what happened.  So print that out.
	 */
	dump_stack();
	return 1;

dn_unlock:
	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
	return rc;
}

EXPORT_SYMBOL_GPL(eeh_dn_check_failure);

/**
 * eeh_check_failure - Check if all 1's data is due to EEH slot freeze
 * @token: I/O token, should be address in the form 0xA....
 * @val: value, should be all 1's (XXX why do we need this arg??)
 *
 * Check for an EEH failure at the given token address.  Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze event.  This routine
 * will query firmware for the EEH status.
 *
 * Note this routine is safe to call in an interrupt context.
 */
unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
{
	unsigned long addr;
	struct pci_dev *dev;
	struct device_node *dn;

	/* Finding the phys addr + pci device; this is pretty quick. */
	addr = eeh_token_to_phys((unsigned long __force) token);
	dev = pci_get_device_by_addr(addr);
	if (!dev) {
		no_device++;
		return val;
	}

	dn = pci_device_to_OF_node(dev);
	eeh_dn_check_failure(dn, dev);

	pci_dev_put(dev);
	return val;
}

EXPORT_SYMBOL(eeh_check_failure);
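
/*
 * For context (not part of the original text of this file): the hook into
 * the MMIO path that the overview above mentions lives in the io accessors,
 * not here.  A simplified sketch of how such an accessor might wrap
 * eeh_check_failure() is shown below; the helper name eeh_readl() and the
 * way the all-1s test is spelled are illustrative assumptions, the real
 * definitions live in asm/eeh.h and asm/io.h.
 *
 *	static inline u32 eeh_readl(const volatile void __iomem *addr)
 *	{
 *		u32 val = in_le32(addr);	// raw MMIO read
 *		if (val == (u32)~0)		// all-1s: maybe a frozen slot
 *			return eeh_check_failure(addr, val);
 *		return val;
 *	}
 */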

/**
 * eeh_pci_enable - Enable MMIO or DMA transfers for this slot
 * @pdn: pci device node
 * @function: EEH option to set for the slot (e.g. EEH_OPT_THAW_MMIO)
 *
 * This routine should be called to reenable frozen MMIO or DMA
 * so that it would work correctly again. It's useful while doing
 * recovery or log collection on the indicated device.
 */
int eeh_pci_enable(struct pci_dn *pdn, int function)
{
	int rc;

	rc = eeh_ops->set_option(pdn->node, function);
	if (rc)
		printk(KERN_WARNING "EEH: Unexpected state change %d, err=%d dn=%s\n",
			function, rc, pdn->node->full_name);

	rc = eeh_ops->wait_state(pdn->node, PCI_BUS_RESET_WAIT_MSEC);
	if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) &&
	    (function == EEH_OPT_THAW_MMIO))
		return 0;

	return rc;
}

/**
 * pcibios_set_pcie_reset_state - Set PCI-E reset state
 * @dev: pci device struct
 * @state: reset state to enter
 *
 * Return value:
 *	0 if success
 */
int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	struct device_node *dn = pci_device_to_OF_node(dev);

	switch (state) {
	case pcie_deassert_reset:
		eeh_ops->reset(dn, EEH_RESET_DEACTIVATE);
		break;
	case pcie_hot_reset:
		eeh_ops->reset(dn, EEH_RESET_HOT);
		break;
	case pcie_warm_reset:
		eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
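
/*
 * Aside (illustrative, not from the original file): drivers do not call
 * pcibios_set_pcie_reset_state() directly.  The generic PCI core wrapper
 * pci_set_pcie_reset_state() is the public entry point; for example, a
 * driver that wants a warm reset before reloading adapter firmware might do:
 *
 *	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
 *	msleep(100);				// hypothetical settle time
 *	pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
 */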

/**
 * __eeh_set_pe_freset - Check the required reset for child devices
 * @parent: parent device
 * @freset: return value
 *
 * Each device might have its preferred reset type: fundamental or
 * hot reset. The routine is used to collect the information from
 * the child devices so that they could be reset accordingly.
 */
void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
{
	struct device_node *dn;

	for_each_child_of_node(parent, dn) {
		if (PCI_DN(dn)) {
			struct pci_dev *dev = PCI_DN(dn)->pcidev;

			if (dev && dev->driver)
				*freset |= dev->needs_freset;

			__eeh_set_pe_freset(dn, freset);
		}
	}
}

/**
 * eeh_set_pe_freset - Check the required reset for the indicated device and its children
 * @dn: parent device
 * @freset: return value
 *
 * Each device might have its preferred reset type: fundamental or
 * hot reset. The routine is used to collect the information for
 * the indicated device and its children so that the whole group of
 * devices can be reset properly.
 */
void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
{
	struct pci_dev *dev;

	dn = eeh_find_device_pe(dn);

	/* Back up one, since config addrs might be shared */
	if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
		dn = dn->parent;

	dev = PCI_DN(dn)->pcidev;
	if (dev)
		*freset |= dev->needs_freset;

	__eeh_set_pe_freset(dn, freset);
}

/**
 * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second
 * @pdn: pci device node to be reset.
 *
 * Assert the PCI #RST line for 1/4 second.
 */
static void eeh_reset_pe_once(struct pci_dn *pdn)
{
	unsigned int freset = 0;

	/* Determine type of EEH reset required for
	 * Partitionable Endpoint, a hot-reset (1)
	 * or a fundamental reset (3).
	 * A fundamental reset required by any device under
	 * Partitionable Endpoint trumps hot-reset.
	 */
	eeh_set_pe_freset(pdn->node, &freset);

	if (freset)
		eeh_ops->reset(pdn->node, EEH_RESET_FUNDAMENTAL);
	else
		eeh_ops->reset(pdn->node, EEH_RESET_HOT);

	/* The PCI bus requires that the reset be held high for at least
	 * 100 milliseconds. We wait a bit longer 'just in case'.
	 */
#define PCI_BUS_RST_HOLD_TIME_MSEC 250
	msleep(PCI_BUS_RST_HOLD_TIME_MSEC);

	/* We might get hit with another EEH freeze as soon as the
	 * pci slot reset line is dropped. Make sure we don't miss
	 * these, and clear the flag now.
	 */
	eeh_clear_slot(pdn->node, EEH_MODE_ISOLATED);

	eeh_ops->reset(pdn->node, EEH_RESET_DEACTIVATE);

	/* After a PCI slot has been reset, the PCI Express spec requires
	 * a 1.5 second idle time for the bus to stabilize, before starting
	 * up traffic.
	 */
#define PCI_BUS_SETTLE_TIME_MSEC 1800
	msleep(PCI_BUS_SETTLE_TIME_MSEC);
}

/**
 * eeh_reset_pe - Reset the indicated PE
 * @pdn: PCI device node
 *
 * This routine should be called to reset the indicated device, including
 * its PE. A PE might include multiple PCI devices and sometimes PCI bridges
 * might be involved as well.
 */
int eeh_reset_pe(struct pci_dn *pdn)
{
	int i, rc;

	/* Take three shots at resetting the bus */
	for (i=0; i<3; i++) {
		eeh_reset_pe_once(pdn);

		rc = eeh_ops->wait_state(pdn->node, PCI_BUS_RESET_WAIT_MSEC);
		if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
			return 0;

		if (rc < 0) {
			printk(KERN_ERR "EEH: unrecoverable slot failure %s\n",
			       pdn->node->full_name);
			return -1;
		}
		printk(KERN_ERR "EEH: bus reset %d failed on slot %s, rc=%d\n",
		       i+1, pdn->node->full_name, rc);
	}

	return -1;
}

/** Save and restore of PCI BARs
 *
 * Although firmware will set up BARs during boot, it doesn't
 * set up device BARs after a device reset, although it will,
 * if requested, set up bridge configuration. Thus, we need to
 * configure the PCI devices ourselves.
 */

/**
 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
 * @pdn: pci device node
 *
 * Loads the PCI configuration space base address registers,
 * the expansion ROM base address, the latency timer, etc.
 * from the saved values in the device node.
 */
static inline void eeh_restore_one_device_bars(struct pci_dn *pdn)
{
	int i;
	u32 cmd;

	if (NULL == pdn->phb)
		return;

	for (i=4; i<10; i++) {
		rtas_write_config(pdn, i*4, 4, pdn->config_space[i]);
	}

	/* 12 == Expansion ROM Address */
	rtas_write_config(pdn, 12*4, 4, pdn->config_space[12]);

#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF) (((u8 *)(pdn->config_space))[BYTE_SWAP(OFF)])
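
	/*
	 * Note (added for clarity): config_space[] was filled by
	 * eeh_save_bars() with 4-byte rtas_read_config() reads, so on this
	 * big-endian platform the least-significant byte of each saved
	 * dword sits at byte index 4*word+3.  BYTE_SWAP() maps a
	 * config-space byte offset onto that layout; for example
	 * PCI_CACHE_LINE_SIZE (offset 0x0c) maps to 8*(12/4) + 3 - 12 = 15,
	 * i.e. the low byte of saved word 3.
	 */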

	rtas_write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
			  SAVED_BYTE(PCI_CACHE_LINE_SIZE));

	rtas_write_config(pdn, PCI_LATENCY_TIMER, 1,
			  SAVED_BYTE(PCI_LATENCY_TIMER));

	/* max latency, min grant, interrupt pin and line */
	rtas_write_config(pdn, 15*4, 4, pdn->config_space[15]);

	/* Restore PERR & SERR bits, some devices require it,
	 * don't touch the other command bits
	 */
	rtas_read_config(pdn, PCI_COMMAND, 4, &cmd);
	if (pdn->config_space[1] & PCI_COMMAND_PARITY)
		cmd |= PCI_COMMAND_PARITY;
	else
		cmd &= ~PCI_COMMAND_PARITY;
	if (pdn->config_space[1] & PCI_COMMAND_SERR)
		cmd |= PCI_COMMAND_SERR;
	else
		cmd &= ~PCI_COMMAND_SERR;
	rtas_write_config(pdn, PCI_COMMAND, 4, cmd);
}

/**
 * eeh_restore_bars - Restore the PCI config space info
 * @pdn: PCI device node
 *
 * This routine performs a recursive walk to the children
 * of this device as well.
 */
void eeh_restore_bars(struct pci_dn *pdn)
{
	struct device_node *dn;

	if (!pdn)
		return;

	if ((pdn->eeh_mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(pdn->class_code))
		eeh_restore_one_device_bars(pdn);

	for_each_child_of_node(pdn->node, dn)
		eeh_restore_bars(PCI_DN(dn));
}

/**
 * eeh_save_bars - Save device bars
 * @pdn: PCI device node
 *
 * Save the values of the device bars. Unlike the restore
 * routine, this routine is *not* recursive. This is because
 * PCI devices are added individually; but, for the restore,
 * an entire slot is reset at a time.
 */
static void eeh_save_bars(struct pci_dn *pdn)
{
	int i;

	if (!pdn)
		return;

	for (i = 0; i < 16; i++)
		rtas_read_config(pdn, i * 4, 4, &pdn->config_space[i]);
}

/**
 * eeh_configure_bridge - Configure PCI bridges for the indicated PE
 * @pdn: PCI device node
 *
 * PCI bridges might be included in a PE. In order to make the PE work
 * again, the included PCI bridges should be recovered after the PE
 * encounters a frozen state.
 */
void eeh_configure_bridge(struct pci_dn *pdn)
{
	int config_addr;
	int rc;
	int token;

	/* Use PE configuration address, if present */
	config_addr = pdn->eeh_config_addr;
	if (pdn->eeh_pe_config_addr)
		config_addr = pdn->eeh_pe_config_addr;

	/* Use new configure-pe function, if supported */
	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE)
		token = ibm_configure_pe;
	else
		token = ibm_configure_bridge;

	rc = rtas_call(token, 3, 1, NULL,
		       config_addr,
		       BUID_HI(pdn->phb->buid),
		       BUID_LO(pdn->phb->buid));
	if (rc) {
		printk(KERN_WARNING "EEH: Unable to configure device bridge (%d) for %s\n",
			rc, pdn->node->full_name);
	}
}

/**
 * eeh_early_enable - Early enable EEH on the indicated device
 * @dn: device node
 * @data: BUID
 *
 * Enable EEH functionality on the specified PCI device. The function
 * is expected to be called before real PCI probing is done. However,
 * the PHBs have been initialized at this point.
 */
static void *eeh_early_enable(struct device_node *dn, void *data)
{
	int ret;
	const u32 *class_code = of_get_property(dn, "class-code", NULL);
	const u32 *vendor_id = of_get_property(dn, "vendor-id", NULL);
	const u32 *device_id = of_get_property(dn, "device-id", NULL);
	const u32 *regs;
	int enable;
	struct pci_dn *pdn = PCI_DN(dn);

	pdn->class_code = 0;
	pdn->eeh_mode = 0;
	pdn->eeh_check_count = 0;
	pdn->eeh_freeze_count = 0;
	pdn->eeh_false_positives = 0;

	if (!of_device_is_available(dn))
		return NULL;

	/* Ignore bad nodes. */
	if (!class_code || !vendor_id || !device_id)
		return NULL;

	/* There is nothing to check on PCI to ISA bridges */
	if (dn->type && !strcmp(dn->type, "isa")) {
		pdn->eeh_mode |= EEH_MODE_NOCHECK;
		return NULL;
	}
	pdn->class_code = *class_code;

	/* Ok... see if this device supports EEH.  Some do, some don't,
	 * and the only way to find out is to check each and every one.
	 */
	regs = of_get_property(dn, "reg", NULL);
	if (regs) {
		/* First register entry is addr (00BBSS00) */
		/* Try to enable eeh */
		ret = eeh_ops->set_option(dn, EEH_OPT_ENABLE);

		enable = 0;
		if (ret == 0) {
			pdn->eeh_config_addr = regs[0];

			/* If the newer, better, ibm,get-config-addr-info is supported,
			 * then use that instead.
			 */
			pdn->eeh_pe_config_addr = eeh_ops->get_pe_addr(dn);

			/* Some older systems (Power4) allow the
			 * ibm,set-eeh-option call to succeed even on nodes
			 * where EEH is not supported. Verify support
			 * explicitly.
			 */
			ret = eeh_ops->get_state(pdn->node, NULL);
			if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
				enable = 1;
		}

		if (enable) {
			eeh_subsystem_enabled = 1;
			pdn->eeh_mode |= EEH_MODE_SUPPORTED;

			pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n",
				 dn->full_name, pdn->eeh_config_addr,
				 pdn->eeh_pe_config_addr);
		} else {
			/* This device doesn't support EEH, but it may have an
			 * EEH parent, in which case we mark it as supported.
			 */
			if (dn->parent && PCI_DN(dn->parent) &&
			    (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
				/* Parent supports EEH. */
				pdn->eeh_mode |= EEH_MODE_SUPPORTED;
				pdn->eeh_config_addr = PCI_DN(dn->parent)->eeh_config_addr;
				return NULL;
			}
		}
	} else {
		printk(KERN_WARNING "EEH: %s: unable to get reg property.\n",
		       dn->full_name);
	}

	eeh_save_bars(pdn);
	return NULL;
}

/**
 * eeh_ops_register - Register platform dependent EEH operations
 * @ops: platform dependent EEH operations
 *
 * Register the platform dependent EEH operation callback
 * functions. The platform should call this function before
 * any other EEH operations.
 */
int __init eeh_ops_register(struct eeh_ops *ops)
{
	if (!ops->name) {
		pr_warning("%s: Invalid EEH ops name for %p\n",
			   __func__, ops);
		return -EINVAL;
	}

	if (eeh_ops && eeh_ops != ops) {
		pr_warning("%s: EEH ops of platform %s already existing (%s)\n",
			   __func__, eeh_ops->name, ops->name);
		return -EEXIST;
	}

	eeh_ops = ops;

	return 0;
}
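
/*
 * Illustrative sketch (assumed, not from this file): a platform would
 * typically provide a static struct eeh_ops and register it from its own
 * setup code before eeh_init() runs.  Only the callbacks actually used in
 * this file are shown, and the pseries_eeh_* names are hypothetical here.
 *
 *	static struct eeh_ops pseries_eeh_ops = {
 *		.name		= "pseries",
 *		.init		= pseries_eeh_init,
 *		.set_option	= pseries_eeh_set_option,
 *		.get_pe_addr	= pseries_eeh_get_pe_addr,
 *		.get_state	= pseries_eeh_get_state,
 *		.reset		= pseries_eeh_reset,
 *		.wait_state	= pseries_eeh_wait_state,
 *		.get_log	= pseries_eeh_get_log,
 *	};
 *
 *	// in platform setup, before eeh_init():
 *	eeh_ops_register(&pseries_eeh_ops);
 */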

/**
 * eeh_ops_unregister - Unregister platform dependent EEH operations
 * @name: name of EEH platform operations
 *
 * Unregister the platform dependent EEH operation callback
 * functions.
 */
int __exit eeh_ops_unregister(const char *name)
{
	if (!name || !strlen(name)) {
		pr_warning("%s: Invalid EEH ops name\n",
			   __func__);
		return -EINVAL;
	}

	if (eeh_ops && !strcmp(eeh_ops->name, name)) {
		eeh_ops = NULL;
		return 0;
	}

	return -EEXIST;
}

/**
 * eeh_init - EEH initialization
 *
 * Initialize EEH by trying to enable it for all of the adapters in the system.
 * As a side effect we can determine here if eeh is supported at all.
 * Note that we leave EEH on so failed config cycles won't cause a machine
 * check.  If a user turns off EEH for a particular adapter they are really
 * telling Linux to ignore errors.  Some hardware (e.g. POWER5) won't
 * grant access to a slot if EEH isn't enabled, and so we always enable
 * EEH for all slots/all devices.
 *
 * The eeh-force-off option disables EEH checking globally, for all slots.
 * Even if force-off is set, the EEH hardware is still enabled, so that
 * newer systems can boot.
 */
void __init eeh_init(void)
{
	struct device_node *phb, *np;
	int ret;

	/* call platform initialization function */
	if (!eeh_ops) {
		pr_warning("%s: Platform EEH operation not found\n",
			   __func__);
		return;
	} else if ((ret = eeh_ops->init())) {
		pr_warning("%s: Failed to call platform init function (%d)\n",
			   __func__, ret);
		return;
	}

	raw_spin_lock_init(&confirm_error_lock);

	np = of_find_node_by_path("/rtas");
	if (np == NULL)
		return;

	ibm_configure_bridge = rtas_token("ibm,configure-bridge");
	ibm_configure_pe = rtas_token("ibm,configure-pe");

	/* Enable EEH for all adapters.  Note that eeh requires buid's */
	for (phb = of_find_node_by_name(NULL, "pci"); phb;
	     phb = of_find_node_by_name(phb, "pci")) {
		unsigned long buid;

		buid = get_phb_buid(phb);
		if (buid == 0 || PCI_DN(phb) == NULL)
			continue;

		traverse_pci_devices(phb, eeh_early_enable, NULL);
	}

	if (eeh_subsystem_enabled)
		printk(KERN_INFO "EEH: PCI Enhanced I/O Error Handling Enabled\n");
	else
		printk(KERN_WARNING "EEH: No capable adapters found\n");
}

/**
 * eeh_add_device_early - Enable EEH for the indicated device_node
 * @dn: device node for which to set up EEH
 *
 * This routine must be used to perform EEH initialization for PCI
 * devices that were added after system boot (e.g. hotplug, dlpar).
 * This routine must be called before any i/o is performed to the
 * adapter (including any config-space i/o).
 * Whether this actually enables EEH or not for this device depends
 * on the CEC architecture, type of the device, on earlier boot
 * command-line arguments, etc.
 */
static void eeh_add_device_early(struct device_node *dn)
{
	struct pci_controller *phb;

	if (!dn || !PCI_DN(dn))
		return;
	phb = PCI_DN(dn)->phb;

	/* USB Bus children of PCI devices will not have BUID's */
	if (NULL == phb || 0 == phb->buid)
		return;

	eeh_early_enable(dn, NULL);
}

/**
 * eeh_add_device_tree_early - Enable EEH for the indicated device
 * @dn: device node
 *
 * This routine must be used to perform EEH initialization for the
 * indicated PCI device that was added after system boot (e.g.
 * hotplug, dlpar).
 */
void eeh_add_device_tree_early(struct device_node *dn)
{
	struct device_node *sib;

	for_each_child_of_node(dn, sib)
		eeh_add_device_tree_early(sib);
	eeh_add_device_early(dn);
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);

/**
 * eeh_add_device_late - Perform EEH initialization for the indicated pci device
 * @dev: pci device for which to set up EEH
 *
 * This routine must be used to complete EEH initialization for PCI
 * devices that were added after system boot (e.g. hotplug, dlpar).
 */
static void eeh_add_device_late(struct pci_dev *dev)
{
	struct device_node *dn;
	struct pci_dn *pdn;

	if (!dev || !eeh_subsystem_enabled)
		return;

	pr_debug("EEH: Adding device %s\n", pci_name(dev));

	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	if (pdn->pcidev == dev) {
		pr_debug("EEH: Already referenced !\n");
		return;
	}
	WARN_ON(pdn->pcidev);

	pci_dev_get(dev);
	pdn->pcidev = dev;

	pci_addr_cache_insert_device(dev);
	eeh_sysfs_add_device(dev);
}

/**
 * eeh_add_device_tree_late - Perform EEH initialization for the indicated PCI bus
 * @bus: PCI bus
 *
 * This routine must be used to perform EEH initialization for PCI
 * devices which are attached to the indicated PCI bus. The PCI bus
 * is added after system boot through hotplug or dlpar.
 */
void eeh_add_device_tree_late(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		eeh_add_device_late(dev);
		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
			struct pci_bus *subbus = dev->subordinate;
			if (subbus)
				eeh_add_device_tree_late(subbus);
		}
	}
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
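
/*
 * Typical usage (illustrative, based on how the two exported helpers above
 * are meant to pair up; the actual hotplug/dlpar call sites live elsewhere
 * and are not part of this file):
 *
 *	eeh_add_device_tree_early(dn);		// before any config-space i/o
 *	bus = ...;				// scan/attach the new devices
 *	eeh_add_device_tree_late(bus);		// after the pci_dev structs exist
 */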

/**
 * eeh_remove_device - Undo EEH setup for the indicated pci device
 * @dev: pci device to be removed
 *
 * This routine should be called when a device is removed from
 * a running system (e.g. by hotplug or dlpar).  It unregisters
 * the PCI device from the EEH subsystem.  I/O errors affecting
 * this device will no longer be detected after this call; thus,
 * i/o errors affecting this slot may leave this device unusable.
 */
static void eeh_remove_device(struct pci_dev *dev)
{
	struct device_node *dn;

	if (!dev || !eeh_subsystem_enabled)
		return;

	/* Unregister the device with the EEH/PCI address search system */
	pr_debug("EEH: Removing device %s\n", pci_name(dev));

	dn = pci_device_to_OF_node(dev);
	if (PCI_DN(dn)->pcidev == NULL) {
		pr_debug("EEH: Not referenced !\n");
		return;
	}
	PCI_DN(dn)->pcidev = NULL;
	pci_dev_put(dev);

	pci_addr_cache_remove_device(dev);
	eeh_sysfs_remove_device(dev);
}

/**
 * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
 * @dev: PCI device
 *
 * This routine must be called when a device is removed from the
 * running system through hotplug or dlpar. The corresponding
 * PCI address cache will be removed.
 */
void eeh_remove_bus_device(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->subordinate;
	struct pci_dev *child, *tmp;

	eeh_remove_device(dev);

	if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
			eeh_remove_bus_device(child);
	}
}
EXPORT_SYMBOL_GPL(eeh_remove_bus_device);

static int proc_eeh_show(struct seq_file *m, void *v)
{
	if (0 == eeh_subsystem_enabled) {
		seq_printf(m, "EEH Subsystem is globally disabled\n");
		seq_printf(m, "eeh_total_mmio_ffs=%ld\n", total_mmio_ffs);
	} else {
		seq_printf(m, "EEH Subsystem is enabled\n");
		seq_printf(m,
			   "no device=%ld\n"
			   "no device node=%ld\n"
			   "no config address=%ld\n"
			   "check not wanted=%ld\n"
			   "eeh_total_mmio_ffs=%ld\n"
			   "eeh_false_positives=%ld\n"
			   "eeh_slot_resets=%ld\n",
			   no_device, no_dn, no_cfg_addr,
			   ignored_check, total_mmio_ffs,
			   false_positives,
			   slot_resets);
	}

	return 0;
}

static int proc_eeh_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_eeh_show, NULL);
}

static const struct file_operations proc_eeh_operations = {
	.open		= proc_eeh_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init eeh_init_proc(void)
{
	if (machine_is(pseries))
		proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
	return 0;
}
__initcall(eeh_init_proc);