/*
 * eeh.c
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */

#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/atomic.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/** Overview:
 * EEH, or "Extended Error Handling" is a PCI bridge technology for
 * dealing with PCI bus errors that can't be dealt with within the
 * usual PCI framework, except by check-stopping the CPU.  Systems
 * that are designed for high-availability/reliability cannot afford
 * to crash due to a "mere" PCI error, thus the need for EEH.
 * An EEH-capable bridge operates by converting a detected error
 * into a "slot freeze", taking the PCI adapter off-line, making
 * the slot behave, from the OS's point of view, as if the slot
 * were "empty": all reads return 0xff's and all writes are silently
 * ignored.  EEH slot isolation events can be triggered by parity
 * errors on the address or data busses (e.g. during posted writes),
 * which in turn might be caused by low voltage on the bus, dust,
 * vibration, humidity, radioactivity or plain-old failed hardware.
 *
 * Note, however, that one of the leading causes of EEH slot
 * freeze events is buggy device drivers, buggy device microcode,
 * or buggy device hardware.  This is because any attempt by the
 * device to bus-master data to a memory address that is not
 * assigned to the device will trigger a slot freeze.  (The idea
 * is to prevent devices-gone-wild from corrupting system memory).
 * Buggy hardware/drivers will have a miserable time co-existing
 * with EEH.
 *
 * Ideally, a PCI device driver, when suspecting that an isolation
 * event has occurred (e.g. by reading 0xff's), will then ask EEH
 * whether this is the case, and then take appropriate steps to
 * reset the PCI slot, the PCI device, and then resume operations.
 * However, until that day, the checking is done here, with the
 * eeh_check_failure() routine embedded in the MMIO macros.  If
 * the slot is found to be isolated, an "EEH Event" is synthesized
 * and sent out for processing.
 */

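/* A minimal sketch of the check described above, assuming a driver that
 * holds a struct pci_dev *pdev and an ioremap'ed register block; the
 * STATUS_REG offset is hypothetical.  A non-zero return from
 * eeh_dn_check_failure() means the slot is frozen and a recovery event
 * has already been queued.  In practice the powerpc MMIO accessors
 * perform the equivalent test by calling eeh_check_failure() whenever
 * a read returns all 1's:
 *
 *	u32 val = in_le32(regs + STATUS_REG);
 *	if (val == 0xffffffff &&
 *	    eeh_dn_check_failure(pci_device_to_OF_node(pdev), pdev))
 *		return;
 */
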
/* If a device driver keeps reading an MMIO register in an interrupt
 * handler after a slot isolation event, it might be broken.
 * This sets the threshold for how many read attempts we allow
 * before printing an error message.
 */
#define EEH_MAX_FAILS	2100000

/* Time to wait for a PCI slot to report status, in milliseconds */
#define PCI_BUS_RESET_WAIT_MSEC (60*1000)

/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_bridge;
static int ibm_configure_pe;

int eeh_subsystem_enabled;
EXPORT_SYMBOL(eeh_subsystem_enabled);

/* Lock to avoid races due to multiple reports of an error */
static DEFINE_RAW_SPINLOCK(confirm_error_lock);

/* Buffer for reporting slot-error-detail rtas calls.  It's here
 * in BSS, and not dynamically allocated, so that it ends up in
 * RMO where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;

/* Buffer for reporting pci register dumps.  It's here in BSS, and
 * not dynamically allocated, so that it ends up in RMO where RTAS
 * can access it.
 */
#define EEH_PCI_REGS_LOG_LEN 4096
static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];

/* System monitoring statistics */
static unsigned long no_device;
static unsigned long no_dn;
static unsigned long no_cfg_addr;
static unsigned long ignored_check;
static unsigned long total_mmio_ffs;
static unsigned long false_positives;
static unsigned long slot_resets;

#define IS_BRIDGE(class_code) (((class_code)>>16) == PCI_BASE_CLASS_BRIDGE)

/* --------------------------------------------------------------- */
/* Below lies the EEH event infrastructure */

static void rtas_slot_error_detail(struct pci_dn *pdn, int severity,
				   char *driver_log, size_t loglen)
{
	int config_addr;
	unsigned long flags;
	int rc;

	/* Log the error with the rtas logger */
	spin_lock_irqsave(&slot_errbuf_lock, flags);
	memset(slot_errbuf, 0, eeh_error_buf_size);

	/* Use PE configuration address, if present */
	config_addr = pdn->eeh_config_addr;
	if (pdn->eeh_pe_config_addr)
		config_addr = pdn->eeh_pe_config_addr;

	rc = rtas_call(ibm_slot_error_detail,
		       8, 1, NULL, config_addr,
		       BUID_HI(pdn->phb->buid),
		       BUID_LO(pdn->phb->buid),
		       virt_to_phys(driver_log), loglen,
		       virt_to_phys(slot_errbuf),
		       eeh_error_buf_size,
		       severity);

	if (rc == 0)
		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
	spin_unlock_irqrestore(&slot_errbuf_lock, flags);
}

/**
 * gather_pci_data - copy assorted PCI config space registers to buff
 * @pdn: device to report data for
 * @buf: pointer to buffer in which to log
 * @len: amount of room in buffer
 *
 * This routine captures assorted PCI configuration space data,
 * and puts them into a buffer for RTAS error logging.
 */
static size_t gather_pci_data(struct pci_dn *pdn, char *buf, size_t len)
{
	struct pci_dev *dev = pdn->pcidev;
	u32 cfg;
	int cap, i;
	int n = 0;

	n += scnprintf(buf+n, len-n, "%s\n", pdn->node->full_name);
	printk(KERN_WARNING "EEH: of node=%s\n", pdn->node->full_name);

	rtas_read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
	n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
	printk(KERN_WARNING "EEH: PCI device/vendor: %08x\n", cfg);

	rtas_read_config(pdn, PCI_COMMAND, 4, &cfg);
	n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
	printk(KERN_WARNING "EEH: PCI cmd/status register: %08x\n", cfg);

	if (!dev) {
		printk(KERN_WARNING "EEH: no PCI device for this of node\n");
		return n;
	}

	/* Gather bridge-specific registers */
	if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
		rtas_read_config(pdn, PCI_SEC_STATUS, 2, &cfg);
		n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
		printk(KERN_WARNING "EEH: Bridge secondary status: %04x\n", cfg);

		rtas_read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg);
		n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
		printk(KERN_WARNING "EEH: Bridge control: %04x\n", cfg);
	}

	/* Dump out the PCI-X command and status regs */
	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (cap) {
		rtas_read_config(pdn, cap, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
		printk(KERN_WARNING "EEH: PCI-X cmd: %08x\n", cfg);

		rtas_read_config(pdn, cap+4, 4, &cfg);
		n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
		printk(KERN_WARNING "EEH: PCI-X status: %08x\n", cfg);
	}

	/* If PCI-E capable, dump PCI-E cap 10, and the AER */
	cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (cap) {
		n += scnprintf(buf+n, len-n, "pci-e cap10:\n");
		printk(KERN_WARNING
		       "EEH: PCI-E capabilities and status follow:\n");

		for (i=0; i<=8; i++) {
			rtas_read_config(pdn, cap+4*i, 4, &cfg);
			n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
			printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg);
		}

		cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
		if (cap) {
			n += scnprintf(buf+n, len-n, "pci-e AER:\n");
			printk(KERN_WARNING
			       "EEH: PCI-E AER capability register set follows:\n");

			for (i=0; i<14; i++) {
				rtas_read_config(pdn, cap+4*i, 4, &cfg);
				n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
				printk(KERN_WARNING "EEH: PCI-E AER %02x: %08x\n", i, cfg);
			}
		}
	}

	/* Gather status on devices under the bridge */
	if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
		struct device_node *dn;

		for_each_child_of_node(pdn->node, dn) {
			pdn = PCI_DN(dn);
			if (pdn)
				n += gather_pci_data(pdn, buf+n, len-n);
		}
	}

	return n;
}

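/**
 * eeh_slot_error_detail - log slot error details and a PCI register dump
 * @pdn: pci device node for the frozen slot
 * @severity: severity value passed to the RTAS error logger
 *
 * Re-enables MMIO on the frozen slot, reconfigures the bridge and
 * restores the BARs so that config space can be read, then gathers a
 * PCI register dump and hands it to the RTAS slot-error-detail logger.
 */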
void eeh_slot_error_detail(struct pci_dn *pdn, int severity)
{
	size_t loglen = 0;
	pci_regs_buf[0] = 0;

	rtas_pci_enable(pdn, EEH_THAW_MMIO);
	rtas_configure_bridge(pdn);
	eeh_restore_bars(pdn);
	loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);

	rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen);
}

/**
 * read_slot_reset_state - Read the reset state of a device node's slot
 * @pdn: pci device node to read
 * @rets: array to return results in
 */
static int read_slot_reset_state(struct pci_dn *pdn, int rets[])
{
	int token, outputs;
	int config_addr;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		token = ibm_read_slot_reset_state2;
		outputs = 4;
	} else {
		token = ibm_read_slot_reset_state;
		rets[2] = 0; /* fake PE Unavailable info */
		outputs = 3;
	}

	/* Use PE configuration address, if present */
	config_addr = pdn->eeh_config_addr;
	if (pdn->eeh_pe_config_addr)
		config_addr = pdn->eeh_pe_config_addr;

	return rtas_call(token, 3, outputs, rets, config_addr,
			 BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid));
}

/**
 * eeh_wait_for_slot_status - returns error status of slot
 * @pdn: pci device node
 * @max_wait_msecs: maximum number of milliseconds to wait
 *
 * Return negative value if a permanent error, else return
 * Partition Endpoint (PE) status value.
 *
 * If @max_wait_msecs is positive, then this routine will
 * sleep until a valid status can be obtained, or until
 * the max allowed wait time is exceeded, in which case
 * a -2 is returned.
 */
int
eeh_wait_for_slot_status(struct pci_dn *pdn, int max_wait_msecs)
{
	int rc;
	int rets[3];
	int mwait;

	while (1) {
		rc = read_slot_reset_state(pdn, rets);
		if (rc) return rc;
		if (rets[1] == 0) return -1;	/* EEH is not supported */

		if (rets[0] != 5) return rets[0]; /* return actual status */

		if (rets[2] == 0) return -1;	/* permanently unavailable */

		if (max_wait_msecs <= 0) break;

		mwait = rets[2];
		if (mwait <= 0) {
			printk(KERN_WARNING
			       "EEH: Firmware returned bad wait value=%d\n", mwait);
			mwait = 1000;
		} else if (mwait > 300*1000) {
			printk(KERN_WARNING
			       "EEH: Firmware is taking too long, time=%d\n", mwait);
			mwait = 300*1000;
		}
		max_wait_msecs -= mwait;
		msleep(mwait);
	}

	printk(KERN_WARNING "EEH: Timed out waiting for slot status\n");
	return -2;
}

/**
 * eeh_token_to_phys - convert EEH address token to phys address
 * @token: i/o token, should be address in the form 0xA....
 */
static inline unsigned long eeh_token_to_phys(unsigned long token)
{
	pte_t *ptep;
	unsigned long pa;

	ptep = find_linux_pte(init_mm.pgd, token);
	if (!ptep)
		return token;
	pa = pte_pfn(*ptep) << PAGE_SHIFT;

	return pa | (token & (PAGE_SIZE-1));
}

/**
 * Return the "partitionable endpoint" (pe) under which this device lies
 */
struct device_node *find_device_pe(struct device_node *dn)
{
	while ((dn->parent) && PCI_DN(dn->parent) &&
	       (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
		dn = dn->parent;
	}
	return dn;
}

/** Mark all devices that are children of this device as failed.
 * Mark the device driver too, so that it can see the failure
 * immediately; this is critical, since some drivers poll
 * status registers in interrupts ... If a driver is polling,
 * and the slot is frozen, then the driver can deadlock in
 * an interrupt context, which is bad.
 */
static void __eeh_mark_slot(struct device_node *parent, int mode_flag)
{
	struct device_node *dn;

	for_each_child_of_node(parent, dn) {
		if (PCI_DN(dn)) {
			/* Mark the pci device driver too */
			struct pci_dev *dev = PCI_DN(dn)->pcidev;

			PCI_DN(dn)->eeh_mode |= mode_flag;

			if (dev && dev->driver)
				dev->error_state = pci_channel_io_frozen;

			__eeh_mark_slot(dn, mode_flag);
		}
	}
}

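/**
 * eeh_mark_slot - set the given EEH mode flag on every device in a slot
 * @dn: device node anywhere within the slot
 * @mode_flag: EEH_MODE_* flag to set (typically EEH_MODE_ISOLATED)
 *
 * Walks up to the partitionable endpoint containing @dn and then marks
 * the PE and all devices below it, so that every function in the slot
 * sees the frozen state.
 */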
void eeh_mark_slot(struct device_node *dn, int mode_flag)
{
	struct pci_dev *dev;
	dn = find_device_pe(dn);

	/* Back up one, since config addrs might be shared */
	if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
		dn = dn->parent;

	PCI_DN(dn)->eeh_mode |= mode_flag;

	/* Mark the pci device too */
	dev = PCI_DN(dn)->pcidev;
	if (dev)
		dev->error_state = pci_channel_io_frozen;

	__eeh_mark_slot(dn, mode_flag);
}

static void __eeh_clear_slot(struct device_node *parent, int mode_flag)
{
	struct device_node *dn;

	for_each_child_of_node(parent, dn) {
		if (PCI_DN(dn)) {
			PCI_DN(dn)->eeh_mode &= ~mode_flag;
			PCI_DN(dn)->eeh_check_count = 0;
			__eeh_clear_slot(dn, mode_flag);
		}
	}
}

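/**
 * eeh_clear_slot - clear the given EEH mode flag on every device in a slot
 * @dn: device node anywhere within the slot
 * @mode_flag: EEH_MODE_* flag to clear
 *
 * Counterpart to eeh_mark_slot(): clears the flag and the check count
 * on the partitionable endpoint and all devices below it, under the
 * confirm_error_lock so it cannot race with new error reports.
 */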
void eeh_clear_slot(struct device_node *dn, int mode_flag)
{
	unsigned long flags;
	raw_spin_lock_irqsave(&confirm_error_lock, flags);

	dn = find_device_pe(dn);

	/* Back up one, since config addrs might be shared */
	if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
		dn = dn->parent;

	PCI_DN(dn)->eeh_mode &= ~mode_flag;
	PCI_DN(dn)->eeh_check_count = 0;
	__eeh_clear_slot(dn, mode_flag);
	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
}

void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
{
	struct device_node *dn;

	for_each_child_of_node(parent, dn) {
		if (PCI_DN(dn)) {
			struct pci_dev *dev = PCI_DN(dn)->pcidev;

			if (dev && dev->driver)
				*freset |= dev->needs_freset;

			__eeh_set_pe_freset(dn, freset);
		}
	}
}

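/**
 * eeh_set_pe_freset - check whether a PE needs a fundamental reset
 * @dn: device node anywhere within the slot
 * @freset: ORed with a non-zero value if any device wants a fundamental reset
 *
 * Walks the partitionable endpoint containing @dn and ORs together the
 * needs_freset flag of the devices in the PE, so the caller can choose
 * between a hot reset and a fundamental reset for the whole slot.
 */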
void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
{
	struct pci_dev *dev;
	dn = find_device_pe(dn);

	/* Back up one, since config addrs might be shared */
	if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
		dn = dn->parent;

	dev = PCI_DN(dn)->pcidev;
	if (dev)
		*freset |= dev->needs_freset;

	__eeh_set_pe_freset(dn, freset);
}

/**
 * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze
 * @dn: device node
 * @dev: pci device, if known
 *
 * Check for an EEH failure for the given device node.  Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze.  This routine
 * will query firmware for the EEH status.
 *
 * Returns 0 if there has not been an EEH error; otherwise returns
 * a non-zero value and queues up a slot isolation event notification.
 *
 * It is safe to call this routine in an interrupt context.
 */
int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
{
	int ret;
	int rets[3];
	unsigned long flags;
	struct pci_dn *pdn;
	int rc = 0;
	const char *location;

	total_mmio_ffs++;

	if (!eeh_subsystem_enabled)
		return 0;

	if (!dn) {
		no_dn++;
		return 0;
	}
	dn = find_device_pe(dn);
	pdn = PCI_DN(dn);

	/* Access to IO BARs might get this far and still not want checking. */
	if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
	    pdn->eeh_mode & EEH_MODE_NOCHECK) {
		ignored_check++;
		pr_debug("EEH: Ignored check (%x) for %s %s\n",
			 pdn->eeh_mode, eeh_pci_name(dev), dn->full_name);
		return 0;
	}

	if (!pdn->eeh_config_addr && !pdn->eeh_pe_config_addr) {
		no_cfg_addr++;
		return 0;
	}

	/* If we already have a pending isolation event for this
	 * slot, we know it's bad already, we don't need to check.
	 * Do this checking under a lock; as multiple PCI devices
	 * in one slot might report errors simultaneously, and we
	 * only want one error recovery routine running.
	 */
	raw_spin_lock_irqsave(&confirm_error_lock, flags);
	rc = 1;
	if (pdn->eeh_mode & EEH_MODE_ISOLATED) {
		pdn->eeh_check_count++;
		if (pdn->eeh_check_count % EEH_MAX_FAILS == 0) {
			location = of_get_property(dn, "ibm,loc-code", NULL);
			printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
			       "location=%s driver=%s pci addr=%s\n",
			       pdn->eeh_check_count, location,
			       dev->driver->name, eeh_pci_name(dev));
			printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
			       dev->driver->name);
			dump_stack();
		}
		goto dn_unlock;
	}

	/*
	 * Now test for an EEH failure.  This is VERY expensive.
	 * Note that the eeh_config_addr may be a parent device
	 * in the case of a device behind a bridge, or it may be
	 * function zero of a multi-function device.
	 * In any case they must share a common PHB.
	 */
	ret = read_slot_reset_state(pdn, rets);

	/* If the call to firmware failed, punt */
	if (ret != 0) {
		printk(KERN_WARNING "EEH: read_slot_reset_state() failed; rc=%d dn=%s\n",
		       ret, dn->full_name);
		false_positives++;
		pdn->eeh_false_positives++;
		rc = 0;
		goto dn_unlock;
	}

	/* Note that config-io to empty slots may fail;
	 * they are empty when they don't have children. */
	if ((rets[0] == 5) && (rets[2] == 0) && (dn->child == NULL)) {
		false_positives++;
		pdn->eeh_false_positives++;
		rc = 0;
		goto dn_unlock;
	}

	/* If EEH is not supported on this device, punt. */
	if (rets[1] != 1) {
		printk(KERN_WARNING "EEH: event on unsupported device, rc=%d dn=%s\n",
		       ret, dn->full_name);
		false_positives++;
		pdn->eeh_false_positives++;
		rc = 0;
		goto dn_unlock;
	}

	/* If not the kind of error we know about, punt. */
	if (rets[0] != 1 && rets[0] != 2 && rets[0] != 4 && rets[0] != 5) {
		false_positives++;
		pdn->eeh_false_positives++;
		rc = 0;
		goto dn_unlock;
	}

	slot_resets++;

	/* Avoid repeated reports of this failure, including problems
	 * with other functions on this device, and functions under
	 * bridges. */
	eeh_mark_slot(dn, EEH_MODE_ISOLATED);
	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);

	eeh_send_failure_event(dn, dev);

	/* Most EEH events are due to device driver bugs.  Having
	 * a stack trace will help the device-driver authors figure
	 * out what happened.  So print that out. */
	dump_stack();
	return 1;

dn_unlock:
	raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
	return rc;
}

EXPORT_SYMBOL_GPL(eeh_dn_check_failure);

/**
 * eeh_check_failure - check if all 1's data is due to EEH slot freeze
 * @token: i/o token, should be address in the form 0xA....
 * @val: value, should be all 1's (XXX why do we need this arg??)
 *
 * Check for an EEH failure at the given token address.  Call this
 * routine if the result of a read was all 0xff's and you want to
 * find out if this is due to an EEH slot freeze event.  This routine
 * will query firmware for the EEH status.
 *
 * Note this routine is safe to call in an interrupt context.
 */
unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
{
	unsigned long addr;
	struct pci_dev *dev;
	struct device_node *dn;

	/* Finding the phys addr + pci device; this is pretty quick. */
	addr = eeh_token_to_phys((unsigned long __force) token);
	dev = pci_get_device_by_addr(addr);
	if (!dev) {
		no_device++;
		return val;
	}

	dn = pci_device_to_OF_node(dev);
	eeh_dn_check_failure(dn, dev);

	pci_dev_put(dev);
	return val;
}

EXPORT_SYMBOL(eeh_check_failure);

/* ------------------------------------------------------------- */
/* The code below deals with error recovery */

/**
 * rtas_pci_enable - enable MMIO or DMA transfers for this slot
 * @pdn: pci device node
 * @function: EEH option to pass to firmware (e.g. EEH_THAW_MMIO)
 */
int
rtas_pci_enable(struct pci_dn *pdn, int function)
{
	int config_addr;
	int rc;

	/* Use PE configuration address, if present */
	config_addr = pdn->eeh_config_addr;
	if (pdn->eeh_pe_config_addr)
		config_addr = pdn->eeh_pe_config_addr;

	rc = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
		       config_addr,
		       BUID_HI(pdn->phb->buid),
		       BUID_LO(pdn->phb->buid),
		       function);

	if (rc)
		printk(KERN_WARNING "EEH: Unexpected state change %d, err=%d dn=%s\n",
		       function, rc, pdn->node->full_name);

	rc = eeh_wait_for_slot_status(pdn, PCI_BUS_RESET_WAIT_MSEC);
	if ((rc == 4) && (function == EEH_THAW_MMIO))
		return 0;

	return rc;
}

/**
 * rtas_pci_slot_reset - raises/lowers the pci #RST line
 * @pdn: pci device node
 * @state: 1/0 to raise/lower the #RST
 *
 * Clear the EEH-frozen condition on a slot.  This routine
 * asserts the PCI #RST line if the 'state' argument is '1',
 * and drops the #RST line if 'state' is '0'.  This routine is
 * safe to call in an interrupt context.
 */
static void
rtas_pci_slot_reset(struct pci_dn *pdn, int state)
{
	int config_addr;
	int rc;

	BUG_ON(pdn == NULL);

	if (!pdn->phb) {
		printk(KERN_WARNING "EEH: in slot reset, device node %s has no phb\n",
		       pdn->node->full_name);
		return;
	}

	/* Use PE configuration address, if present */
	config_addr = pdn->eeh_config_addr;
	if (pdn->eeh_pe_config_addr)
		config_addr = pdn->eeh_pe_config_addr;

	rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
		       config_addr,
		       BUID_HI(pdn->phb->buid),
		       BUID_LO(pdn->phb->buid),
		       state);

	/* Fundamental-reset not supported on this PE, try hot-reset */
	if (rc == -8 && state == 3) {
		rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
			       config_addr,
			       BUID_HI(pdn->phb->buid),
			       BUID_LO(pdn->phb->buid), 1);
		if (rc)
			printk(KERN_WARNING
			       "EEH: Unable to reset the failed slot,"
			       " #RST=%d dn=%s\n",
			       rc, pdn->node->full_name);
	}
}

/**
 * pcibios_set_pcie_reset_state - Set PCI-E reset state
 * @dev: pci device struct
 * @state: reset state to enter
 *
 * Return value:
 *	0 if success
 **/
int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	struct device_node *dn = pci_device_to_OF_node(dev);
	struct pci_dn *pdn = PCI_DN(dn);

	switch (state) {
	case pcie_deassert_reset:
		rtas_pci_slot_reset(pdn, 0);
		break;
	case pcie_hot_reset:
		rtas_pci_slot_reset(pdn, 1);
		break;
	case pcie_warm_reset:
		rtas_pci_slot_reset(pdn, 3);
		break;
	default:
		return -EINVAL;
	};

	return 0;
}

/**
 * __rtas_set_slot_reset - assert the pci #RST line for 1/4 second
 * @pdn: pci device node to be reset.
 */
static void __rtas_set_slot_reset(struct pci_dn *pdn)
{
	unsigned int freset = 0;

	/* Determine type of EEH reset required for
	 * Partitionable Endpoint, a hot-reset (1)
	 * or a fundamental reset (3).
	 * A fundamental reset required by any device under
	 * Partitionable Endpoint trumps hot-reset.
	 */
	eeh_set_pe_freset(pdn->node, &freset);

	if (freset)
		rtas_pci_slot_reset(pdn, 3);
	else
		rtas_pci_slot_reset(pdn, 1);

	/* The PCI bus requires that the reset be held high for at least
	 * 100 milliseconds.  We wait a bit longer 'just in case'. */
#define PCI_BUS_RST_HOLD_TIME_MSEC 250
	msleep(PCI_BUS_RST_HOLD_TIME_MSEC);

	/* We might get hit with another EEH freeze as soon as the
	 * pci slot reset line is dropped.  Make sure we don't miss
	 * these, and clear the flag now. */
	eeh_clear_slot(pdn->node, EEH_MODE_ISOLATED);

	rtas_pci_slot_reset(pdn, 0);

	/* After a PCI slot has been reset, the PCI Express spec requires
	 * a 1.5 second idle time for the bus to stabilize, before starting
	 * up traffic. */
#define PCI_BUS_SETTLE_TIME_MSEC 1800
	msleep(PCI_BUS_SETTLE_TIME_MSEC);
}

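/**
 * rtas_set_slot_reset - reset the given slot, retrying up to three times
 * @pdn: pci device node to be reset
 *
 * Returns 0 if the slot reports a usable status after one of the reset
 * attempts, and -1 if the slot is permanently unavailable or all three
 * attempts fail.
 */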
int rtas_set_slot_reset(struct pci_dn *pdn)
{
	int i, rc;

	/* Take three shots at resetting the bus */
	for (i=0; i<3; i++) {
		__rtas_set_slot_reset(pdn);

		rc = eeh_wait_for_slot_status(pdn, PCI_BUS_RESET_WAIT_MSEC);
		if (rc == 0)
			return 0;

		if (rc < 0) {
			printk(KERN_ERR "EEH: unrecoverable slot failure %s\n",
			       pdn->node->full_name);
			return -1;
		}

		printk(KERN_ERR "EEH: bus reset %d failed on slot %s, rc=%d\n",
		       i+1, pdn->node->full_name, rc);
	}

	return -1;
}

/* ------------------------------------------------------- */
/** Save and restore of PCI BARs
 *
 * Although firmware will set up BARs during boot, it doesn't
 * set up device BARs after a device reset, although it will,
 * if requested, set up bridge configuration.  Thus, we need to
 * configure the PCI devices ourselves.
 */

/**
 * __restore_bars - Restore the Base Address Registers
 * @pdn: pci device node
 *
 * Loads the PCI configuration space base address registers,
 * the expansion ROM base address, the latency timer, etc.
 * from the saved values in the device node.
 */
static inline void __restore_bars(struct pci_dn *pdn)
{
	int i;
	u32 cmd;

	if (NULL == pdn->phb) return;
	for (i=4; i<10; i++) {
		rtas_write_config(pdn, i*4, 4, pdn->config_space[i]);
	}

	/* 12 == Expansion ROM Address */
	rtas_write_config(pdn, 12*4, 4, pdn->config_space[12]);

#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF) (((u8 *)(pdn->config_space))[BYTE_SWAP(OFF)])

	rtas_write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
			  SAVED_BYTE(PCI_CACHE_LINE_SIZE));

	rtas_write_config(pdn, PCI_LATENCY_TIMER, 1,
			  SAVED_BYTE(PCI_LATENCY_TIMER));

	/* max latency, min grant, interrupt pin and line */
	rtas_write_config(pdn, 15*4, 4, pdn->config_space[15]);

	/* Restore PERR & SERR bits, some devices require it,
	 * don't touch the other command bits */
	rtas_read_config(pdn, PCI_COMMAND, 4, &cmd);
	if (pdn->config_space[1] & PCI_COMMAND_PARITY)
		cmd |= PCI_COMMAND_PARITY;
	else
		cmd &= ~PCI_COMMAND_PARITY;
	if (pdn->config_space[1] & PCI_COMMAND_SERR)
		cmd |= PCI_COMMAND_SERR;
	else
		cmd &= ~PCI_COMMAND_SERR;
	rtas_write_config(pdn, PCI_COMMAND, 4, cmd);
}

/**
 * eeh_restore_bars - restore the PCI config space info
 * @pdn: pci device node
 *
 * This routine performs a recursive walk to the children
 * of this device as well.
 */
void eeh_restore_bars(struct pci_dn *pdn)
{
	struct device_node *dn;

	if (!pdn)
		return;

	if ((pdn->eeh_mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(pdn->class_code))
		__restore_bars(pdn);

	for_each_child_of_node(pdn->node, dn)
		eeh_restore_bars(PCI_DN(dn));
}

/**
 * eeh_save_bars - save device bars
 * @pdn: pci device node
 *
 * Save the values of the device bars.  Unlike the restore
 * routine, this routine is *not* recursive.  This is because
 * PCI devices are added individually; but, for the restore,
 * an entire slot is reset at a time.
 */
static void eeh_save_bars(struct pci_dn *pdn)
{
	int i;

	if (!pdn)
		return;

	for (i = 0; i < 16; i++)
		rtas_read_config(pdn, i * 4, 4, &pdn->config_space[i]);
}

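/**
 * rtas_configure_bridge - ask firmware to re-configure bridges in a PE
 * @pdn: pci device node
 *
 * Uses the newer ibm,configure-pe RTAS call when it is available,
 * falling back to the older ibm,configure-bridge call, so that firmware
 * re-initializes any bridges in the slot after a reset.
 */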
void
rtas_configure_bridge(struct pci_dn *pdn)
{
	int config_addr;
	int rc;
	int token;

	/* Use PE configuration address, if present */
	config_addr = pdn->eeh_config_addr;
	if (pdn->eeh_pe_config_addr)
		config_addr = pdn->eeh_pe_config_addr;

	/* Use new configure-pe function, if supported */
	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE)
		token = ibm_configure_pe;
	else
		token = ibm_configure_bridge;

	rc = rtas_call(token, 3, 1, NULL,
		       config_addr,
		       BUID_HI(pdn->phb->buid),
		       BUID_LO(pdn->phb->buid));

	if (rc) {
		printk(KERN_WARNING "EEH: Unable to configure device bridge (%d) for %s\n",
		       rc, pdn->node->full_name);
	}
}

/* ------------------------------------------------------------- */
/* The code below deals with enabling EEH for devices during the
 * early boot sequence.  EEH must be enabled before any PCI probing
 * can be done.
 */
#define EEH_ENABLE 1

struct eeh_early_enable_info {
	unsigned int buid_hi;
	unsigned int buid_lo;
};

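/**
 * get_pe_addr - retrieve the PE configuration address for a device
 * @config_addr: traditional config address of the device
 * @info: BUID of the host bridge the device sits under
 *
 * Queries firmware for the partitionable endpoint address, preferring
 * the newer ibm,get-config-addr-info2 call over the older
 * ibm,get-config-addr-info call.  Returns 0 if neither call is
 * supported or no PE address is available.
 */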
static int get_pe_addr(int config_addr,
		       struct eeh_early_enable_info *info)
{
	unsigned int rets[3];
	int ret;

	/* Use latest config-addr token on power6 */
	if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
		/* Make sure we have a PE in hand */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				config_addr, info->buid_hi, info->buid_lo, 1);
		if (ret || (rets[0] == 0))
			return 0;

		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				config_addr, info->buid_hi, info->buid_lo, 0);
		if (ret)
			return 0;
		return rets[0];
	}

	/* Use older config-addr token on power5 */
	if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
				config_addr, info->buid_hi, info->buid_lo, 0);
		if (ret)
			return 0;
		return rets[0];
	}
	return 0;
}

/* Enable eeh for the given device node. */
static void *early_enable_eeh(struct device_node *dn, void *data)
{
	unsigned int rets[3];
	struct eeh_early_enable_info *info = data;
	int ret;
	const u32 *class_code = of_get_property(dn, "class-code", NULL);
	const u32 *vendor_id = of_get_property(dn, "vendor-id", NULL);
	const u32 *device_id = of_get_property(dn, "device-id", NULL);
	const u32 *regs;
	int enable;
	struct pci_dn *pdn = PCI_DN(dn);

	pdn->class_code = 0;
	pdn->eeh_mode = 0;
	pdn->eeh_check_count = 0;
	pdn->eeh_freeze_count = 0;
	pdn->eeh_false_positives = 0;

	if (!of_device_is_available(dn))
		return NULL;

	/* Ignore bad nodes. */
	if (!class_code || !vendor_id || !device_id)
		return NULL;

	/* There is nothing to check on PCI to ISA bridges */
	if (dn->type && !strcmp(dn->type, "isa")) {
		pdn->eeh_mode |= EEH_MODE_NOCHECK;
		return NULL;
	}
	pdn->class_code = *class_code;

	/* Ok... see if this device supports EEH.  Some do, some don't,
	 * and the only way to find out is to check each and every one. */
	regs = of_get_property(dn, "reg", NULL);
	if (regs) {
		/* First register entry is addr (00BBSS00) */
		/* Try to enable eeh */
		ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
				regs[0], info->buid_hi, info->buid_lo,
				EEH_ENABLE);

		enable = 0;
		if (ret == 0) {
			pdn->eeh_config_addr = regs[0];

			/* If the newer, better, ibm,get-config-addr-info is supported,
			 * then use that instead. */
			pdn->eeh_pe_config_addr = get_pe_addr(pdn->eeh_config_addr, info);

			/* Some older systems (Power4) allow the
			 * ibm,set-eeh-option call to succeed even on nodes
			 * where EEH is not supported.  Verify support
			 * explicitly. */
			ret = read_slot_reset_state(pdn, rets);
			if ((ret == 0) && (rets[1] == 1))
				enable = 1;
		}

		if (enable) {
			eeh_subsystem_enabled = 1;
			pdn->eeh_mode |= EEH_MODE_SUPPORTED;

			pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n",
				 dn->full_name, pdn->eeh_config_addr,
				 pdn->eeh_pe_config_addr);
		} else {
			/* This device doesn't support EEH, but it may have an
			 * EEH parent, in which case we mark it as supported. */
			if (dn->parent && PCI_DN(dn->parent) &&
			    (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
				/* Parent supports EEH. */
				pdn->eeh_mode |= EEH_MODE_SUPPORTED;
				pdn->eeh_config_addr = PCI_DN(dn->parent)->eeh_config_addr;
				return NULL;
			}
		}
	} else {
		printk(KERN_WARNING "EEH: %s: unable to get reg property.\n",
		       dn->full_name);
	}

	eeh_save_bars(pdn);
	return NULL;
}

/*
 * Initialize EEH by trying to enable it for all of the adapters in the system.
 * As a side effect we can determine here if eeh is supported at all.
 * Note that we leave EEH on so failed config cycles won't cause a machine
 * check.  If a user turns off EEH for a particular adapter they are really
 * telling Linux to ignore errors.  Some hardware (e.g. POWER5) won't
 * grant access to a slot if EEH isn't enabled, and so we always enable
 * EEH for all slots/all devices.
 *
 * The eeh-force-off option disables EEH checking globally, for all slots.
 * Even if force-off is set, the EEH hardware is still enabled, so that
 * newer systems can boot.
 */
void __init eeh_init(void)
{
	struct device_node *phb, *np;
	struct eeh_early_enable_info info;

	raw_spin_lock_init(&confirm_error_lock);
	spin_lock_init(&slot_errbuf_lock);

	np = of_find_node_by_path("/rtas");
	if (np == NULL)
		return;

	ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
	ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
	ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
	ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
	ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
	ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
	ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
	ibm_configure_bridge = rtas_token("ibm,configure-bridge");
	ibm_configure_pe = rtas_token("ibm,configure-pe");

	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
		return;

	eeh_error_buf_size = rtas_token("rtas-error-log-max");
	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
		eeh_error_buf_size = 1024;
	}
	if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
		printk(KERN_WARNING "EEH: rtas-error-log-max is bigger than allocated "
		       "buffer ! (%d vs %d)", eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
	}

	/* Enable EEH for all adapters.  Note that eeh requires buid's */
	for (phb = of_find_node_by_name(NULL, "pci"); phb;
	     phb = of_find_node_by_name(phb, "pci")) {
		unsigned long buid;

		buid = get_phb_buid(phb);
		if (buid == 0 || PCI_DN(phb) == NULL)
			continue;

		info.buid_lo = BUID_LO(buid);
		info.buid_hi = BUID_HI(buid);
		traverse_pci_devices(phb, early_enable_eeh, &info);
	}

	if (eeh_subsystem_enabled)
		printk(KERN_INFO "EEH: PCI Enhanced I/O Error Handling Enabled\n");
	else
		printk(KERN_WARNING "EEH: No capable adapters found\n");
}

/**
 * eeh_add_device_early - enable EEH for the indicated device_node
 * @dn: device node for which to set up EEH
 *
 * This routine must be used to perform EEH initialization for PCI
 * devices that were added after system boot (e.g. hotplug, dlpar).
 * This routine must be called before any i/o is performed to the
 * adapter (including any config-space i/o).
 * Whether this actually enables EEH or not for this device depends
 * on the CEC architecture, type of the device, on earlier boot
 * command-line arguments & etc.
 */
static void eeh_add_device_early(struct device_node *dn)
{
	struct pci_controller *phb;
	struct eeh_early_enable_info info;

	if (!dn || !PCI_DN(dn))
		return;
	phb = PCI_DN(dn)->phb;

	/* USB Bus children of PCI devices will not have BUID's */
	if (NULL == phb || 0 == phb->buid)
		return;

	info.buid_hi = BUID_HI(phb->buid);
	info.buid_lo = BUID_LO(phb->buid);
	early_enable_eeh(dn, &info);
}

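/**
 * eeh_add_device_tree_early - enable EEH for an entire device sub-tree
 * @dn: root of the device_node sub-tree to set up
 *
 * Recursively performs the early EEH setup for @dn and every device
 * node below it; children are handled before the node itself.
 */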
void eeh_add_device_tree_early(struct device_node *dn)
{
	struct device_node *sib;

	for_each_child_of_node(dn, sib)
		eeh_add_device_tree_early(sib);
	eeh_add_device_early(dn);
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);

/**
 * eeh_add_device_late - perform EEH initialization for the indicated pci device
 * @dev: pci device for which to set up EEH
 *
 * This routine must be used to complete EEH initialization for PCI
 * devices that were added after system boot (e.g. hotplug, dlpar).
 */
static void eeh_add_device_late(struct pci_dev *dev)
{
	struct device_node *dn;
	struct pci_dn *pdn;

	if (!dev || !eeh_subsystem_enabled)
		return;

	pr_debug("EEH: Adding device %s\n", pci_name(dev));

	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	if (pdn->pcidev == dev) {
		pr_debug("EEH: Already referenced !\n");
		return;
	}
	WARN_ON(pdn->pcidev);

	pci_dev_get(dev);
	pdn->pcidev = dev;

	pci_addr_cache_insert_device(dev);
	eeh_sysfs_add_device(dev);
}

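/**
 * eeh_add_device_tree_late - complete EEH setup for a whole PCI bus
 * @bus: PCI bus for which to finish EEH setup
 *
 * Walks every device on @bus, completes its EEH setup, and recurses
 * into the subordinate bus of any PCI-to-PCI bridge it finds.
 */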
void eeh_add_device_tree_late(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		eeh_add_device_late(dev);
		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
			struct pci_bus *subbus = dev->subordinate;
			if (subbus)
				eeh_add_device_tree_late(subbus);
		}
	}
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);

/**
 * eeh_remove_device - undo EEH setup for the indicated pci device
 * @dev: pci device to be removed
 *
 * This routine should be called when a device is removed from
 * a running system (e.g. by hotplug or dlpar).  It unregisters
 * the PCI device from the EEH subsystem.  I/O errors affecting
 * this device will no longer be detected after this call; thus,
 * i/o errors affecting this slot may leave this device unusable.
 */
static void eeh_remove_device(struct pci_dev *dev)
{
	struct device_node *dn;
	if (!dev || !eeh_subsystem_enabled)
		return;

	/* Unregister the device with the EEH/PCI address search system */
	pr_debug("EEH: Removing device %s\n", pci_name(dev));

	dn = pci_device_to_OF_node(dev);
	if (PCI_DN(dn)->pcidev == NULL) {
		pr_debug("EEH: Not referenced !\n");
		return;
	}
	PCI_DN(dn)->pcidev = NULL;
	pci_dev_put(dev);

	pci_addr_cache_remove_device(dev);
	eeh_sysfs_remove_device(dev);
}

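/**
 * eeh_remove_bus_device - undo EEH setup for a device and everything below it
 * @dev: pci device to be removed
 *
 * Removes @dev from the EEH subsystem and, if it is a PCI-to-PCI
 * bridge, recursively removes every device on its subordinate bus.
 */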
void eeh_remove_bus_device(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->subordinate;
	struct pci_dev *child, *tmp;

	eeh_remove_device(dev);

	if (bus && dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
			eeh_remove_bus_device(child);
	}
}
EXPORT_SYMBOL_GPL(eeh_remove_bus_device);

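/*
 * /proc/ppc64/eeh: report whether EEH is enabled and dump the
 * monitoring counters collected above (ignored checks, false
 * positives, slot resets, and so on).
 */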
static int proc_eeh_show(struct seq_file *m, void *v)
{
	if (0 == eeh_subsystem_enabled) {
		seq_printf(m, "EEH Subsystem is globally disabled\n");
		seq_printf(m, "eeh_total_mmio_ffs=%ld\n", total_mmio_ffs);
	} else {
		seq_printf(m, "EEH Subsystem is enabled\n");
		seq_printf(m,
			   "no device=%ld\n"
			   "no device node=%ld\n"
			   "no config address=%ld\n"
			   "check not wanted=%ld\n"
			   "eeh_total_mmio_ffs=%ld\n"
			   "eeh_false_positives=%ld\n"
			   "eeh_slot_resets=%ld\n",
			   no_device, no_dn, no_cfg_addr,
			   ignored_check, total_mmio_ffs,
			   false_positives,
			   slot_resets);
	}

	return 0;
}

static int proc_eeh_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_eeh_show, NULL);
}

static const struct file_operations proc_eeh_operations = {
	.open		= proc_eeh_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init eeh_init_proc(void)
{
	if (machine_is(pseries))
		proc_create("ppc64/eeh", 0, NULL, &proc_eeh_operations);
	return 0;
}
__initcall(eeh_init_proc);