/*
 * drivers/pci/pcie/aer/aerdrv_core.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file implements the core part of PCI Express AER. When a PCI Express
 * error is delivered, an error message is collected and printed to the
 * console, and then an error recovery procedure is executed following the
 * PCI error recovery rules.
 *
 * Copyright (C) 2006 Intel Corp.
 *	Tom Long Nguyen (tom.l.nguyen@intel.com)
 *	Zhang Yanmin (yanmin.zhang@intel.com)
 *
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include "aerdrv.h"

static int forceload;
module_param(forceload, bool, 0);
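
/**
 * pci_enable_pcie_error_reporting - enable PCI Express error reporting
 * @dev: pointer to the pci_dev of the device
 *
 * Sets the correctable, non-fatal, fatal and unsupported-request error
 * reporting enable bits in the device's PCI Express Device Control register.
 * Returns -EIO if the device has no AER or PCI Express capability.
 */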
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
        u16 reg16 = 0;
        int pos;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
        if (!pos)
                return -EIO;

        pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
        if (!pos)
                return -EIO;

        pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
        reg16 = reg16 |
                PCI_EXP_DEVCTL_CERE |
                PCI_EXP_DEVCTL_NFERE |
                PCI_EXP_DEVCTL_FERE |
                PCI_EXP_DEVCTL_URRE;
        pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
        return 0;
}
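
/**
 * pci_disable_pcie_error_reporting - disable PCI Express error reporting
 * @dev: pointer to the pci_dev of the device
 *
 * Clears the error reporting enable bits in the device's PCI Express
 * Device Control register. Returns -EIO if the device has no PCI Express
 * capability.
 */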
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
        u16 reg16 = 0;
        int pos;

        pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
        if (!pos)
                return -EIO;

        pci_read_config_word(dev, pos+PCI_EXP_DEVCTL, &reg16);
        reg16 = reg16 & ~(PCI_EXP_DEVCTL_CERE |
                        PCI_EXP_DEVCTL_NFERE |
                        PCI_EXP_DEVCTL_FERE |
                        PCI_EXP_DEVCTL_URRE);
        pci_write_config_word(dev, pos+PCI_EXP_DEVCTL, reg16);
        return 0;
}
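
/**
 * pci_cleanup_aer_uncorrect_error_status - clean up uncorrectable error status
 * @dev: pointer to the pci_dev of the device
 *
 * Clears bits in the device's AER Uncorrectable Error Status register: the
 * nonfatal bits when the channel is in the normal state, the fatal bits
 * otherwise. Returns -EIO if the device has no AER capability.
 */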
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
        int pos;
        u32 status, mask;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
        if (!pos)
                return -EIO;

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
        if (dev->error_state == pci_channel_io_normal)
                status &= ~mask; /* Clear corresponding nonfatal bits */
        else
                status &= mask; /* Clear corresponding fatal bits */
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
        return 0;
}

#if 0
int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
{
        int pos;
        u32 status;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
        if (!pos)
                return -EIO;

        pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
        pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);
        return 0;
}
#endif /* 0 */
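
/**
 * set_device_error_reporting - enable/disable error reporting for one device
 * @dev: pointer to the pci_dev of the device
 * @data: pointer to a bool; true to enable, false to disable
 *
 * Callback for pci_walk_bus(). Error reporting is only touched on Root,
 * Upstream and Downstream Ports; ECRC checking is set whenever enabling.
 */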
static void set_device_error_reporting(struct pci_dev *dev, void *data)
{
        bool enable = *((bool *)data);

        if (dev->pcie_type == PCIE_RC_PORT ||
            dev->pcie_type == PCIE_SW_UPSTREAM_PORT ||
            dev->pcie_type == PCIE_SW_DOWNSTREAM_PORT) {
                if (enable)
                        pci_enable_pcie_error_reporting(dev);
                else
                        pci_disable_pcie_error_reporting(dev);
        }

        if (enable)
                pcie_set_ecrc_checking(dev);
}

/**
 * set_downstream_devices_error_reporting - enable/disable the error reporting bits on the root port and its downstream ports.
 * @dev: pointer to root port's pci_dev data structure
 * @enable: true = enable error reporting, false = disable error reporting.
 */
static void set_downstream_devices_error_reporting(struct pci_dev *dev,
                bool enable)
{
        set_device_error_reporting(dev, &enable);

        if (!dev->subordinate)
                return;
        pci_walk_bus(dev->subordinate, set_device_error_reporting, &enable);
}
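
/**
 * find_device_iter - match a PCI device against an error source ID
 * @device: device being examined by the walk
 * @data: the bus/devfn ID of the error source on entry; the matching
 *        device pointer on exit
 *
 * Callback for device_for_each_child(). Returns 1 (stop iterating) when
 * @device matches the ID directly, or when it is a P2P bridge whose
 * secondary..subordinate bus range contains the ID's bus.
 */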
static int find_device_iter(struct device *device, void *data)
{
        struct pci_dev *dev;
        u16 id = *(unsigned long *)data;
        u8 secondary, subordinate, d_bus = id >> 8;

        if (device->bus == &pci_bus_type) {
                dev = to_pci_dev(device);
                if (id == ((dev->bus->number << 8) | dev->devfn)) {
                        /*
                         * Device ID match
                         */
                        *(unsigned long *)data = (unsigned long)device;
                        return 1;
                }

                /*
                 * If the device is a P2P bridge, check whether the error
                 * source sits on its secondary bus range.
                 */
                if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
                        pci_read_config_byte(dev, PCI_SECONDARY_BUS,
                                &secondary);
                        pci_read_config_byte(dev, PCI_SUBORDINATE_BUS,
                                &subordinate);
                        if (d_bus >= secondary && d_bus <= subordinate) {
                                *(unsigned long *)data = (unsigned long)device;
                                return 1;
                        }
                }
        }

        return 0;
}

/**
 * find_source_device - search through device hierarchy for source device
 * @parent: pointer to Root Port pci_dev data structure
 * @id: device ID of the agent that sent an error message to this Root Port
 *
 * Invoked when an error is detected at the Root Port.
 */
static struct device *find_source_device(struct pci_dev *parent, u16 id)
{
        struct pci_dev *dev = parent;
        struct device *device;
        unsigned long device_addr;
        int status;

        /* Is the Root Port itself the agent that sent the error message? */
        if (id == ((dev->bus->number << 8) | dev->devfn))
                return &dev->dev;

        do {
                device_addr = id;
                status = device_for_each_child(&dev->dev,
                                &device_addr, find_device_iter);
                if (status) {
                        device = (struct device *)device_addr;
                        dev = to_pci_dev(device);
                        if (id == ((dev->bus->number << 8) | dev->devfn))
                                return device;
                }
        } while (status);

        return NULL;
}
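
/**
 * report_error_detected - call a driver's error_detected() callback
 * @dev: pointer to the pci_dev of the device
 * @data: pointer to the struct aer_broadcast_data being accumulated
 *
 * Callback for pci_walk_bus() during the "error_detected" broadcast. The
 * driver's vote is merged into the broadcast result.
 */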
static void report_error_detected(struct pci_dev *dev, void *data)
{
        pci_ers_result_t vote;
        struct pci_error_handlers *err_handler;
        struct aer_broadcast_data *result_data;
        result_data = (struct aer_broadcast_data *) data;

        dev->error_state = result_data->state;

        if (!dev->driver ||
                !dev->driver->err_handler ||
                !dev->driver->err_handler->error_detected) {
                if (result_data->state == pci_channel_io_frozen &&
                        !(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) {
                        /*
                         * In case of fatal recovery, if one of the
                         * downstream devices has no driver, we might be
                         * unable to recover because a later insmod of a
                         * driver for this device would be unaware of its
                         * hw state.
                         */
                        dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
                                   dev->driver ?
                                   "no AER-aware driver" : "no driver");
                }
                return;
        }

        err_handler = dev->driver->err_handler;
        vote = err_handler->error_detected(dev, result_data->state);
        result_data->result = merge_result(result_data->result, vote);
        return;
}
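
/**
 * report_mmio_enabled - call a driver's mmio_enabled() callback
 * @dev: pointer to the pci_dev of the device
 * @data: pointer to the struct aer_broadcast_data being accumulated
 */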
static void report_mmio_enabled(struct pci_dev *dev, void *data)
{
        pci_ers_result_t vote;
        struct pci_error_handlers *err_handler;
        struct aer_broadcast_data *result_data;
        result_data = (struct aer_broadcast_data *) data;

        if (!dev->driver ||
                !dev->driver->err_handler ||
                !dev->driver->err_handler->mmio_enabled)
                return;

        err_handler = dev->driver->err_handler;
        vote = err_handler->mmio_enabled(dev);
        result_data->result = merge_result(result_data->result, vote);
        return;
}
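
/**
 * report_slot_reset - call a driver's slot_reset() callback
 * @dev: pointer to the pci_dev of the device
 * @data: pointer to the struct aer_broadcast_data being accumulated
 */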
static void report_slot_reset(struct pci_dev *dev, void *data)
{
        pci_ers_result_t vote;
        struct pci_error_handlers *err_handler;
        struct aer_broadcast_data *result_data;
        result_data = (struct aer_broadcast_data *) data;

        if (!dev->driver ||
                !dev->driver->err_handler ||
                !dev->driver->err_handler->slot_reset)
                return;

        err_handler = dev->driver->err_handler;
        vote = err_handler->slot_reset(dev);
        result_data->result = merge_result(result_data->result, vote);
        return;
}
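
/**
 * report_resume - call a driver's resume() callback
 * @dev: pointer to the pci_dev of the device
 * @data: unused
 *
 * Restores the channel state to pci_channel_io_normal before notifying
 * the driver.
 */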
static void report_resume(struct pci_dev *dev, void *data)
{
        struct pci_error_handlers *err_handler;

        dev->error_state = pci_channel_io_normal;

        if (!dev->driver ||
                !dev->driver->err_handler ||
                !dev->driver->err_handler->resume)
                return;

        err_handler = dev->driver->err_handler;
        err_handler->resume(dev);
        return;
}

/**
 * broadcast_error_message - handle message broadcast to downstream drivers
 * @dev: pointer to the device from which the message is broadcast down the hierarchy
 * @state: error state
 * @error_mesg: message to print
 * @cb: callback to be broadcast
 *
 * Invoked during the error recovery process. The error severity is
 * broadcast to all downstream drivers in the hierarchy in question.
 */
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
        enum pci_channel_state state,
        char *error_mesg,
        void (*cb)(struct pci_dev *, void *))
{
        struct aer_broadcast_data result_data;

        dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
        result_data.state = state;
        if (cb == report_error_detected)
                result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
        else
                result_data.result = PCI_ERS_RESULT_RECOVERED;

        if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
                /*
                 * If the error is reported by a bridge, we think this error
                 * is related to the downstream link of the bridge, so we
                 * do error recovery on all subordinates of the bridge instead
                 * of the bridge and clear the error status of the bridge.
                 */
                if (cb == report_error_detected)
                        dev->error_state = state;
                pci_walk_bus(dev->subordinate, cb, &result_data);
                if (cb == report_resume) {
                        pci_cleanup_aer_uncorrect_error_status(dev);
                        dev->error_state = pci_channel_io_normal;
                }
        } else {
                /*
                 * If the error is reported by an end point, we think this
                 * error is related to the upstream link of the end point.
                 */
                pci_walk_bus(dev->bus, cb, &result_data);
        }

        return result_data.result;
}

struct find_aer_service_data {
        struct pcie_port_service_driver *aer_driver;
        int is_downstream;
};
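
/**
 * find_aer_service_iter - look for the AER service driver bound to a port
 * @device: port service device being examined
 * @data: pointer to a struct find_aer_service_data to fill in
 *
 * Callback for device_for_each_child(). Records whether the port is a
 * Downstream Port and returns 1 once the AER port service driver is found.
 */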
static int find_aer_service_iter(struct device *device, void *data)
{
        struct device_driver *driver;
        struct pcie_port_service_driver *service_driver;
        struct find_aer_service_data *result;

        result = (struct find_aer_service_data *) data;

        if (device->bus == &pcie_port_bus_type) {
                struct pcie_port_data *port_data;

                port_data = pci_get_drvdata(to_pcie_device(device)->port);
                if (port_data->port_type == PCIE_SW_DOWNSTREAM_PORT)
                        result->is_downstream = 1;

                driver = device->driver;
                if (driver) {
                        service_driver = to_service_driver(driver);
                        if (service_driver->service == PCIE_PORT_SERVICE_AER) {
                                result->aer_driver = service_driver;
                                return 1;
                        }
                }
        }

        return 0;
}

static void find_aer_service(struct pci_dev *dev,
                struct find_aer_service_data *data)
{
        int retval;
        retval = device_for_each_child(&dev->dev, data, find_aer_service_iter);
}
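
/**
 * reset_link - reset the link upstream of the error agent
 * @aerdev: pointer to the pcie_device of the Root Port AER service
 * @dev: pointer to the pci_dev of the error agent
 *
 * Uses the reset_link method of the AER service driver found on the upstream
 * port, falling back to the Root Port's own AER service driver for Downstream
 * Ports. Returns PCI_ERS_RESULT_DISCONNECT if there is no link-reset support
 * or the reset fails.
 */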
static pci_ers_result_t reset_link(struct pcie_device *aerdev,
                struct pci_dev *dev)
{
        struct pci_dev *udev;
        pci_ers_result_t status;
        struct find_aer_service_data data;

        if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)
                udev = dev;
        else
                udev = dev->bus->self;

        data.is_downstream = 0;
        data.aer_driver = NULL;
        find_aer_service(udev, &data);

        /*
         * Use the AER driver of the error agent first.
         * If it has no AER driver, use the Root Port's.
         */
        if (!data.aer_driver || !data.aer_driver->reset_link) {
                if (data.is_downstream &&
                        aerdev->device.driver &&
                        to_service_driver(aerdev->device.driver)->reset_link) {
                        data.aer_driver =
                                to_service_driver(aerdev->device.driver);
                } else {
                        dev_printk(KERN_DEBUG, &dev->dev, "no link-reset "
                                   "support\n");
                        return PCI_ERS_RESULT_DISCONNECT;
                }
        }

        status = data.aer_driver->reset_link(udev);
        if (status != PCI_ERS_RESULT_RECOVERED) {
                dev_printk(KERN_DEBUG, &dev->dev, "link reset at upstream "
                           "device %s failed\n", pci_name(udev));
                return PCI_ERS_RESULT_DISCONNECT;
        }

        return status;
}

/**
 * do_recovery - handle the nonfatal/fatal error recovery process
 * @aerdev: pointer to the pcie_device data structure of the root port
 * @dev: pointer to the pci_dev data structure of the agent detecting an error
 * @severity: error severity type
 *
 * Invoked when an error is nonfatal/fatal. Broadcasts an error-detected
 * message to all downstream drivers within the hierarchy in question and
 * returns the resulting status.
 */
static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
                struct pci_dev *dev,
                int severity)
{
        pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
        enum pci_channel_state state;

        if (severity == AER_FATAL)
                state = pci_channel_io_frozen;
        else
                state = pci_channel_io_normal;

        status = broadcast_error_message(dev,
                        state,
                        "error_detected",
                        report_error_detected);

        if (severity == AER_FATAL) {
                result = reset_link(aerdev, dev);
                if (result != PCI_ERS_RESULT_RECOVERED) {
                        /* TODO: Should panic here? */
                        return result;
                }
        }

        if (status == PCI_ERS_RESULT_CAN_RECOVER)
                status = broadcast_error_message(dev,
                                state,
                                "mmio_enabled",
                                report_mmio_enabled);

        if (status == PCI_ERS_RESULT_NEED_RESET) {
                /*
                 * TODO: Should call platform-specific
                 * functions to reset slot before calling
                 * drivers' slot_reset callbacks?
                 */
                status = broadcast_error_message(dev,
                                state,
                                "slot_reset",
                                report_slot_reset);
        }

        if (status == PCI_ERS_RESULT_RECOVERED)
                broadcast_error_message(dev,
                                state,
                                "resume",
                                report_resume);

        return status;
}

/**
 * handle_error_source - handle logging an error into an event log
 * @aerdev: pointer to the pcie_device data structure of the root port
 * @dev: pointer to the pci_dev data structure of the error source device
 * @info: comprehensive error information
 *
 * Invoked when an error is detected by the Root Port.
 */
static void handle_error_source(struct pcie_device *aerdev,
                struct pci_dev *dev,
                struct aer_err_info info)
{
        pci_ers_result_t status = 0;
        int pos;

        if (info.severity == AER_CORRECTABLE) {
                /*
                 * A correctable error does not need software intervention.
                 * No need to go through the error recovery process.
                 */
                pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
                if (pos)
                        pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
                                        info.status);
        } else {
                status = do_recovery(aerdev, dev, info.severity);
                if (status == PCI_ERS_RESULT_RECOVERED) {
                        dev_printk(KERN_DEBUG, &dev->dev, "AER driver "
                                   "successfully recovered\n");
                } else {
                        /* TODO: Should kernel panic here? */
                        dev_printk(KERN_DEBUG, &dev->dev, "AER driver didn't "
                                   "recover\n");
                }
        }
}

/**
 * aer_enable_rootport - enable Root Port's interrupts when receiving messages
 * @rpc: pointer to a Root Port data structure
 *
 * Invoked when PCIE bus loads AER service driver.
 */
void aer_enable_rootport(struct aer_rpc *rpc)
{
        struct pci_dev *pdev = rpc->rpd->port;
        int pos, aer_pos;
        u16 reg16;
        u32 reg32;

        pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
        /* Clear PCIE Capability's Device Status */
        pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
        pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);

        /* Disable system error generation in response to error messages */
        pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, &reg16);
        reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK);
        pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);

        aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
        /* Clear error status */
        pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
        pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
        pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
        pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
        pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
        pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);

        /*
         * Enable error reporting for the root port device and downstream port
         * devices.
         */
        set_downstream_devices_error_reporting(pdev, true);

        /* Enable Root Port's interrupt in response to error messages */
        pci_write_config_dword(pdev,
                        aer_pos + PCI_ERR_ROOT_COMMAND,
                        ROOT_PORT_INTR_ON_MESG_MASK);
}

/**
 * disable_root_aer - disable Root Port's interrupts when receiving messages
 * @rpc: pointer to a Root Port data structure
 *
 * Invoked when PCIE bus unloads AER service driver.
 */
static void disable_root_aer(struct aer_rpc *rpc)
{
        struct pci_dev *pdev = rpc->rpd->port;
        u32 reg32;
        int pos;

        /*
         * Disable error reporting for the root port device and downstream port
         * devices.
         */
        set_downstream_devices_error_reporting(pdev, false);

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
        /* Disable Root's interrupt in response to error messages */
        pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);

        /* Clear Root's error status reg */
        pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
        pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
}

/**
 * get_e_source - retrieve an error source
 * @rpc: pointer to the root port which holds an error
 *
 * Invoked by DPC handler to consume an error.
 */
static struct aer_err_source *get_e_source(struct aer_rpc *rpc)
{
        struct aer_err_source *e_source;
        unsigned long flags;

        /* Lock access to Root error producer/consumer index */
        spin_lock_irqsave(&rpc->e_lock, flags);
        if (rpc->prod_idx == rpc->cons_idx) {
                spin_unlock_irqrestore(&rpc->e_lock, flags);
                return NULL;
        }
        e_source = &rpc->e_sources[rpc->cons_idx];
        rpc->cons_idx++;
        if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
                rpc->cons_idx = 0;
        spin_unlock_irqrestore(&rpc->e_lock, flags);

        return e_source;
}
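
/**
 * get_device_error_info - read AER status registers of the error source
 * @dev: pointer to the pci_dev of the error source device
 * @info: error info to fill in; info->severity must already be set
 *
 * Reads the correctable status register for correctable errors, or the
 * uncorrectable status (plus the TLP header log when valid) for nonfatal
 * errors and for bridges. Returns AER_UNSUCCESS if no matching status bit
 * is set, AER_SUCCESS otherwise.
 */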
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
        int pos;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);

        /* The device might not support AER */
        if (!pos)
                return AER_SUCCESS;

        if (info->severity == AER_CORRECTABLE) {
                pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
                        &info->status);
                if (!(info->status & ERR_CORRECTABLE_ERROR_MASK))
                        return AER_UNSUCCESS;
        } else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
                info->severity == AER_NONFATAL) {
                /* Link is still healthy for IO reads */
                pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
                        &info->status);
                if (!(info->status & ERR_UNCORRECTABLE_ERROR_MASK))
                        return AER_UNSUCCESS;

                if (info->status & AER_LOG_TLP_MASKS) {
                        info->flags |= AER_TLP_HEADER_VALID_FLAG;
                        pci_read_config_dword(dev,
                                pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
                        pci_read_config_dword(dev,
                                pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
                        pci_read_config_dword(dev,
                                pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
                        pci_read_config_dword(dev,
                                pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
                }
        }

        return AER_SUCCESS;
}

/**
 * aer_isr_one_error - consume an error detected by root port
 * @p_device: pointer to error root port service device
 * @e_src: pointer to an error source
 */
static void aer_isr_one_error(struct pcie_device *p_device,
                struct aer_err_source *e_src)
{
        struct device *s_device;
        struct aer_err_info e_info = {0, 0, 0,};
        int i;
        u16 id;

        /*
         * It is possible that both a correctable error and an
         * uncorrectable error were logged. Report the correctable
         * error first.
         */
        for (i = 1; i & ROOT_ERR_STATUS_MASKS ; i <<= 2) {
                if (i > 4)
                        break;
                if (!(e_src->status & i))
                        continue;

                /* Init comprehensive error information */
                if (i & PCI_ERR_ROOT_COR_RCV) {
                        id = ERR_COR_ID(e_src->id);
                        e_info.severity = AER_CORRECTABLE;
                } else {
                        id = ERR_UNCOR_ID(e_src->id);
                        e_info.severity = ((e_src->status >> 6) & 1);
                }
                if (e_src->status &
                        (PCI_ERR_ROOT_MULTI_COR_RCV |
                         PCI_ERR_ROOT_MULTI_UNCOR_RCV))
                        e_info.flags |= AER_MULTI_ERROR_VALID_FLAG;

                s_device = find_source_device(p_device->port, id);
                if (!s_device) {
                        printk(KERN_DEBUG "%s->can't find device of ID%04x\n",
                                __func__, id);
                        continue;
                }
                if (get_device_error_info(to_pci_dev(s_device), &e_info) ==
                                AER_SUCCESS) {
                        aer_print_error(to_pci_dev(s_device), &e_info);
                        handle_error_source(p_device,
                                to_pci_dev(s_device),
                                e_info);
                }
        }
}

/**
 * aer_isr - consume errors detected by root port
 * @work: definition of this work item
 *
 * Invoked, as a DPC, when the root port records a newly detected error.
 */
void aer_isr(struct work_struct *work)
{
        struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
        struct pcie_device *p_device = rpc->rpd;
        struct aer_err_source *e_src;

        mutex_lock(&rpc->rpc_mutex);
        e_src = get_e_source(rpc);
        while (e_src) {
                aer_isr_one_error(p_device, e_src);
                e_src = get_e_source(rpc);
        }
        mutex_unlock(&rpc->rpc_mutex);

        wake_up(&rpc->wait_release);
}

/**
 * aer_delete_rootport - disable root port AER and delete service data
 * @rpc: pointer to a root port device being deleted
 *
 * Invoked when the AER service is unloaded on a specific Root Port.
 */
void aer_delete_rootport(struct aer_rpc *rpc)
{
        /* Disable root port AER itself */
        disable_root_aer(rpc);

        kfree(rpc);
}

/**
 * aer_init - provide AER initialization
 * @dev: pointer to AER pcie device
 *
 * Invoked when AER service driver is loaded.
 */
int aer_init(struct pcie_device *dev)
{
        if (aer_osc_setup(dev) && !forceload)
                return -ENXIO;

        return AER_SUCCESS;
}

EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);