
/*
 * drivers/pci/pcie/aer/aerdrv_core.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file implements the core part of PCI Express AER.  When a PCI Express
 * error is delivered, an error message is collected and printed to the
 * console, then an error recovery procedure is executed following the PCI
 * error recovery rules.
 *
 * Copyright (C) 2006 Intel Corp.
 *	Tom Long Nguyen (tom.l.nguyen@intel.com)
 *	Zhang Yanmin (yanmin.zhang@intel.com)
 *
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include "aerdrv.h"

static int forceload;
module_param(forceload, bool, 0);

#define PCI_CFG_SPACE_SIZE	(0x100)

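/**
 * pci_find_aer_capability - find the Advanced Error Reporting capability
 * @dev: pointer to the pci_dev data structure to search
 *
 * Walks the PCI Express extended capability list starting at offset 0x100
 * and returns the offset of the AER capability, or 0 if the device is not
 * a PCI Express device or does not implement AER.
 */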
int pci_find_aer_capability(struct pci_dev *dev)
{
        int pos;
        u32 reg32 = 0;

        /* Check if it's a pci-express device */
        pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
        if (!pos)
                return 0;

        /* Check if it supports pci-express AER */
        pos = PCI_CFG_SPACE_SIZE;
        while (pos) {
                if (pci_read_config_dword(dev, pos, &reg32))
                        return 0;
                /* some broken boards return ~0 */
                if (reg32 == 0xffffffff)
                        return 0;
                if (PCI_EXT_CAP_ID(reg32) == PCI_EXT_CAP_ID_ERR)
                        break;
                pos = reg32 >> 20;
        }

        return pos;
}

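/**
 * pci_enable_pcie_error_reporting - enable error reporting for a device
 * @dev: pointer to the pci_dev data structure
 *
 * Sets the correctable, non-fatal, fatal and unsupported-request error
 * reporting enable bits in the device's PCI Express Device Control
 * register.  Returns -EIO if the device has no PCI Express capability.
 * A PCI Express endpoint driver would typically call this from its
 * probe() path.
 */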
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
        u16 reg16 = 0;
        int pos;

        pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
        if (!pos)
                return -EIO;

        pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
        reg16 |= PCI_EXP_DEVCTL_CERE |
                 PCI_EXP_DEVCTL_NFERE |
                 PCI_EXP_DEVCTL_FERE |
                 PCI_EXP_DEVCTL_URRE;
        pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);

        return 0;
}

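/**
 * pci_disable_pcie_error_reporting - disable error reporting for a device
 * @dev: pointer to the pci_dev data structure
 *
 * Clears the error reporting enable bits set by
 * pci_enable_pcie_error_reporting().  Returns -EIO if the device has no
 * PCI Express capability.
 */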
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
        u16 reg16 = 0;
        int pos;

        pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
        if (!pos)
                return -EIO;

        pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &reg16);
        reg16 &= ~(PCI_EXP_DEVCTL_CERE |
                   PCI_EXP_DEVCTL_NFERE |
                   PCI_EXP_DEVCTL_FERE |
                   PCI_EXP_DEVCTL_URRE);
        pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, reg16);

        return 0;
}

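/**
 * pci_cleanup_aer_uncorrect_error_status - clear uncorrectable error status
 * @dev: pointer to the pci_dev data structure
 *
 * Clears bits in the device's AER Uncorrectable Error Status register
 * according to the current channel state: the non-fatal bits when the
 * channel is in normal state, the fatal bits otherwise.  Typically called
 * by a driver once it has recovered from an uncorrectable error.
 */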
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
        int pos;
        u32 status, mask;

        pos = pci_find_aer_capability(dev);
        if (!pos)
                return -EIO;

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
        if (dev->error_state == pci_channel_io_normal)
                status &= ~mask; /* Clear corresponding nonfatal bits */
        else
                status &= mask; /* Clear corresponding fatal bits */
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);

        return 0;
}

#if 0
int pci_cleanup_aer_correct_error_status(struct pci_dev *dev)
{
        int pos;
        u32 status;

        pos = pci_find_aer_capability(dev);
        if (!pos)
                return -EIO;

        pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
        pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);

        return 0;
}
#endif /* 0 */

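/*
 * find_device_iter - iterator used by find_source_device().  @data carries
 * the requester ID (bus << 8 | devfn) on entry; on a match it is overwritten
 * with the struct device pointer and the walk stops.  A PCI-to-PCI bridge
 * matches if the requester's bus number falls within its
 * secondary..subordinate bus range.
 */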
static int find_device_iter(struct device *device, void *data)
{
        struct pci_dev *dev;
        u16 id = *(unsigned long *)data;
        u8 secondary, subordinate, d_bus = id >> 8;

        if (device->bus == &pci_bus_type) {
                dev = to_pci_dev(device);
                if (id == ((dev->bus->number << 8) | dev->devfn)) {
                        /*
                         * Device ID match
                         */
                        *(unsigned long *)data = (unsigned long)device;
                        return 1;
                }

                /*
                 * If the device is a PCI-to-PCI bridge, check whether the
                 * error source sits behind it.
                 */
                if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
                        pci_read_config_byte(dev, PCI_SECONDARY_BUS,
                                             &secondary);
                        pci_read_config_byte(dev, PCI_SUBORDINATE_BUS,
                                             &subordinate);
                        if (d_bus >= secondary && d_bus <= subordinate) {
                                *(unsigned long *)data = (unsigned long)device;
                                return 1;
                        }
                }
        }

        return 0;
}

/**
 * find_source_device - search through device hierarchy for source device
 * @parent: pointer to Root Port pci_dev data structure
 * @id: device ID of the agent that sent an error message to this Root Port
 *
 * Invoked when an error is detected at the Root Port.
 */
static struct device *find_source_device(struct pci_dev *parent, u16 id)
{
        struct pci_dev *dev = parent;
        struct device *device;
        unsigned long device_addr;
        int status;

        /* Is Root Port an agent that sends error message? */
        if (id == ((dev->bus->number << 8) | dev->devfn))
                return &dev->dev;

        do {
                device_addr = id;
                status = device_for_each_child(&dev->dev, &device_addr,
                                               find_device_iter);
                if (status) {
                        device = (struct device *)device_addr;
                        dev = to_pci_dev(device);
                        if (id == ((dev->bus->number << 8) | dev->devfn))
                                return device;
                }
        } while (status);

        return NULL;
}

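/*
 * The report_* callbacks below are broadcast to every device in the
 * affected hierarchy by broadcast_error_message().  Each one invokes the
 * corresponding pci_error_handlers method of the device's driver, if
 * present, and (except for resume) merges the driver's vote into the
 * broadcast result.
 */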
static void report_error_detected(struct pci_dev *dev, void *data)
{
        pci_ers_result_t vote;
        struct pci_error_handlers *err_handler;
        struct aer_broadcast_data *result_data;

        result_data = (struct aer_broadcast_data *)data;

        dev->error_state = result_data->state;

        if (!dev->driver ||
            !dev->driver->err_handler ||
            !dev->driver->err_handler->error_detected) {
                if (result_data->state == pci_channel_io_frozen &&
                    !(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) {
                        /*
                         * In case of fatal recovery, if one of the
                         * downstream devices has no driver, we might be
                         * unable to recover because a later insmod of a
                         * driver for this device is unaware of its hw
                         * state.
                         */
                        dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
                                   dev->driver ?
                                   "no AER-aware driver" : "no driver");
                }
                return;
        }

        err_handler = dev->driver->err_handler;
        vote = err_handler->error_detected(dev, result_data->state);
        result_data->result = merge_result(result_data->result, vote);
}

static void report_mmio_enabled(struct pci_dev *dev, void *data)
{
        pci_ers_result_t vote;
        struct pci_error_handlers *err_handler;
        struct aer_broadcast_data *result_data;

        result_data = (struct aer_broadcast_data *)data;

        if (!dev->driver ||
            !dev->driver->err_handler ||
            !dev->driver->err_handler->mmio_enabled)
                return;

        err_handler = dev->driver->err_handler;
        vote = err_handler->mmio_enabled(dev);
        result_data->result = merge_result(result_data->result, vote);
}

static void report_slot_reset(struct pci_dev *dev, void *data)
{
        pci_ers_result_t vote;
        struct pci_error_handlers *err_handler;
        struct aer_broadcast_data *result_data;

        result_data = (struct aer_broadcast_data *)data;

        if (!dev->driver ||
            !dev->driver->err_handler ||
            !dev->driver->err_handler->slot_reset)
                return;

        err_handler = dev->driver->err_handler;
        vote = err_handler->slot_reset(dev);
        result_data->result = merge_result(result_data->result, vote);
}

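/*
 * report_resume - final step of the broadcast: the channel is back to
 * normal, so notify drivers that implement the resume() error handler.
 */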
static void report_resume(struct pci_dev *dev, void *data)
{
        struct pci_error_handlers *err_handler;

        dev->error_state = pci_channel_io_normal;

        if (!dev->driver ||
            !dev->driver->err_handler ||
            !dev->driver->err_handler->resume)
                return;

        err_handler = dev->driver->err_handler;
        err_handler->resume(dev);
}

/**
 * broadcast_error_message - handle message broadcast to downstream drivers
 * @dev: pointer to the device from which the message is broadcast down
 *	the hierarchy
 * @state: error state
 * @error_mesg: message to print
 * @cb: callback invoked for each downstream device
 *
 * Invoked during the error recovery process.  Broadcasts the error
 * severity to all downstream drivers in the hierarchy in question.
 */
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
        enum pci_channel_state state,
        char *error_mesg,
        void (*cb)(struct pci_dev *, void *))
{
        struct aer_broadcast_data result_data;

        dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
        result_data.state = state;
        if (cb == report_error_detected)
                result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
        else
                result_data.result = PCI_ERS_RESULT_RECOVERED;

        if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
                /*
                 * If the error is reported by a bridge, we think this error
                 * is related to the downstream link of the bridge, so we
                 * do error recovery on all subordinates of the bridge instead
                 * of the bridge and clear the error status of the bridge.
                 */
                if (cb == report_error_detected)
                        dev->error_state = state;
                pci_walk_bus(dev->subordinate, cb, &result_data);
                if (cb == report_resume) {
                        pci_cleanup_aer_uncorrect_error_status(dev);
                        dev->error_state = pci_channel_io_normal;
                }
        } else {
                /*
                 * If the error is reported by an end point, we think this
                 * error is related to the upstream link of the end point.
                 */
                pci_walk_bus(dev->bus, cb, &result_data);
        }

        return result_data.result;
}

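/*
 * find_aer_service_data and its helpers locate the pcie_port_service_driver
 * that provides the AER service for a given port and record whether that
 * port is a switch downstream port.  Used by reset_link() to pick which
 * driver's reset_link callback to invoke.
 */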
struct find_aer_service_data {
        struct pcie_port_service_driver *aer_driver;
        int is_downstream;
};

static int find_aer_service_iter(struct device *device, void *data)
{
        struct device_driver *driver;
        struct pcie_port_service_driver *service_driver;
        struct pcie_device *pcie_dev;
        struct find_aer_service_data *result;

        result = (struct find_aer_service_data *)data;

        if (device->bus == &pcie_port_bus_type) {
                pcie_dev = to_pcie_device(device);
                if (pcie_dev->id.port_type == PCIE_SW_DOWNSTREAM_PORT)
                        result->is_downstream = 1;

                driver = device->driver;
                if (driver) {
                        service_driver = to_service_driver(driver);
                        if (service_driver->id_table->service_type ==
                            PCIE_PORT_SERVICE_AER) {
                                result->aer_driver = service_driver;
                                return 1;
                        }
                }
        }

        return 0;
}

static void find_aer_service(struct pci_dev *dev,
                             struct find_aer_service_data *data)
{
        int retval;

        retval = device_for_each_child(&dev->dev, data, find_aer_service_iter);
}

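/**
 * reset_link - reset the PCI Express link upstream of the error agent
 * @aerdev: pointer to the pcie_device of the Root Port running this service
 * @dev: pointer to the pci_dev of the agent that reported the error
 *
 * Picks the upstream bridge of the agent (or the agent itself if it is a
 * bridge), finds an AER service driver with a reset_link callback for it
 * (falling back to the Root Port's own driver for switch downstream ports)
 * and invokes that callback.  Returns PCI_ERS_RESULT_DISCONNECT if no
 * link-reset support is found or the reset fails.
 */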
static pci_ers_result_t reset_link(struct pcie_device *aerdev,
                                   struct pci_dev *dev)
{
        struct pci_dev *udev;
        pci_ers_result_t status;
        struct find_aer_service_data data;

        if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)
                udev = dev;
        else
                udev = dev->bus->self;

        data.is_downstream = 0;
        data.aer_driver = NULL;
        find_aer_service(udev, &data);

        /*
         * Use the AER driver of the error agent first.  If the agent has
         * no AER driver, fall back to the Root Port's.
         */
        if (!data.aer_driver || !data.aer_driver->reset_link) {
                if (data.is_downstream &&
                    aerdev->device.driver &&
                    to_service_driver(aerdev->device.driver)->reset_link) {
                        data.aer_driver =
                                to_service_driver(aerdev->device.driver);
                } else {
                        dev_printk(KERN_DEBUG, &dev->dev,
                                   "no link-reset support\n");
                        return PCI_ERS_RESULT_DISCONNECT;
                }
        }

        status = data.aer_driver->reset_link(udev);
        if (status != PCI_ERS_RESULT_RECOVERED) {
                dev_printk(KERN_DEBUG, &dev->dev,
                           "link reset at upstream device %s failed\n",
                           pci_name(udev));
                return PCI_ERS_RESULT_DISCONNECT;
        }

        return status;
}

/**
 * do_recovery - handle nonfatal/fatal error recovery process
 * @aerdev: pointer to a pcie_device data structure of root port
 * @dev: pointer to a pci_dev data structure of agent detecting an error
 * @severity: error severity type
 *
 * Invoked when an error is nonfatal/fatal.  Broadcasts the error-detected
 * message to all downstream drivers within the hierarchy in question and
 * returns the resulting status.
 */
static pci_ers_result_t do_recovery(struct pcie_device *aerdev,
                                    struct pci_dev *dev,
                                    int severity)
{
        pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
        enum pci_channel_state state;

        if (severity == AER_FATAL)
                state = pci_channel_io_frozen;
        else
                state = pci_channel_io_normal;

        status = broadcast_error_message(dev,
                        state,
                        "error_detected",
                        report_error_detected);

        if (severity == AER_FATAL) {
                result = reset_link(aerdev, dev);
                if (result != PCI_ERS_RESULT_RECOVERED) {
                        /* TODO: Should panic here? */
                        return result;
                }
        }

        if (status == PCI_ERS_RESULT_CAN_RECOVER)
                status = broadcast_error_message(dev,
                                state,
                                "mmio_enabled",
                                report_mmio_enabled);

        if (status == PCI_ERS_RESULT_NEED_RESET) {
                /*
                 * TODO: Should call platform-specific
                 * functions to reset slot before calling
                 * drivers' slot_reset callbacks?
                 */
                status = broadcast_error_message(dev,
                                state,
                                "slot_reset",
                                report_slot_reset);
        }

        if (status == PCI_ERS_RESULT_RECOVERED)
                broadcast_error_message(dev,
                                state,
                                "resume",
                                report_resume);

        return status;
}

/**
 * handle_error_source - handle logging error into an event log
 * @aerdev: pointer to pcie_device data structure of the root port
 * @dev: pointer to pci_dev data structure of error source device
 * @info: comprehensive error information
 *
 * Invoked when an error is detected by the Root Port.
 */
static void handle_error_source(struct pcie_device *aerdev,
                                struct pci_dev *dev,
                                struct aer_err_info info)
{
        pci_ers_result_t status = 0;
        int pos;

        if (info.severity == AER_CORRECTABLE) {
                /*
                 * Correctable error does not need software intervention.
                 * No need to go through error recovery process.
                 */
                pos = pci_find_aer_capability(dev);
                if (pos)
                        pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
                                               info.status);
        } else {
                status = do_recovery(aerdev, dev, info.severity);
                if (status == PCI_ERS_RESULT_RECOVERED) {
                        dev_printk(KERN_DEBUG, &dev->dev,
                                   "AER driver successfully recovered\n");
                } else {
                        /* TODO: Should kernel panic here? */
                        dev_printk(KERN_DEBUG, &dev->dev,
                                   "AER driver didn't recover\n");
                }
        }
}

/**
 * aer_enable_rootport - enable Root Port's interrupts when receiving messages
 * @rpc: pointer to a Root Port data structure
 *
 * Invoked when the PCI Express bus loads the AER service driver.
 */
void aer_enable_rootport(struct aer_rpc *rpc)
{
        struct pci_dev *pdev = rpc->rpd->port;
        int pos, aer_pos;
        u16 reg16;
        u32 reg32;

        pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
        /* Clear PCIE Capability's Device Status */
        pci_read_config_word(pdev, pos + PCI_EXP_DEVSTA, &reg16);
        pci_write_config_word(pdev, pos + PCI_EXP_DEVSTA, reg16);

        /* Disable system error generation in response to error messages */
        pci_read_config_word(pdev, pos + PCI_EXP_RTCTL, &reg16);
        reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK);
        pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16);

        aer_pos = pci_find_aer_capability(pdev);
        /* Clear error status */
        pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, &reg32);
        pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32);
        pci_read_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, &reg32);
        pci_write_config_dword(pdev, aer_pos + PCI_ERR_COR_STATUS, reg32);
        pci_read_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, &reg32);
        pci_write_config_dword(pdev, aer_pos + PCI_ERR_UNCOR_STATUS, reg32);

        /* Enable Root Port device reporting error itself */
        pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &reg16);
        reg16 |= PCI_EXP_DEVCTL_CERE |
                 PCI_EXP_DEVCTL_NFERE |
                 PCI_EXP_DEVCTL_FERE |
                 PCI_EXP_DEVCTL_URRE;
        pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, reg16);

        /* Enable Root Port's interrupt in response to error messages */
        pci_write_config_dword(pdev,
                               aer_pos + PCI_ERR_ROOT_COMMAND,
                               ROOT_PORT_INTR_ON_MESG_MASK);
}

/**
 * disable_root_aer - disable Root Port's interrupts when receiving messages
 * @rpc: pointer to a Root Port data structure
 *
 * Invoked when the PCI Express bus unloads the AER service driver.
 */
static void disable_root_aer(struct aer_rpc *rpc)
{
        struct pci_dev *pdev = rpc->rpd->port;
        u32 reg32;
        int pos;

        pos = pci_find_aer_capability(pdev);
        /* Disable Root's interrupt in response to error messages */
        pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0);

        /* Clear Root's error status reg */
        pci_read_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, &reg32);
        pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_STATUS, reg32);
}

/**
 * get_e_source - retrieve an error source
 * @rpc: pointer to the root port which holds an error
 *
 * Invoked by DPC handler to consume an error.
 */
static struct aer_err_source *get_e_source(struct aer_rpc *rpc)
{
        struct aer_err_source *e_source;
        unsigned long flags;

        /* Lock access to Root error producer/consumer index */
        spin_lock_irqsave(&rpc->e_lock, flags);
        if (rpc->prod_idx == rpc->cons_idx) {
                spin_unlock_irqrestore(&rpc->e_lock, flags);
                return NULL;
        }
        e_source = &rpc->e_sources[rpc->cons_idx];
        rpc->cons_idx++;
        if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
                rpc->cons_idx = 0;
        spin_unlock_irqrestore(&rpc->e_lock, flags);

        return e_source;
}

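/*
 * get_device_error_info - read the error status (and the TLP header log,
 * when present) from the source device's AER capability into @info.
 * Returns AER_UNSUCCESS when the expected status bits are not set;
 * otherwise AER_SUCCESS, including when the device has no AER capability.
 */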
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
        int pos;

        pos = pci_find_aer_capability(dev);

        /* The device might not support AER */
        if (!pos)
                return AER_SUCCESS;

        if (info->severity == AER_CORRECTABLE) {
                pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
                                      &info->status);
                if (!(info->status & ERR_CORRECTABLE_ERROR_MASK))
                        return AER_UNSUCCESS;
        } else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
                   info->severity == AER_NONFATAL) {
                /* Link is still healthy for IO reads */
                pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
                                      &info->status);
                if (!(info->status & ERR_UNCORRECTABLE_ERROR_MASK))
                        return AER_UNSUCCESS;

                if (info->status & AER_LOG_TLP_MASKS) {
                        info->flags |= AER_TLP_HEADER_VALID_FLAG;
                        pci_read_config_dword(dev,
                                pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
                        pci_read_config_dword(dev,
                                pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
                        pci_read_config_dword(dev,
                                pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
                        pci_read_config_dword(dev,
                                pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
                }
        }

        return AER_SUCCESS;
}

/**
 * aer_isr_one_error - consume an error detected by root port
 * @p_device: pointer to error root port service device
 * @e_src: pointer to an error source
 */
static void aer_isr_one_error(struct pcie_device *p_device,
                              struct aer_err_source *e_src)
{
        struct device *s_device;
        struct aer_err_info e_info = {0, 0, 0,};
        int i;
        u16 id;

        /*
         * There is a possibility that both a correctable error and an
         * uncorrectable error were logged.  Report the correctable error
         * first.
         */
        for (i = 1; i & ROOT_ERR_STATUS_MASKS; i <<= 2) {
                if (i > 4)
                        break;
                if (!(e_src->status & i))
                        continue;

                /* Init comprehensive error information */
                if (i & PCI_ERR_ROOT_COR_RCV) {
                        id = ERR_COR_ID(e_src->id);
                        e_info.severity = AER_CORRECTABLE;
                } else {
                        id = ERR_UNCOR_ID(e_src->id);
                        e_info.severity = ((e_src->status >> 6) & 1);
                }
                if (e_src->status &
                    (PCI_ERR_ROOT_MULTI_COR_RCV |
                     PCI_ERR_ROOT_MULTI_UNCOR_RCV))
                        e_info.flags |= AER_MULTI_ERROR_VALID_FLAG;

                s_device = find_source_device(p_device->port, id);
                if (!s_device) {
                        printk(KERN_DEBUG "%s->can't find device of ID%04x\n",
                               __func__, id);
                        continue;
                }
                if (get_device_error_info(to_pci_dev(s_device), &e_info) ==
                    AER_SUCCESS) {
                        aer_print_error(to_pci_dev(s_device), &e_info);
                        handle_error_source(p_device,
                                            to_pci_dev(s_device),
                                            e_info);
                }
        }
}

/**
 * aer_isr - consume errors detected by root port
 * @work: definition of this work item
 *
 * Invoked, as a DPC, when the Root Port records a newly detected error.
 */
void aer_isr(struct work_struct *work)
{
        struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
        struct pcie_device *p_device = rpc->rpd;
        struct aer_err_source *e_src;

        mutex_lock(&rpc->rpc_mutex);
        e_src = get_e_source(rpc);
        while (e_src) {
                aer_isr_one_error(p_device, e_src);
                e_src = get_e_source(rpc);
        }
        mutex_unlock(&rpc->rpc_mutex);

        wake_up(&rpc->wait_release);
}

/**
 * aer_delete_rootport - disable root port aer and delete service data
 * @rpc: pointer to a root port device being deleted
 *
 * Invoked when the AER service is unloaded on a specific Root Port.
 */
void aer_delete_rootport(struct aer_rpc *rpc)
{
        /* Disable root port AER itself */
        disable_root_aer(rpc);

        kfree(rpc);
}

/**
 * aer_init - provide AER initialization
 * @dev: pointer to AER pcie device
 *
 * Invoked when the AER service driver is loaded.
 */
int aer_init(struct pcie_device *dev)
{
        if (aer_osc_setup(dev) && !forceload)
                return -ENXIO;

        return AER_SUCCESS;
}

EXPORT_SYMBOL_GPL(pci_find_aer_capability);
EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);