aerdrv_core.c

/*
 * drivers/pci/pcie/aer/aerdrv_core.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file implements the core part of PCI Express AER. When a PCI Express
 * error is delivered, an error message is collected and printed to the
 * console, then an error recovery procedure is executed by following the
 * PCI error recovery rules.
 *
 * Copyright (C) 2006 Intel Corp.
 *	Tom Long Nguyen (tom.l.nguyen@intel.com)
 *	Zhang Yanmin (yanmin.zhang@intel.com)
 *
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include "aerdrv.h"

static bool forceload;
static bool nosourceid;
module_param(forceload, bool, 0);
module_param(nosourceid, bool, 0);

#define PCI_EXP_AER_FLAGS	(PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
				 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)

int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
		return -EIO;

	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);

int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
					  PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);

int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos;
	u32 status;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return -EIO;

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	if (status)
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);
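
/*
 * Usage sketch (not part of this file): a hypothetical PCIe endpoint driver
 * would typically opt in to AER reporting when it binds to the device and
 * opt out again on remove. The "foo_" names below are illustrative only.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int rc = pci_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *		// Failure is non-fatal: AER may be firmware-first or absent.
 *		pci_enable_pcie_error_reporting(pdev);
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_disable_pcie_error_reporting(pdev);
 *		pci_disable_device(pdev);
 *	}
 */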

/**
 * add_error_device - list device to be handled
 * @e_info: pointer to error info
 * @dev: pointer to pci_dev to be added
 */
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
	if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
		e_info->dev[e_info->error_dev_num] = dev;
		e_info->error_dev_num++;
		return 0;
	}
	return -ENOSPC;
}

#define PCI_BUS(x)	(((x) >> 8) & 0xff)

/**
 * is_error_source - check whether the device is source of reported error
 * @dev: pointer to pci_dev to be checked
 * @e_info: pointer to reported error info
 */
static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
	int pos;
	u32 status, mask;
	u16 reg16;

	/*
	 * When the bus id is equal to 0, it might be a bad id
	 * reported by the root port.
	 */
	if (!nosourceid && (PCI_BUS(e_info->id) != 0)) {
		/* Device ID match? */
		if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
			return true;

		/* Continue id comparing if there is no multiple error */
		if (!e_info->multi_error_valid)
			return false;
	}

	/*
	 * When either
	 * 1) nosourceid==y;
	 * 2) the bus id is equal to 0 (some ports might lose the bus
	 *    id of the error source id);
	 * 3) there are multiple errors and the prior id comparison fails;
	 * we check the AER status registers to find the possible reporter.
	 */
	if (atomic_read(&dev->enable_cnt) == 0)
		return false;

	/* Check if AER is enabled */
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
	if (!(reg16 & PCI_EXP_AER_FLAGS))
		return false;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return false;

	/* Check if error is recorded */
	if (e_info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
	} else {
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
	}
	if (status & ~mask)
		return true;

	return false;
}
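
/*
 * Illustration (hypothetical register values): if a device's Correctable
 * Error Status reads PCI_ERR_COR_RCVR | PCI_ERR_COR_BAD_TLP (0x00000041)
 * while its Correctable Error Mask reads PCI_ERR_COR_BAD_TLP (0x00000040),
 * then status & ~mask == 0x00000001. The unmasked Receiver Error bit remains
 * set, so is_error_source() treats this device as a candidate error source
 * and returns true.
 */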

static int find_device_iter(struct pci_dev *dev, void *data)
{
	struct aer_err_info *e_info = (struct aer_err_info *)data;

	if (is_error_source(dev, e_info)) {
		/* List this device */
		if (add_error_device(e_info, dev)) {
			/* We cannot handle more... Stop iteration */
			/* TODO: Should print error message here? */
			return 1;
		}

		/* If there is only a single error, stop iteration */
		if (!e_info->multi_error_valid)
			return 1;
	}
	return 0;
}

/**
 * find_source_device - search through device hierarchy for source device
 * @parent: pointer to Root Port pci_dev data structure
 * @e_info: detailed error information, including the error source id
 *
 * Return true if found.
 *
 * Invoked by DPC when an error is detected at the Root Port.
 * The caller must set the id, severity, and multi_error_valid fields of the
 * struct aer_err_info pointed to by @e_info; this function fills in
 * e_info->error_dev_num and e_info->dev[] based on that information.
 */
static bool find_source_device(struct pci_dev *parent,
			       struct aer_err_info *e_info)
{
	struct pci_dev *dev = parent;
	int result;

	/* Must reset in this function */
	e_info->error_dev_num = 0;

	/* Is Root Port an agent that sends error message? */
	result = find_device_iter(dev, e_info);
	if (result)
		return true;

	pci_walk_bus(parent->subordinate, find_device_iter, e_info);

	if (!e_info->error_dev_num) {
		dev_printk(KERN_DEBUG, &parent->dev,
			   "can't find device of ID%04x\n",
			   e_info->id);
		return false;
	}
	return true;
}
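
/*
 * Note: the static merge_result() helper used by the report_*() callbacks
 * below is not shown in this excerpt. As a rough sketch of the merging
 * policy, assuming the mainline helper of this era: an optimistic running
 * result (CAN_RECOVER/RECOVERED) is overridden by any later vote, and a
 * DISCONNECT result is turned into NEED_RESET if some driver asks for a
 * reset. Schematically:
 *
 *	static pci_ers_result_t merge_result(enum pci_ers_result orig,
 *					     enum pci_ers_result new)
 *	{
 *		if (new == PCI_ERS_RESULT_NONE)
 *			return orig;
 *
 *		switch (orig) {
 *		case PCI_ERS_RESULT_CAN_RECOVER:
 *		case PCI_ERS_RESULT_RECOVERED:
 *			orig = new;
 *			break;
 *		case PCI_ERS_RESULT_DISCONNECT:
 *			if (new == PCI_ERS_RESULT_NEED_RESET)
 *				orig = new;
 *			break;
 *		default:
 *			break;
 *		}
 *		return orig;
 *	}
 */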

static int report_error_detected(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;
	result_data = (struct aer_broadcast_data *) data;

	dev->error_state = result_data->state;

	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->error_detected) {
		if (result_data->state == pci_channel_io_frozen &&
			!(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) {
			/*
			 * In case of fatal recovery, if one of the
			 * downstream devices has no driver, we might be
			 * unable to recover because a later insmod of a
			 * driver for this device is unaware of its hw
			 * state.
			 */
			dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
				   dev->driver ?
				   "no AER-aware driver" : "no driver");
		}
		return 0;
	}

	err_handler = dev->driver->err_handler;
	vote = err_handler->error_detected(dev, result_data->state);
	result_data->result = merge_result(result_data->result, vote);
	return 0;
}

static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;
	result_data = (struct aer_broadcast_data *) data;

	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->mmio_enabled)
		return 0;

	err_handler = dev->driver->err_handler;
	vote = err_handler->mmio_enabled(dev);
	result_data->result = merge_result(result_data->result, vote);
	return 0;
}

static int report_slot_reset(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;
	result_data = (struct aer_broadcast_data *) data;

	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->slot_reset)
		return 0;

	err_handler = dev->driver->err_handler;
	vote = err_handler->slot_reset(dev);
	result_data->result = merge_result(result_data->result, vote);
	return 0;
}

static int report_resume(struct pci_dev *dev, void *data)
{
	struct pci_error_handlers *err_handler;

	dev->error_state = pci_channel_io_normal;

	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->resume)
		return 0;

	err_handler = dev->driver->err_handler;
	err_handler->resume(dev);
	return 0;
}
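
/*
 * Usage sketch (not part of this file): the report_*() callbacks above only
 * forward the broadcast to whatever struct pci_error_handlers an endpoint
 * driver registered. A hypothetical driver participating in this recovery
 * sequence (all "foo_" names illustrative) would look roughly like:
 *
 *	static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
 *					enum pci_channel_state state)
 *	{
 *		// Stop I/O and interrupts; don't touch the device if frozen.
 *		if (state == pci_channel_io_perm_failure)
 *			return PCI_ERS_RESULT_DISCONNECT;
 *		return PCI_ERS_RESULT_NEED_RESET;
 *	}
 *
 *	static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
 *	{
 *		// Re-initialize the hardware after the link/slot reset.
 *		pci_cleanup_aer_uncorrect_error_status(pdev);
 *		return PCI_ERS_RESULT_RECOVERED;
 *	}
 *
 *	static void foo_resume(struct pci_dev *pdev)
 *	{
 *		// Restart normal I/O.
 *	}
 *
 *	static const struct pci_error_handlers foo_err_handler = {
 *		.error_detected	= foo_error_detected,
 *		.slot_reset	= foo_slot_reset,
 *		.resume		= foo_resume,
 *	};
 */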

/**
 * broadcast_error_message - handle message broadcast to downstream drivers
 * @dev: pointer to the device from which the message is broadcast down the
 *	hierarchy
 * @state: error state
 * @error_mesg: message to print
 * @cb: callback to be broadcast
 *
 * Invoked during the error recovery process. The error state is broadcast
 * to all downstream drivers in the hierarchy in question.
 */
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
	enum pci_channel_state state,
	char *error_mesg,
	int (*cb)(struct pci_dev *, void *))
{
	struct aer_broadcast_data result_data;

	dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
	result_data.state = state;
	if (cb == report_error_detected)
		result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
	else
		result_data.result = PCI_ERS_RESULT_RECOVERED;

	if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
		/*
		 * If the error is reported by a bridge, we think this error
		 * is related to the downstream link of the bridge, so we
		 * do error recovery on all subordinates of the bridge instead
		 * of the bridge itself, and clear the error status of the
		 * bridge.
		 */
		if (cb == report_error_detected)
			dev->error_state = state;
		pci_walk_bus(dev->subordinate, cb, &result_data);
		if (cb == report_resume) {
			pci_cleanup_aer_uncorrect_error_status(dev);
			dev->error_state = pci_channel_io_normal;
		}
	} else {
		/*
		 * If the error is reported by an end point, we think this
		 * error is related to the upstream link of the end point.
		 */
		pci_walk_bus(dev->bus, cb, &result_data);
	}

	return result_data.result;
}

/**
 * aer_do_secondary_bus_reset - perform secondary bus reset
 * @dev: pointer to bridge's pci_dev data structure
 *
 * Invoked when performing link reset at Root Port or Downstream Port.
 */
void aer_do_secondary_bus_reset(struct pci_dev *dev)
{
	u16 p2p_ctrl;

	/* Assert Secondary Bus Reset */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
	p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);

	/*
	 * we should send hot reset message for 2ms to allow it time to
	 * propagate to all downstream ports
	 */
	msleep(2);

	/* De-assert Secondary Bus Reset */
	p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);

	/*
	 * System software must wait for at least 100ms from the end
	 * of a reset of one or more devices before it is permitted
	 * to issue Configuration Requests to those devices.
	 */
	msleep(200);
}

/**
 * default_downstream_reset_link - default reset function for Downstream Port
 * @dev: pointer to downstream port's pci_dev data structure
 *
 * Invoked when performing link reset at a Downstream Port w/ no aer driver.
 */
static pci_ers_result_t default_downstream_reset_link(struct pci_dev *dev)
{
	aer_do_secondary_bus_reset(dev);
	dev_printk(KERN_DEBUG, &dev->dev,
		   "Downstream Port link has been reset\n");
	return PCI_ERS_RESULT_RECOVERED;
}

static int find_aer_service_iter(struct device *device, void *data)
{
	struct pcie_port_service_driver *service_driver, **drv;

	drv = (struct pcie_port_service_driver **) data;

	if (device->bus == &pcie_port_bus_type && device->driver) {
		service_driver = to_service_driver(device->driver);
		if (service_driver->service == PCIE_PORT_SERVICE_AER) {
			*drv = service_driver;
			return 1;
		}
	}

	return 0;
}

static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
{
	struct pcie_port_service_driver *drv = NULL;

	device_for_each_child(&dev->dev, &drv, find_aer_service_iter);

	return drv;
}

static pci_ers_result_t reset_link(struct pci_dev *dev)
{
	struct pci_dev *udev;
	pci_ers_result_t status;
	struct pcie_port_service_driver *driver;

	if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
		/* Reset this port for all subordinates */
		udev = dev;
	} else {
		/* Reset the upstream component (likely downstream port) */
		udev = dev->bus->self;
	}

	/* Try the AER driver of the component first */
	driver = find_aer_service(udev);

	if (driver && driver->reset_link) {
		status = driver->reset_link(udev);
	} else if (pci_pcie_type(udev) == PCI_EXP_TYPE_DOWNSTREAM) {
		status = default_downstream_reset_link(udev);
	} else {
		dev_printk(KERN_DEBUG, &dev->dev,
			   "no link-reset support at upstream device %s\n",
			   pci_name(udev));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (status != PCI_ERS_RESULT_RECOVERED) {
		dev_printk(KERN_DEBUG, &dev->dev,
			   "link reset at upstream device %s failed\n",
			   pci_name(udev));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return status;
}

/**
 * do_recovery - handle nonfatal/fatal error recovery process
 * @dev: pointer to a pci_dev data structure of agent detecting an error
 * @severity: error severity type
 *
 * Invoked when an error is nonfatal/fatal. Broadcasts the error-detected
 * message to all downstream drivers within the hierarchy in question and
 * walks the recovery steps based on the returned result codes.
 */
static void do_recovery(struct pci_dev *dev, int severity)
{
	pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
	enum pci_channel_state state;

	if (severity == AER_FATAL)
		state = pci_channel_io_frozen;
	else
		state = pci_channel_io_normal;

	status = broadcast_error_message(dev,
			state,
			"error_detected",
			report_error_detected);

	if (severity == AER_FATAL) {
		result = reset_link(dev);
		if (result != PCI_ERS_RESULT_RECOVERED)
			goto failed;
	}

	if (status == PCI_ERS_RESULT_CAN_RECOVER)
		status = broadcast_error_message(dev,
				state,
				"mmio_enabled",
				report_mmio_enabled);

	if (status == PCI_ERS_RESULT_NEED_RESET) {
		/*
		 * TODO: Should call platform-specific
		 * functions to reset slot before calling
		 * drivers' slot_reset callbacks?
		 */
		status = broadcast_error_message(dev,
				state,
				"slot_reset",
				report_slot_reset);
	}

	if (status != PCI_ERS_RESULT_RECOVERED)
		goto failed;

	broadcast_error_message(dev,
			state,
			"resume",
			report_resume);

	dev_printk(KERN_DEBUG, &dev->dev,
		   "AER driver successfully recovered\n");
	return;

failed:
	/* TODO: Should kernel panic here? */
	dev_printk(KERN_DEBUG, &dev->dev,
		   "AER driver didn't recover\n");
}

/**
 * handle_error_source - handle logging error into an event log
 * @aerdev: pointer to pcie_device data structure of the root port
 * @dev: pointer to pci_dev data structure of error source device
 * @info: comprehensive error information
 *
 * Invoked when an error is detected by the Root Port.
 */
static void handle_error_source(struct pcie_device *aerdev,
	struct pci_dev *dev,
	struct aer_err_info *info)
{
	int pos;

	if (info->severity == AER_CORRECTABLE) {
		/*
		 * Correctable error does not need software intervention.
		 * No need to go through error recovery process.
		 */
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
		if (pos)
			pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
					info->status);
	} else
		do_recovery(dev, info->severity);
}

#ifdef CONFIG_ACPI_APEI_PCIEAER
static void aer_recover_work_func(struct work_struct *work);

#define AER_RECOVER_RING_ORDER		4
#define AER_RECOVER_RING_SIZE		(1 << AER_RECOVER_RING_ORDER)

struct aer_recover_entry {
	u8	bus;
	u8	devfn;
	u16	domain;
	int	severity;
};

static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
		    AER_RECOVER_RING_SIZE);

/*
 * Mutual exclusion for writers of aer_recover_ring; the reader side needs
 * no lock, because there is only one reader and no lock is needed between
 * the reader and the writers.
 */
static DEFINE_SPINLOCK(aer_recover_ring_lock);
static DECLARE_WORK(aer_recover_work, aer_recover_work_func);

void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
		       int severity)
{
	unsigned long flags;
	struct aer_recover_entry entry = {
		.bus		= bus,
		.devfn		= devfn,
		.domain		= domain,
		.severity	= severity,
	};

	spin_lock_irqsave(&aer_recover_ring_lock, flags);
	if (kfifo_put(&aer_recover_ring, &entry))
		schedule_work(&aer_recover_work);
	else
		pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
		       domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
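
/*
 * Usage sketch (not part of this file): with firmware-first AER, the error
 * record arrives through ACPI APEI/GHES rather than the Root Port interrupt,
 * and that code hands recovery off to the ring above with a call shaped
 * roughly like the following (variable names are illustrative):
 *
 *	aer_recover_queue(domain, bus, PCI_DEVFN(slot, func), severity);
 *
 * aer_recover_work_func() below then resolves the pci_dev and runs the same
 * do_recovery() path that interrupt-driven AER uses.
 */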

static void aer_recover_work_func(struct work_struct *work)
{
	struct aer_recover_entry entry;
	struct pci_dev *pdev;

	while (kfifo_get(&aer_recover_ring, &entry)) {
		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
						   entry.devfn);
		if (!pdev) {
			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
			       entry.domain, entry.bus,
			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
			continue;
		}
		do_recovery(pdev, entry.severity);
		/* Drop the reference taken by pci_get_domain_bus_and_slot() */
		pci_dev_put(pdev);
	}
}
#endif

/**
 * get_device_error_info - read error status from dev and store it to info
 * @dev: pointer to the device expected to have an error record
 * @info: pointer to structure to store the error record
 *
 * Return 1 on success, 0 on error.
 *
 * Note that @info is reused among all error devices. Clear fields properly.
 */
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
	int pos, temp;

	/* Must reset in this function */
	info->status = 0;
	info->tlp_header_valid = 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);

	/* The device might not support AER */
	if (!pos)
		return 1;

	if (info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
			&info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
			&info->mask);
		if (!(info->status & ~info->mask))
			return 0;
	} else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
		   info->severity == AER_NONFATAL) {

		/* Link is still healthy for IO reads */
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
			&info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
			&info->mask);
		if (!(info->status & ~info->mask))
			return 0;

		/* Get First Error Pointer */
		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
		info->first_error = PCI_ERR_CAP_FEP(temp);

		if (info->status & AER_LOG_TLP_MASKS) {
			info->tlp_header_valid = 1;
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
		}
	}

	return 1;
}

static inline void aer_process_err_devices(struct pcie_device *p_device,
					   struct aer_err_info *e_info)
{
	int i;

	/* Report all errors before handling them, so records aren't lost to a reset etc. */
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (get_device_error_info(e_info->dev[i], e_info))
			aer_print_error(e_info->dev[i], e_info);
	}
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (get_device_error_info(e_info->dev[i], e_info))
			handle_error_source(p_device, e_info->dev[i], e_info);
	}
}

/**
 * aer_isr_one_error - consume an error detected by root port
 * @p_device: pointer to error root port service device
 * @e_src: pointer to an error source
 */
static void aer_isr_one_error(struct pcie_device *p_device,
		struct aer_err_source *e_src)
{
	struct aer_err_info *e_info;

	/* struct aer_err_info might be big, so we allocate it with slab */
	e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
	if (!e_info) {
		dev_printk(KERN_DEBUG, &p_device->port->dev,
			   "Can't allocate mem when processing AER errors\n");
		return;
	}

	/*
	 * There is a possibility that both a correctable error and an
	 * uncorrectable error are logged. Report the correctable error first.
	 */
	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
		e_info->id = ERR_COR_ID(e_src->id);
		e_info->severity = AER_CORRECTABLE;

		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(p_device->port, e_info);

		if (find_source_device(p_device->port, e_info))
			aer_process_err_devices(p_device, e_info);
	}

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		e_info->id = ERR_UNCOR_ID(e_src->id);

		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			e_info->severity = AER_FATAL;
		else
			e_info->severity = AER_NONFATAL;

		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(p_device->port, e_info);

		if (find_source_device(p_device->port, e_info))
			aer_process_err_devices(p_device, e_info);
	}

	kfree(e_info);
}

/**
 * get_e_source - retrieve an error source
 * @rpc: pointer to the root port which holds an error
 * @e_src: pointer to store retrieved error source
 *
 * Return 1 if an error source is retrieved, otherwise 0.
 *
 * Invoked by DPC handler to consume an error.
 */
static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
{
	unsigned long flags;

	/* Lock access to Root error producer/consumer index */
	spin_lock_irqsave(&rpc->e_lock, flags);
	if (rpc->prod_idx == rpc->cons_idx) {
		spin_unlock_irqrestore(&rpc->e_lock, flags);
		return 0;
	}

	*e_src = rpc->e_sources[rpc->cons_idx];
	rpc->cons_idx++;
	if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
		rpc->cons_idx = 0;
	spin_unlock_irqrestore(&rpc->e_lock, flags);

	return 1;
}
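
/*
 * For context (not part of this file): the producer side of this ring lives
 * in the AER interrupt handler in aerdrv.c. Under the same e_lock it stores
 * the Root Error Status and Error Source Identification register values into
 * e_sources[rpc->prod_idx], advances prod_idx modulo AER_ERROR_SOURCES_MAX,
 * and schedules the aer_isr() work below. Schematic sketch only; see
 * aerdrv.c for the real code:
 *
 *	spin_lock_irqsave(&rpc->e_lock, flags);
 *	rpc->e_sources[rpc->prod_idx].status = root_status;
 *	rpc->e_sources[rpc->prod_idx].id = err_source_id;
 *	rpc->prod_idx = (rpc->prod_idx + 1) % AER_ERROR_SOURCES_MAX;
 *	spin_unlock_irqrestore(&rpc->e_lock, flags);
 *	schedule_work(&rpc->dpc_handler);
 */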

/**
 * aer_isr - consume errors detected by root port
 * @work: definition of this work item
 *
 * Invoked, as DPC, when root port records new detected error
 */
void aer_isr(struct work_struct *work)
{
	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
	struct pcie_device *p_device = rpc->rpd;
	struct aer_err_source uninitialized_var(e_src);

	mutex_lock(&rpc->rpc_mutex);
	while (get_e_source(rpc, &e_src))
		aer_isr_one_error(p_device, &e_src);
	mutex_unlock(&rpc->rpc_mutex);

	wake_up(&rpc->wait_release);
}

/**
 * aer_init - provide AER initialization
 * @dev: pointer to AER pcie device
 *
 * Invoked when AER service driver is loaded.
 */
int aer_init(struct pcie_device *dev)
{
	if (forceload) {
		dev_printk(KERN_DEBUG, &dev->device,
			   "aerdrv forceload requested.\n");
		pcie_aer_force_firmware_first(dev->port, 0);
	}
	return 0;
}