/*
 * drivers/pci/pci-driver.c
 *
 * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
 * (C) Copyright 2007 Novell Inc.
 *
 * Released under the GPL v2 only.
 *
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mempolicy.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/kexec.h>
#include "pci.h"

struct pci_dynid {
	struct list_head node;
	struct pci_device_id id;
};

/**
 * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices
 * @drv: target pci driver
 * @vendor: PCI vendor ID
 * @device: PCI device ID
 * @subvendor: PCI subvendor ID
 * @subdevice: PCI subdevice ID
 * @class: PCI class
 * @class_mask: PCI class mask
 * @driver_data: private driver data
 *
 * Adds a new dynamic pci device ID to this driver and causes the
 * driver to probe for all devices again.  @drv must have been
 * registered prior to calling this function.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int pci_add_dynid(struct pci_driver *drv,
		  unsigned int vendor, unsigned int device,
		  unsigned int subvendor, unsigned int subdevice,
		  unsigned int class, unsigned int class_mask,
		  unsigned long driver_data)
{
	struct pci_dynid *dynid;
	int retval;

	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
	if (!dynid)
		return -ENOMEM;

	dynid->id.vendor = vendor;
	dynid->id.device = device;
	dynid->id.subvendor = subvendor;
	dynid->id.subdevice = subdevice;
	dynid->id.class = class;
	dynid->id.class_mask = class_mask;
	dynid->id.driver_data = driver_data;

	spin_lock(&drv->dynids.lock);
	list_add_tail(&dynid->node, &drv->dynids.list);
	spin_unlock(&drv->dynids.lock);

	retval = driver_attach(&drv->driver);

	return retval;
}
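
/*
 * Illustrative sketch (kept out of the build with #if 0): a driver that
 * learns about an extra device ID at run time can feed it straight into the
 * dynamic ID list instead of going through sysfs.  "my_pci_driver" and the
 * numeric IDs below are placeholders, not real hardware.
 */
#if 0
static int my_add_extra_id(void)
{
	/* match 1234:5678 with any subsystem IDs and no class filter */
	return pci_add_dynid(&my_pci_driver, 0x1234, 0x5678,
			     PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0);
}
#endif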

static void pci_free_dynids(struct pci_driver *drv)
{
	struct pci_dynid *dynid, *n;

	spin_lock(&drv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
		list_del(&dynid->node);
		kfree(dynid);
	}
	spin_unlock(&drv->dynids.lock);
}

/**
 * store_new_id - sysfs frontend to pci_add_dynid()
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Allow PCI IDs to be added to an existing driver via sysfs.
 */
static ssize_t
store_new_id(struct device_driver *driver, const char *buf, size_t count)
{
	struct pci_driver *pdrv = to_pci_driver(driver);
	const struct pci_device_id *ids = pdrv->id_table;
	__u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	unsigned long driver_data = 0;
	int fields = 0;
	int retval;

	fields = sscanf(buf, "%x %x %x %x %x %x %lx",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask, &driver_data);
	if (fields < 2)
		return -EINVAL;

	/* Only accept driver_data values that match an existing id_table
	   entry */
	if (ids) {
		retval = -EINVAL;
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (driver_data == ids->driver_data) {
				retval = 0;
				break;
			}
			ids++;
		}
		if (retval)	/* No match */
			return retval;
	}

	retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice,
			       class, class_mask, driver_data);
	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);

/**
 * store_remove_id - remove a PCI device ID from this driver
 * @driver: target device driver
 * @buf: buffer for scanning device ID data
 * @count: input size
 *
 * Removes a dynamic pci device ID from this driver.
 */
static ssize_t
store_remove_id(struct device_driver *driver, const char *buf, size_t count)
{
	struct pci_dynid *dynid, *n;
	struct pci_driver *pdrv = to_pci_driver(driver);
	__u32 vendor, device, subvendor = PCI_ANY_ID,
		subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
	int fields = 0;
	int retval = -ENODEV;

	fields = sscanf(buf, "%x %x %x %x %x %x",
			&vendor, &device, &subvendor, &subdevice,
			&class, &class_mask);
	if (fields < 2)
		return -EINVAL;

	spin_lock(&pdrv->dynids.lock);
	list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) {
		struct pci_device_id *id = &dynid->id;
		if ((id->vendor == vendor) &&
		    (id->device == device) &&
		    (subvendor == PCI_ANY_ID || id->subvendor == subvendor) &&
		    (subdevice == PCI_ANY_ID || id->subdevice == subdevice) &&
		    !((id->class ^ class) & class_mask)) {
			list_del(&dynid->node);
			kfree(dynid);
			retval = 0;
			break;
		}
	}
	spin_unlock(&pdrv->dynids.lock);

	if (retval)
		return retval;
	return count;
}
static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id);

static struct attribute *pci_drv_attrs[] = {
	&driver_attr_new_id.attr,
	&driver_attr_remove_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(pci_drv);
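
/*
 * The attributes above back the per-driver sysfs files; for example (the
 * driver name "foo" and the IDs are placeholders):
 *
 *	# echo "1234 5678" > /sys/bus/pci/drivers/foo/new_id
 *	# echo "1234 5678" > /sys/bus/pci/drivers/foo/remove_id
 *
 * Vendor and device are mandatory; subvendor, subdevice, class, class_mask
 * and driver_data may follow, in the order parsed by store_new_id().
 */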

/**
 * pci_match_id - See if a pci device matches a given pci_id table
 * @ids: array of PCI device id structures to search in
 * @dev: the PCI device structure to match against.
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 *
 * Deprecated, don't use this as it will not catch any dynamic ids
 * that a driver might want to check for.
 */
const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
					 struct pci_dev *dev)
{
	if (ids) {
		while (ids->vendor || ids->subvendor || ids->class_mask) {
			if (pci_match_one_device(ids, dev))
				return ids;
			ids++;
		}
	}
	return NULL;
}
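
/*
 * Illustrative sketch (kept out of the build with #if 0) of a driver-side ID
 * table and a pci_match_id() lookup; "my_ids" and the IDs are placeholders.
 * Drivers normally just set .id_table and let the core do the matching.
 */
#if 0
static const struct pci_device_id my_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ }	/* terminating all-zero entry */
};

static bool my_dev_is_supported(struct pci_dev *pdev)
{
	return pci_match_id(my_ids, pdev) != NULL;
}
#endif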

/**
 * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
 * @drv: the PCI driver to match against
 * @dev: the PCI device structure to match against
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns the matching
 * pci_device_id structure or %NULL if there is no match.
 */
static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
						    struct pci_dev *dev)
{
	struct pci_dynid *dynid;

	/* Look at the dynamic ids first, before the static ones */
	spin_lock(&drv->dynids.lock);
	list_for_each_entry(dynid, &drv->dynids.list, node) {
		if (pci_match_one_device(&dynid->id, dev)) {
			spin_unlock(&drv->dynids.lock);
			return &dynid->id;
		}
	}
	spin_unlock(&drv->dynids.lock);

	return pci_match_id(drv->id_table, dev);
}

struct drv_dev_and_id {
	struct pci_driver *drv;
	struct pci_dev *dev;
	const struct pci_device_id *id;
};

static long local_pci_probe(void *_ddi)
{
	struct drv_dev_and_id *ddi = _ddi;
	struct pci_dev *pci_dev = ddi->dev;
	struct pci_driver *pci_drv = ddi->drv;
	struct device *dev = &pci_dev->dev;
	int rc;

	/*
	 * Unbound PCI devices are always put in D0, regardless of
	 * runtime PM status.  During probe, the device is set to
	 * active and the usage count is incremented.  If the driver
	 * supports runtime PM, it should call pm_runtime_put_noidle()
	 * in its probe routine and pm_runtime_get_noresume() in its
	 * remove routine.
	 */
	pm_runtime_get_sync(dev);
	pci_dev->driver = pci_drv;
	rc = pci_drv->probe(pci_dev, ddi->id);
	if (!rc)
		return rc;
	if (rc < 0) {
		pci_dev->driver = NULL;
		pm_runtime_put_sync(dev);
		return rc;
	}
	/*
	 * Probe function should return < 0 for failure, 0 for success
	 * Treat values > 0 as success, but warn.
	 */
	dev_warn(dev, "Driver probe function unexpectedly returned %d\n", rc);
	return 0;
}
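
/*
 * Illustrative sketch (kept out of the build with #if 0) of the runtime PM
 * contract described above, for a driver that supports runtime PM;
 * "my_probe" and "my_remove" are placeholders.  Probe drops the usage count
 * taken by the core, remove takes it back before the core's
 * pm_runtime_put_sync().
 */
#if 0
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = pcim_enable_device(pdev);

	if (ret)
		return ret;
	pm_runtime_put_noidle(&pdev->dev);	/* allow runtime suspend */
	return 0;
}

static void my_remove(struct pci_dev *pdev)
{
	pm_runtime_get_noresume(&pdev->dev);	/* rebalance before unbind */
}
#endif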

static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
			  const struct pci_device_id *id)
{
	int error, node;
	struct drv_dev_and_id ddi = { drv, dev, id };

	/*
	 * Execute driver initialization on node where the device is
	 * attached.  This way the driver likely allocates its local memory
	 * on the right node.
	 */
	node = dev_to_node(&dev->dev);

	/*
	 * On NUMA systems, we are likely to call a PF probe function using
	 * work_on_cpu().  If that probe calls pci_enable_sriov() (which
	 * adds the VF devices via pci_bus_add_device()), we may re-enter
	 * this function to call the VF probe function.  Calling
	 * work_on_cpu() again will cause a lockdep warning.  Since VFs are
	 * always on the same node as the PF, we can work around this by
	 * avoiding work_on_cpu() when we're already on the correct node.
	 *
	 * Preemption is enabled, so it's theoretically unsafe to use
	 * numa_node_id(), but even if we run the probe function on the
	 * wrong node, it should be functionally correct.
	 */
	if (node >= 0 && node != numa_node_id()) {
		int cpu;

		get_online_cpus();
		cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
		if (cpu < nr_cpu_ids)
			error = work_on_cpu(cpu, local_pci_probe, &ddi);
		else
			error = local_pci_probe(&ddi);
		put_online_cpus();
	} else
		error = local_pci_probe(&ddi);

	return error;
}

/**
 * __pci_device_probe - check if a driver wants to claim a specific PCI device
 * @drv: driver to call to check if it wants the PCI device
 * @pci_dev: PCI device being probed
 *
 * returns 0 on success, else error.
 * side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
 */
static int
__pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
{
	const struct pci_device_id *id;
	int error = 0;

	if (!pci_dev->driver && drv->probe) {
		error = -ENODEV;

		id = pci_match_device(drv, pci_dev);
		if (id)
			error = pci_call_probe(drv, pci_dev, id);
		if (error >= 0)
			error = 0;
	}
	return error;
}

static int pci_device_probe(struct device *dev)
{
	int error = 0;
	struct pci_driver *drv;
	struct pci_dev *pci_dev;

	drv = to_pci_driver(dev->driver);
	pci_dev = to_pci_dev(dev);
	pci_dev_get(pci_dev);
	error = __pci_device_probe(drv, pci_dev);
	if (error)
		pci_dev_put(pci_dev);

	return error;
}

static int pci_device_remove(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv) {
		if (drv->remove) {
			pm_runtime_get_sync(dev);
			drv->remove(pci_dev);
			pm_runtime_put_noidle(dev);
		}
		pci_dev->driver = NULL;
	}

	/* Undo the runtime PM settings in local_pci_probe() */
	pm_runtime_put_sync(dev);

	/*
	 * If the device is still on, set the power state as "unknown",
	 * since it might change by the next time we load the driver.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;

	/*
	 * We would love to complain here if pci_dev->is_enabled is set, that
	 * the driver should have called pci_disable_device(), but the
	 * unfortunate fact is there are too many odd BIOS and bridge setups
	 * that don't like drivers doing that all of the time.
	 * Oh well, we can dream of sane hardware when we sleep, no matter how
	 * horrible the crap we have to deal with is when we are awake...
	 */

	pci_dev_put(pci_dev);
	return 0;
}

static void pci_device_shutdown(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pm_runtime_resume(dev);

	if (drv && drv->shutdown)
		drv->shutdown(pci_dev);
	pci_msi_shutdown(pci_dev);
	pci_msix_shutdown(pci_dev);

#ifdef CONFIG_KEXEC
	/*
	 * If this is a kexec reboot, turn off Bus Master bit on the
	 * device to tell it to not continue to do DMA.  Don't touch
	 * devices in D3cold or unknown states.
	 * If it is not a kexec reboot, firmware will hit the PCI
	 * devices with big hammer and stop their DMA any way.
	 */
	if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
		pci_clear_master(pci_dev);
#endif
}

#ifdef CONFIG_PM

/* Auxiliary functions used for system resume and run-time resume. */

/**
 * pci_restore_standard_config - restore standard config registers of PCI device
 * @pci_dev: PCI device to handle
 */
static int pci_restore_standard_config(struct pci_dev *pci_dev)
{
	pci_update_current_state(pci_dev, PCI_UNKNOWN);

	if (pci_dev->current_state != PCI_D0) {
		int error = pci_set_power_state(pci_dev, PCI_D0);
		if (error)
			return error;
	}

	pci_restore_state(pci_dev);
	return 0;
}

#endif

#ifdef CONFIG_PM_SLEEP

static void pci_pm_default_resume_early(struct pci_dev *pci_dev)
{
	pci_power_up(pci_dev);
	pci_restore_state(pci_dev);
	pci_fixup_device(pci_fixup_resume_early, pci_dev);
}

/*
 * Default "suspend" method for devices that have no driver provided suspend,
 * or not even a driver at all (second part).
 */
static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
{
	/*
	 * mark its power state as "unknown", since we don't know if
	 * e.g. the BIOS will change its device state when we suspend.
	 */
	if (pci_dev->current_state == PCI_D0)
		pci_dev->current_state = PCI_UNKNOWN;
}

/*
 * Default "resume" method for devices that have no driver provided resume,
 * or not even a driver at all (second part).
 */
static int pci_pm_reenable_device(struct pci_dev *pci_dev)
{
	int retval;

	/* if the device was enabled before suspend, reenable */
	retval = pci_reenable_device(pci_dev);
	/*
	 * if the device was busmaster before the suspend, make it busmaster
	 * again
	 */
	if (pci_dev->is_busmaster)
		pci_set_master(pci_dev);

	return retval;
}

static int pci_legacy_suspend(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend(pci_dev, state);
		suspend_report_result(drv->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: Device state not saved by %pF\n",
				drv->suspend);
		}
	}

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	return 0;
}

static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->suspend_late) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = drv->suspend_late(pci_dev, state);
		suspend_report_result(drv->suspend_late, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: Device state not saved by %pF\n",
				drv->suspend_late);
			return 0;
		}
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

	return 0;
}

static int pci_legacy_resume_early(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	return drv && drv->resume_early ?
			drv->resume_early(pci_dev) : 0;
}

static int pci_legacy_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	pci_fixup_device(pci_fixup_resume, pci_dev);

	return drv && drv->resume ?
			drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
}

/* Auxiliary functions used by the new power management framework */

static void pci_pm_default_resume(struct pci_dev *pci_dev)
{
	pci_fixup_device(pci_fixup_resume, pci_dev);

	if (!pci_is_bridge(pci_dev))
		pci_enable_wake(pci_dev, PCI_D0, false);
}

static void pci_pm_default_suspend(struct pci_dev *pci_dev)
{
	/* Disable non-bridge devices without PM support */
	if (!pci_is_bridge(pci_dev))
		pci_disable_enabled_device(pci_dev);
}

static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
{
	struct pci_driver *drv = pci_dev->driver;
	bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
		|| drv->resume_early);

	/*
	 * Legacy PM support is used by default, so warn if the new framework is
	 * supported as well.  Drivers are supposed to support either the
	 * former, or the latter, but not both at the same time.
	 */
	WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
		drv->name, pci_dev->vendor, pci_dev->device);

	return ret;
}

/* New power management framework */

static int pci_pm_prepare(struct device *dev)
{
	struct device_driver *drv = dev->driver;
	int error = 0;

	/*
	 * PCI devices suspended at run time need to be resumed at this
	 * point, because in general it is necessary to reconfigure them for
	 * system suspend.  Namely, if the device is supposed to wake up the
	 * system from the sleep state, we may need to reconfigure it for this
	 * purpose.  In turn, if the device is not supposed to wake up the
	 * system from the sleep state, we'll have to prevent it from signaling
	 * wake-up.
	 */
	pm_runtime_resume(dev);

	if (drv && drv->pm && drv->pm->prepare)
		error = drv->pm->prepare(dev);

	return error;
}

#else /* !CONFIG_PM_SLEEP */

#define pci_pm_prepare	NULL

#endif /* !CONFIG_PM_SLEEP */

#ifdef CONFIG_SUSPEND

static int pci_pm_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		goto Fixup;
	}

	pci_dev->state_saved = false;
	if (pm->suspend) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend(dev);
		suspend_report_result(pm->suspend, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: State of device not saved by %pF\n",
				pm->suspend);
		}
	}

 Fixup:
	pci_fixup_device(pci_fixup_suspend, pci_dev);

	return 0;
}

static int pci_pm_suspend_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_SUSPEND);

	if (!pm) {
		pci_save_state(pci_dev);
		return 0;
	}

	if (pm->suspend_noirq) {
		pci_power_t prev = pci_dev->current_state;
		int error;

		error = pm->suspend_noirq(dev);
		suspend_report_result(pm->suspend_noirq, error);
		if (error)
			return error;

		if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
		    && pci_dev->current_state != PCI_UNKNOWN) {
			WARN_ONCE(pci_dev->current_state != prev,
				"PCI PM: State of device not saved by %pF\n",
				pm->suspend_noirq);
			return 0;
		}
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		if (!pci_is_bridge(pci_dev))
			pci_prepare_to_sleep(pci_dev);
	}

	pci_pm_set_unknown_state(pci_dev);

	/*
	 * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's
	 * PCI COMMAND register isn't 0, the BIOS assumes that the controller
	 * hasn't been quiesced and tries to turn it off.  If the controller
	 * is already in D3, this can hang or cause memory corruption.
	 *
	 * Since the value of the COMMAND register doesn't matter once the
	 * device has been suspended, we can safely set it to 0 here.
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

	return 0;
}

static int pci_pm_resume_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	pci_pm_default_resume_early(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	if (drv && drv->pm && drv->pm->resume_noirq)
		error = drv->pm->resume_noirq(dev);

	return error;
}

static int pci_pm_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	/*
	 * This is necessary for the suspend error path in which resume is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->resume)
			error = pm->resume(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

#else /* !CONFIG_SUSPEND */

#define pci_pm_suspend		NULL
#define pci_pm_suspend_noirq	NULL
#define pci_pm_resume		NULL
#define pci_pm_resume_noirq	NULL

#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_HIBERNATE_CALLBACKS

/*
 * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
 * a hibernate transition
 */
struct dev_pm_ops __weak pcibios_pm_ops;

static int pci_pm_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_FREEZE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		return 0;
	}

	pci_dev->state_saved = false;
	if (pm->freeze) {
		int error;

		error = pm->freeze(dev);
		suspend_report_result(pm->freeze, error);
		if (error)
			return error;
	}

	if (pcibios_pm_ops.freeze)
		return pcibios_pm_ops.freeze(dev);

	return 0;
}

static int pci_pm_freeze_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend_late(dev, PMSG_FREEZE);

	if (drv && drv->pm && drv->pm->freeze_noirq) {
		int error;

		error = drv->pm->freeze_noirq(dev);
		suspend_report_result(drv->pm->freeze_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved)
		pci_save_state(pci_dev);

	pci_pm_set_unknown_state(pci_dev);

	if (pcibios_pm_ops.freeze_noirq)
		return pcibios_pm_ops.freeze_noirq(dev);

	return 0;
}

static int pci_pm_thaw_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (pcibios_pm_ops.thaw_noirq) {
		error = pcibios_pm_ops.thaw_noirq(dev);
		if (error)
			return error;
	}

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	pci_update_current_state(pci_dev, PCI_D0);

	if (drv && drv->pm && drv->pm->thaw_noirq)
		error = drv->pm->thaw_noirq(dev);

	return error;
}

static int pci_pm_thaw(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	if (pcibios_pm_ops.thaw) {
		error = pcibios_pm_ops.thaw(dev);
		if (error)
			return error;
	}

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	if (pm) {
		if (pm->thaw)
			error = pm->thaw(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	pci_dev->state_saved = false;

	return error;
}

static int pci_pm_poweroff(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_suspend(dev, PMSG_HIBERNATE);

	if (!pm) {
		pci_pm_default_suspend(pci_dev);
		goto Fixup;
	}

	pci_dev->state_saved = false;
	if (pm->poweroff) {
		int error;

		error = pm->poweroff(dev);
		suspend_report_result(pm->poweroff, error);
		if (error)
			return error;
	}

 Fixup:
	pci_fixup_device(pci_fixup_suspend, pci_dev);

	if (pcibios_pm_ops.poweroff)
		return pcibios_pm_ops.poweroff(dev);

	return 0;
}

static int pci_pm_poweroff_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;

	if (pci_has_legacy_pm_support(to_pci_dev(dev)))
		return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);

	if (!drv || !drv->pm)
		return 0;

	if (drv->pm->poweroff_noirq) {
		int error;

		error = drv->pm->poweroff_noirq(dev);
		suspend_report_result(drv->pm->poweroff_noirq, error);
		if (error)
			return error;
	}

	if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
		pci_prepare_to_sleep(pci_dev);

	/*
	 * The reason for doing this here is the same as for the analogous code
	 * in pci_pm_suspend_noirq().
	 */
	if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
		pci_write_config_word(pci_dev, PCI_COMMAND, 0);

	if (pcibios_pm_ops.poweroff_noirq)
		return pcibios_pm_ops.poweroff_noirq(dev);

	return 0;
}

static int pci_pm_restore_noirq(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct device_driver *drv = dev->driver;
	int error = 0;

	if (pcibios_pm_ops.restore_noirq) {
		error = pcibios_pm_ops.restore_noirq(dev);
		if (error)
			return error;
	}

	pci_pm_default_resume_early(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume_early(dev);

	if (drv && drv->pm && drv->pm->restore_noirq)
		error = drv->pm->restore_noirq(dev);

	return error;
}

static int pci_pm_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error = 0;

	if (pcibios_pm_ops.restore) {
		error = pcibios_pm_ops.restore(dev);
		if (error)
			return error;
	}

	/*
	 * This is necessary for the hibernation error path in which restore is
	 * called without restoring the standard config registers of the device.
	 */
	if (pci_dev->state_saved)
		pci_restore_standard_config(pci_dev);

	if (pci_has_legacy_pm_support(pci_dev))
		return pci_legacy_resume(dev);

	pci_pm_default_resume(pci_dev);

	if (pm) {
		if (pm->restore)
			error = pm->restore(dev);
	} else {
		pci_pm_reenable_device(pci_dev);
	}

	return error;
}

#else /* !CONFIG_HIBERNATE_CALLBACKS */

#define pci_pm_freeze		NULL
#define pci_pm_freeze_noirq	NULL
#define pci_pm_thaw		NULL
#define pci_pm_thaw_noirq	NULL
#define pci_pm_poweroff		NULL
#define pci_pm_poweroff_noirq	NULL
#define pci_pm_restore		NULL
#define pci_pm_restore_noirq	NULL

#endif /* !CONFIG_HIBERNATE_CALLBACKS */

#ifdef CONFIG_PM_RUNTIME

static int pci_pm_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	pci_power_t prev = pci_dev->current_state;
	int error;

	/*
	 * If pci_dev->driver is not set (unbound), the device should
	 * always remain in D0 regardless of the runtime PM status
	 */
	if (!pci_dev->driver)
		return 0;

	if (!pm || !pm->runtime_suspend)
		return -ENOSYS;

	pci_dev->state_saved = false;
	pci_dev->no_d3cold = false;
	error = pm->runtime_suspend(dev);
	suspend_report_result(pm->runtime_suspend, error);
	if (error)
		return error;
	if (!pci_dev->d3cold_allowed)
		pci_dev->no_d3cold = true;

	pci_fixup_device(pci_fixup_suspend, pci_dev);

	if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0
	    && pci_dev->current_state != PCI_UNKNOWN) {
		WARN_ONCE(pci_dev->current_state != prev,
			"PCI PM: State of device not saved by %pF\n",
			pm->runtime_suspend);
		return 0;
	}

	if (!pci_dev->state_saved) {
		pci_save_state(pci_dev);
		pci_finish_runtime_suspend(pci_dev);
	}

	return 0;
}

static int pci_pm_runtime_resume(struct device *dev)
{
	int rc;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;

	/*
	 * If pci_dev->driver is not set (unbound), the device should
	 * always remain in D0 regardless of the runtime PM status
	 */
	if (!pci_dev->driver)
		return 0;

	if (!pm || !pm->runtime_resume)
		return -ENOSYS;

	pci_restore_standard_config(pci_dev);
	pci_fixup_device(pci_fixup_resume_early, pci_dev);
	__pci_enable_wake(pci_dev, PCI_D0, true, false);
	pci_fixup_device(pci_fixup_resume, pci_dev);

	rc = pm->runtime_resume(dev);

	pci_dev->runtime_d3cold = false;

	return rc;
}

static int pci_pm_runtime_idle(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int ret = 0;

	/*
	 * If pci_dev->driver is not set (unbound), the device should
	 * always remain in D0 regardless of the runtime PM status
	 */
	if (!pci_dev->driver)
		return 0;

	if (!pm)
		return -ENOSYS;

	if (pm->runtime_idle)
		ret = pm->runtime_idle(dev);

	return ret;
}

#else /* !CONFIG_PM_RUNTIME */

#define pci_pm_runtime_suspend	NULL
#define pci_pm_runtime_resume	NULL
#define pci_pm_runtime_idle	NULL

#endif /* !CONFIG_PM_RUNTIME */

#ifdef CONFIG_PM

static const struct dev_pm_ops pci_dev_pm_ops = {
	.prepare = pci_pm_prepare,
	.suspend = pci_pm_suspend,
	.resume = pci_pm_resume,
	.freeze = pci_pm_freeze,
	.thaw = pci_pm_thaw,
	.poweroff = pci_pm_poweroff,
	.restore = pci_pm_restore,
	.suspend_noirq = pci_pm_suspend_noirq,
	.resume_noirq = pci_pm_resume_noirq,
	.freeze_noirq = pci_pm_freeze_noirq,
	.thaw_noirq = pci_pm_thaw_noirq,
	.poweroff_noirq = pci_pm_poweroff_noirq,
	.restore_noirq = pci_pm_restore_noirq,
	.runtime_suspend = pci_pm_runtime_suspend,
	.runtime_resume = pci_pm_runtime_resume,
	.runtime_idle = pci_pm_runtime_idle,
};

#define PCI_PM_OPS_PTR	(&pci_dev_pm_ops)

#else /* !CONFIG_PM */

#define PCI_PM_OPS_PTR	NULL

#endif /* !CONFIG_PM */

/**
 * __pci_register_driver - register a new pci driver
 * @drv: the driver structure to register
 * @owner: owner module of drv
 * @mod_name: module name string
 *
 * Adds the driver structure to the list of registered drivers.
 * Returns a negative value on error, otherwise 0.
 * If no error occurred, the driver remains registered even if
 * no device was claimed during registration.
 */
int __pci_register_driver(struct pci_driver *drv, struct module *owner,
			  const char *mod_name)
{
	/* initialize common driver fields */
	drv->driver.name = drv->name;
	drv->driver.bus = &pci_bus_type;
	drv->driver.owner = owner;
	drv->driver.mod_name = mod_name;

	spin_lock_init(&drv->dynids.lock);
	INIT_LIST_HEAD(&drv->dynids.list);

	/* register with core */
	return driver_register(&drv->driver);
}
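
/*
 * Illustrative sketch (kept out of the build with #if 0) of the usual call
 * path into __pci_register_driver(); "my_pci_driver", "my_ids", "my_probe"
 * and "my_remove" are placeholders.  pci_register_driver() and
 * module_pci_driver() supply THIS_MODULE and KBUILD_MODNAME for the caller.
 */
#if 0
static struct pci_driver my_pci_driver = {
	.name		= "my_pci_driver",
	.id_table	= my_ids,
	.probe		= my_probe,
	.remove		= my_remove,
};
module_pci_driver(my_pci_driver);
#endif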

/**
 * pci_unregister_driver - unregister a pci driver
 * @drv: the driver structure to unregister
 *
 * Deletes the driver structure from the list of registered PCI drivers,
 * gives it a chance to clean up by calling its remove() function for
 * each device it was responsible for, and marks those devices as
 * driverless.
 */
void
pci_unregister_driver(struct pci_driver *drv)
{
	driver_unregister(&drv->driver);
	pci_free_dynids(drv);
}

static struct pci_driver pci_compat_driver = {
	.name = "compat"
};

/**
 * pci_dev_driver - get the pci_driver of a device
 * @dev: the device to query
 *
 * Returns the appropriate pci_driver structure or %NULL if there is no
 * registered driver for the device.
 */
struct pci_driver *
pci_dev_driver(const struct pci_dev *dev)
{
	if (dev->driver)
		return dev->driver;
	else {
		int i;
		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			if (dev->resource[i].flags & IORESOURCE_BUSY)
				return &pci_compat_driver;
	}
	return NULL;
}

/**
 * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
 * @dev: the PCI device structure to match against
 * @drv: the device driver to search for matching PCI device id structures
 *
 * Used by a driver to check whether a PCI device present in the
 * system is in its list of supported devices.  Returns 1 if a matching
 * ID is found, or 0 if there is no match.
 */
static int pci_bus_match(struct device *dev, struct device_driver *drv)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *pci_drv;
	const struct pci_device_id *found_id;

	if (!pci_dev->match_driver)
		return 0;

	pci_drv = to_pci_driver(drv);
	found_id = pci_match_device(pci_drv, pci_dev);
	if (found_id)
		return 1;

	return 0;
}

/**
 * pci_dev_get - increments the reference count of the pci device structure
 * @dev: the device being referenced
 *
 * Each live reference to a device should be refcounted.
 *
 * Drivers for PCI devices should normally record such references in
 * their probe() methods, when they bind to a device, and release
 * them by calling pci_dev_put(), in their disconnect() methods.
 *
 * A pointer to the device with the incremented reference counter is returned.
 */
struct pci_dev *pci_dev_get(struct pci_dev *dev)
{
	if (dev)
		get_device(&dev->dev);
	return dev;
}

/**
 * pci_dev_put - release a use of the pci device structure
 * @dev: device that's been disconnected
 *
 * Must be called when a user of a device is finished with it.  When the last
 * user of the device calls this function, the memory of the device is freed.
 */
void pci_dev_put(struct pci_dev *dev)
{
	if (dev)
		put_device(&dev->dev);
}
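
/*
 * Illustrative sketch (kept out of the build with #if 0) of the get/put
 * pairing; "my_companion" is a placeholder.  Code that caches a pci_dev
 * pointer holds a reference for as long as the pointer is kept around.
 */
#if 0
static struct pci_dev *my_companion;

static void my_cache_companion(struct pci_dev *pdev)
{
	my_companion = pci_dev_get(pdev);	/* hold a reference */
}

static void my_drop_companion(void)
{
	pci_dev_put(my_companion);		/* release it when done */
	my_companion = NULL;
}
#endif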

static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct pci_dev *pdev;

	if (!dev)
		return -ENODEV;

	pdev = to_pci_dev(dev);
	if (!pdev)
		return -ENODEV;

	if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor,
			   pdev->subsystem_device))
		return -ENOMEM;

	if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
			   pdev->vendor, pdev->device,
			   pdev->subsystem_vendor, pdev->subsystem_device,
			   (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
			   (u8)(pdev->class)))
		return -ENOMEM;

	return 0;
}

struct bus_type pci_bus_type = {
	.name		= "pci",
	.match		= pci_bus_match,
	.uevent		= pci_uevent,
	.probe		= pci_device_probe,
	.remove		= pci_device_remove,
	.shutdown	= pci_device_shutdown,
	.dev_groups	= pci_dev_groups,
	.bus_groups	= pci_bus_groups,
	.drv_groups	= pci_drv_groups,
	.pm		= PCI_PM_OPS_PTR,
};

static int __init pci_driver_init(void)
{
	return bus_register(&pci_bus_type);
}

postcore_initcall(pci_driver_init);

EXPORT_SYMBOL_GPL(pci_add_dynid);
EXPORT_SYMBOL(pci_match_id);
EXPORT_SYMBOL(__pci_register_driver);
EXPORT_SYMBOL(pci_unregister_driver);
EXPORT_SYMBOL(pci_dev_driver);
EXPORT_SYMBOL(pci_bus_type);
EXPORT_SYMBOL(pci_dev_get);
EXPORT_SYMBOL(pci_dev_put);