/*
 * PCI Stub Driver - Grabs devices in backend to be exported later
 *
 * Ryan Wilson <hap9@epoch.ncsc.mil>
 * Chris Bookholt <hap10@epoch.ncsc.mil>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <xen/events.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypervisor.h>
#include <xen/interface/physdev.h>
#include "pciback.h"
#include "conf_space.h"
#include "conf_space_quirks.h"

static char *pci_devs_to_hide;
wait_queue_head_t xen_pcibk_aer_wait_queue;

/* Semaphore to synchronize AER handling with xen_pcibk remove/reconfigure
 * operations: we do not want a xen_pcibk device to be removed in the middle
 * of an AER operation.
 */
static DECLARE_RWSEM(pcistub_sem);
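
/* The "hide" parameter is a list of parenthesized slot specifiers, e.g.
 * "(0000:03:00.0)(04:02.*)"; the domain may be omitted and the slot or
 * function wildcarded with '*'.  It is parsed in pcistub_init().
 */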
module_param_named(hide, pci_devs_to_hide, charp, 0444);

struct pcistub_device_id {
	struct list_head slot_list;
	int domain;
	unsigned char bus;
	unsigned int devfn;
};
static LIST_HEAD(pcistub_device_ids);
static DEFINE_SPINLOCK(device_ids_lock);

struct pcistub_device {
	struct kref kref;
	struct list_head dev_list;
	spinlock_t lock;

	struct pci_dev *dev;
	struct xen_pcibk_device *pdev;	/* non-NULL if struct pci_dev is in use */
};

/* Access to pcistub_devices & seized_devices lists and the initialize_devices
 * flag must be locked with pcistub_devices_lock
 */
static DEFINE_SPINLOCK(pcistub_devices_lock);
static LIST_HEAD(pcistub_devices);

/* wait for device_initcall before initializing our devices
 * (see pcistub_init_devices_late)
 */
static int initialize_devices;
static LIST_HEAD(seized_devices);

static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
{
	struct pcistub_device *psdev;

	dev_dbg(&dev->dev, "pcistub_device_alloc\n");

	psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
	if (!psdev)
		return NULL;

	psdev->dev = pci_dev_get(dev);
	if (!psdev->dev) {
		kfree(psdev);
		return NULL;
	}

	kref_init(&psdev->kref);
	spin_lock_init(&psdev->lock);

	return psdev;
}

/* Don't call this directly as it's called by pcistub_device_put */
static void pcistub_device_release(struct kref *kref)
{
	struct pcistub_device *psdev;
	struct pci_dev *dev;
	struct xen_pcibk_dev_data *dev_data;

	psdev = container_of(kref, struct pcistub_device, kref);
	dev = psdev->dev;
	dev_data = pci_get_drvdata(dev);

	dev_dbg(&dev->dev, "pcistub_device_release\n");

	xen_unregister_device_domain_owner(dev);

	/* Call the reset function which does not take lock as this
	 * is called from "unbind" which takes a device_lock mutex.
	 */
	__pci_reset_function_locked(dev);
	if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
		dev_dbg(&dev->dev, "Could not reload PCI state\n");
	else
		pci_restore_state(dev);

	if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
		struct physdev_pci_device ppdev = {
			.seg = pci_domain_nr(dev->bus),
			.bus = dev->bus->number,
			.devfn = dev->devfn
		};
		int err = HYPERVISOR_physdev_op(PHYSDEVOP_release_msix,
						&ppdev);

		if (err)
			dev_warn(&dev->dev, "MSI-X release failed (%d)\n",
				 err);
	}

	/* Disable the device */
	xen_pcibk_reset_device(dev);

	kfree(dev_data);
	pci_set_drvdata(dev, NULL);

	/* Clean-up the device */
	xen_pcibk_config_free_dyn_fields(dev);
	xen_pcibk_config_free_dev(dev);

	dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
	pci_dev_put(dev);

	kfree(psdev);
}

static inline void pcistub_device_get(struct pcistub_device *psdev)
{
	kref_get(&psdev->kref);
}

static inline void pcistub_device_put(struct pcistub_device *psdev)
{
	kref_put(&psdev->kref, pcistub_device_release);
}
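
/* Look up the stub device for the given domain/bus/slot/func.  On success
 * the caller receives the pcistub_device with its reference count raised
 * and must drop it again with pcistub_device_put().
 */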
static struct pcistub_device *pcistub_device_find(int domain, int bus,
						  int slot, int func)
{
	struct pcistub_device *psdev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev != NULL
		    && domain == pci_domain_nr(psdev->dev->bus)
		    && bus == psdev->dev->bus->number
		    && slot == PCI_SLOT(psdev->dev->devfn)
		    && func == PCI_FUNC(psdev->dev->devfn)) {
			pcistub_device_get(psdev);
			goto out;
		}
	}

	/* didn't find it */
	psdev = NULL;

out:
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return psdev;
}
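
/* Claim the stub device for @pdev.  Returns the underlying pci_dev and keeps
 * the extra reference on success, or NULL (dropping the reference taken here)
 * if the device is already claimed by another xen_pcibk device.
 */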
static struct pci_dev *pcistub_device_get_pci_dev(struct xen_pcibk_device *pdev,
						  struct pcistub_device *psdev)
{
	struct pci_dev *pci_dev = NULL;
	unsigned long flags;

	pcistub_device_get(psdev);

	spin_lock_irqsave(&psdev->lock, flags);
	if (!psdev->pdev) {
		psdev->pdev = pdev;
		pci_dev = psdev->dev;
	}
	spin_unlock_irqrestore(&psdev->lock, flags);

	if (!pci_dev)
		pcistub_device_put(psdev);

	return pci_dev;
}

struct pci_dev *pcistub_get_pci_dev_by_slot(struct xen_pcibk_device *pdev,
					    int domain, int bus,
					    int slot, int func)
{
	struct pcistub_device *psdev;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev != NULL
		    && domain == pci_domain_nr(psdev->dev->bus)
		    && bus == psdev->dev->bus->number
		    && slot == PCI_SLOT(psdev->dev->devfn)
		    && func == PCI_FUNC(psdev->dev->devfn)) {
			found_dev = pcistub_device_get_pci_dev(pdev, psdev);
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return found_dev;
}

struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
				    struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	struct pci_dev *found_dev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_dev = pcistub_device_get_pci_dev(pdev, psdev);
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return found_dev;
}
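
/* Called when the frontend is done with a device: reset and disable it, drop
 * the emulated configuration state and release the claim taken in
 * pcistub_device_get_pci_dev() so the device can be handed to another domain.
 */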
void pcistub_put_pci_dev(struct pci_dev *dev)
{
	struct pcistub_device *psdev, *found_psdev = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_psdev = psdev;
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	if (WARN_ON(!found_psdev))
		return;

	/* Hold this lock to avoid breaking the link between pcistub and
	 * xen_pcibk while an AER operation is being processed.
	 */
	down_write(&pcistub_sem);

	/* Cleanup our device
	 * (so it's ready for the next domain)
	 */

	/* This is OK - we are running from workqueue context
	 * and want to inhibit the user from fiddling with 'reset'
	 */
	pci_reset_function(dev);
	pci_restore_state(psdev->dev);

	/* This disables the device. */
	xen_pcibk_reset_device(found_psdev->dev);

	/* And clean up our emulated fields. */
	xen_pcibk_config_free_dyn_fields(found_psdev->dev);
	xen_pcibk_config_reset_dev(found_psdev->dev);

	xen_unregister_device_domain_owner(found_psdev->dev);

	spin_lock_irqsave(&found_psdev->lock, flags);
	found_psdev->pdev = NULL;
	spin_unlock_irqrestore(&found_psdev->lock, flags);

	pcistub_device_put(found_psdev);
	up_write(&pcistub_sem);
}

static int pcistub_match_one(struct pci_dev *dev,
			     struct pcistub_device_id *pdev_id)
{
	/* Match the specified device by domain, bus, slot, func and also if
	 * any of the device's parent bridges match.
	 */
	for (; dev != NULL; dev = dev->bus->self) {
		if (pci_domain_nr(dev->bus) == pdev_id->domain
		    && dev->bus->number == pdev_id->bus
		    && dev->devfn == pdev_id->devfn)
			return 1;

		/* Sometimes topmost bridge links to itself. */
		if (dev == dev->bus->self)
			break;
	}

	return 0;
}

static int pcistub_match(struct pci_dev *dev)
{
	struct pcistub_device_id *pdev_id;
	unsigned long flags;
	int found = 0;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
		if (pcistub_match_one(dev, pdev_id)) {
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return found;
}

static int pcistub_init_device(struct pci_dev *dev)
{
	struct xen_pcibk_dev_data *dev_data;
	int err = 0;

	dev_dbg(&dev->dev, "initializing...\n");

	/* The PCI backend is not intended to be a module (or to work with
	 * removable PCI devices) yet. If it were, xen_pcibk_config_free()
	 * would need to be called somewhere to free the memory allocated
	 * here and then to call kfree(pci_get_drvdata(psdev->dev)).
	 */
	dev_data = kzalloc(sizeof(*dev_data) + strlen(DRV_NAME "[]")
			   + strlen(pci_name(dev)) + 1, GFP_ATOMIC);
	if (!dev_data) {
		err = -ENOMEM;
		goto out;
	}
	pci_set_drvdata(dev, dev_data);

	/*
	 * Setup name for fake IRQ handler. It will only be enabled
	 * once the device is turned on by the guest.
	 */
	sprintf(dev_data->irq_name, DRV_NAME "[%s]", pci_name(dev));

	dev_dbg(&dev->dev, "initializing config\n");

	init_waitqueue_head(&xen_pcibk_aer_wait_queue);
	err = xen_pcibk_config_init_dev(dev);
	if (err)
		goto out;

	/* HACK: Force device (& ACPI) to determine what IRQ it's on - we
	 * must do this here because pcibios_enable_device may specify
	 * the pci device's true irq (and possibly its other resources)
	 * if they differ from what's in the configuration space.
	 * This makes the assumption that the device's resources won't
	 * change after this point (otherwise this code may break!)
	 */
	dev_dbg(&dev->dev, "enabling device\n");
	err = pci_enable_device(dev);
	if (err)
		goto config_release;

	if (pci_find_capability(dev, PCI_CAP_ID_MSIX)) {
		struct physdev_pci_device ppdev = {
			.seg = pci_domain_nr(dev->bus),
			.bus = dev->bus->number,
			.devfn = dev->devfn
		};

		err = HYPERVISOR_physdev_op(PHYSDEVOP_prepare_msix, &ppdev);
		if (err)
			dev_err(&dev->dev, "MSI-X preparation failed (%d)\n",
				err);
	}

	/* We need the device active to save the state. */
	dev_dbg(&dev->dev, "save state of device\n");
	pci_save_state(dev);
	dev_data->pci_saved_state = pci_store_saved_state(dev);
	if (!dev_data->pci_saved_state)
		dev_err(&dev->dev, "Could not store PCI conf saved state!\n");
	else {
		dev_dbg(&dev->dev, "resetting (FLR, D3, etc) the device\n");
		__pci_reset_function_locked(dev);
		pci_restore_state(dev);
	}
	/* Now disable the device (this also ensures some private device
	 * data is setup before we export)
	 */
	dev_dbg(&dev->dev, "reset device\n");
	xen_pcibk_reset_device(dev);

	dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
	return 0;

config_release:
	xen_pcibk_config_free_dev(dev);

out:
	pci_set_drvdata(dev, NULL);
	kfree(dev_data);
	return err;
}

/*
 * Because some initialization still happens on
 * devices during fs_initcall, we need to defer
 * full initialization of our devices until
 * device_initcall.
 */
static int __init pcistub_init_devices_late(void)
{
	struct pcistub_device *psdev;
	unsigned long flags;
	int err = 0;

	pr_debug(DRV_NAME ": pcistub_init_devices_late\n");

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	while (!list_empty(&seized_devices)) {
		psdev = container_of(seized_devices.next,
				     struct pcistub_device, dev_list);
		list_del(&psdev->dev_list);

		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		err = pcistub_init_device(psdev->dev);
		if (err) {
			dev_err(&psdev->dev->dev,
				"error %d initializing device\n", err);
			kfree(psdev);
			psdev = NULL;
		}

		spin_lock_irqsave(&pcistub_devices_lock, flags);

		if (psdev)
			list_add_tail(&psdev->dev_list, &pcistub_devices);
	}

	initialize_devices = 1;

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	return 0;
}
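
/* Take ownership of a matched device.  Before device_initcall the device is
 * only queued on seized_devices; afterwards it is initialized right away and
 * added to pcistub_devices.
 */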
static int pcistub_seize(struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	unsigned long flags;
	int err = 0;

	psdev = pcistub_device_alloc(dev);
	if (!psdev)
		return -ENOMEM;

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	if (initialize_devices) {
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		/* don't want irqs disabled when calling pcistub_init_device */
		err = pcistub_init_device(psdev->dev);

		spin_lock_irqsave(&pcistub_devices_lock, flags);

		if (!err)
			list_add(&psdev->dev_list, &pcistub_devices);
	} else {
		dev_dbg(&dev->dev, "deferring initialization\n");
		list_add(&psdev->dev_list, &seized_devices);
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (err)
		pcistub_device_put(psdev);

	return err;
}
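
/* Driver probe entry point.  Only devices that appear on the "hide" list
 * (see pcistub_match()) and that use a normal or bridge header type are
 * seized; everything else is left to other drivers by returning -ENODEV.
 */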
static int pcistub_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int err = 0;

	dev_dbg(&dev->dev, "probing...\n");

	if (pcistub_match(dev)) {

		if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
		    && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
			dev_err(&dev->dev, "can't export pci devices that "
				"don't have a normal (0) or bridge (1) "
				"header type!\n");
			err = -ENODEV;
			goto out;
		}

		dev_info(&dev->dev, "seizing device\n");
		err = pcistub_seize(dev);
	} else
		/* Didn't find the device */
		err = -ENODEV;

out:
	return err;
}

static void pcistub_remove(struct pci_dev *dev)
{
	struct pcistub_device *psdev, *found_psdev = NULL;
	unsigned long flags;

	dev_dbg(&dev->dev, "removing\n");

	spin_lock_irqsave(&pcistub_devices_lock, flags);

	xen_pcibk_config_quirk_release(dev);

	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (psdev->dev == dev) {
			found_psdev = psdev;
			break;
		}
	}

	spin_unlock_irqrestore(&pcistub_devices_lock, flags);

	if (found_psdev) {
		dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
			found_psdev->pdev);

		if (found_psdev->pdev) {
			printk(KERN_WARNING DRV_NAME ": ****** removing device "
			       "%s while still in-use! ******\n",
			       pci_name(found_psdev->dev));
			printk(KERN_WARNING DRV_NAME ": ****** driver domain may"
			       " still access this device's i/o resources!\n");
			printk(KERN_WARNING DRV_NAME ": ****** shutdown driver "
			       "domain before binding device\n");
			printk(KERN_WARNING DRV_NAME ": ****** to other drivers "
			       "or domains\n");

			xen_pcibk_release_pci_dev(found_psdev->pdev,
						  found_psdev->dev);
		}

		spin_lock_irqsave(&pcistub_devices_lock, flags);
		list_del(&found_psdev->dev_list);
		spin_unlock_irqrestore(&pcistub_devices_lock, flags);

		/* the final put for releasing from the list */
		pcistub_device_put(found_psdev);
	}
}

static DEFINE_PCI_DEVICE_TABLE(pcistub_ids) = {
	{
	 .vendor = PCI_ANY_ID,
	 .device = PCI_ANY_ID,
	 .subvendor = PCI_ANY_ID,
	 .subdevice = PCI_ANY_ID,
	 },
	{0,},
};

#define PCI_NODENAME_MAX 40
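
/* Request that the domain owning this device be shut down: write
 * "aerState" = "aerfail" into the device's backend xenstore node inside a
 * transaction (retrying on -EAGAIN) and leave it to the toolstack's AER
 * handling to act on that key.
 */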
static void kill_domain_by_device(struct pcistub_device *psdev)
{
	struct xenbus_transaction xbt;
	int err;
	char nodename[PCI_NODENAME_MAX];

	BUG_ON(!psdev);
	snprintf(nodename, PCI_NODENAME_MAX, "/local/domain/0/backend/pci/%d/0",
		 psdev->pdev->xdev->otherend_id);

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		dev_err(&psdev->dev->dev,
			"error %d when start xenbus transaction\n", err);
		return;
	}
	/* PV AER handlers will set this flag */
	xenbus_printf(xbt, nodename, "aerState", "aerfail");
	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		dev_err(&psdev->dev->dev,
			"error %d when end xenbus transaction\n", err);
		return;
	}
}

/* Each AER recovery step (error_detected, mmio_enabled, etc.) requires
 * cooperation between the frontend and the backend.  In xen_pcibk these
 * steps all do the same kind of job: send a service request to the
 * frontend and wait for its response.
 */
static pci_ers_result_t common_process(struct pcistub_device *psdev,
				       pci_channel_state_t state, int aer_cmd,
				       pci_ers_result_t result)
{
	pci_ers_result_t res = result;
	struct xen_pcie_aer_op *aer_op;
	int ret;

	/* with PV AER drivers */
	aer_op = &(psdev->pdev->sh_info->aer_op);
	aer_op->cmd = aer_cmd;
	/* useful for error_detected callback */
	aer_op->err = state;
	/* pcifront_end BDF */
	ret = xen_pcibk_get_pcifront_dev(psdev->dev, psdev->pdev,
					 &aer_op->domain, &aer_op->bus,
					 &aer_op->devfn);
	if (!ret) {
		dev_err(&psdev->dev->dev,
			DRV_NAME ": failed to get pcifront device\n");
		return PCI_ERS_RESULT_NONE;
	}
	wmb();

	dev_dbg(&psdev->dev->dev,
		DRV_NAME ": aer_op %x dom %x bus %x devfn %x\n",
		aer_cmd, aer_op->domain, aer_op->bus, aer_op->devfn);
	/* Local flag to mark that an AER request is pending; the xen_pcibk
	 * callback uses it to decide whether it needs to check for the AER
	 * service ack signal from pcifront.
	 */
	set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);

	/* It is possible that a pcifront conf_read_write ops request invokes
	 * the callback which causes a spurious execution of wake_up.
	 * Yet it is harmless and better than a spinlock here.
	 */
	set_bit(_XEN_PCIB_active,
		(unsigned long *)&psdev->pdev->sh_info->flags);
	wmb();
	notify_remote_via_irq(psdev->pdev->evtchn_irq);

	ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
				 !(test_bit(_XEN_PCIB_active, (unsigned long *)
				 &psdev->pdev->sh_info->flags)), 300*HZ);

	if (!ret) {
		if (test_bit(_XEN_PCIB_active,
			(unsigned long *)&psdev->pdev->sh_info->flags)) {
			dev_err(&psdev->dev->dev,
				"pcifront aer process not responding!\n");
			clear_bit(_XEN_PCIB_active,
			  (unsigned long *)&psdev->pdev->sh_info->flags);
			aer_op->err = PCI_ERS_RESULT_NONE;
			return res;
		}
	}
	clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);

	if (test_bit(_XEN_PCIF_active,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_dbg(&psdev->dev->dev,
			"schedule pci_conf service in " DRV_NAME "\n");
		xen_pcibk_test_and_schedule_op(psdev->pdev);
	}

	res = (pci_ers_result_t)aer_op->err;
	return res;
}

/*
 * xen_pcibk_slot_reset: send the slot_reset request to pcifront in case the
 * device driver there provides this service, and then wait for the pcifront
 * ack.
 * @dev: pointer to the PCI device
 * The return value is used by the AER core's do_recovery policy.
 */
static pci_ers_result_t xen_pcibk_slot_reset(struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_RECOVERED;
	dev_dbg(&dev->dev, "xen_pcibk_slot_reset(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		goto end;
	}
	result = common_process(psdev, 1, XEN_PCI_OP_aer_slotreset, result);

	if (result == PCI_ERS_RESULT_NONE ||
		result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER slot_reset service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}

/*
 * xen_pcibk_mmio_enabled: send the mmio_enabled request to pcifront in case
 * the device driver there provides this service, and then wait for the
 * pcifront ack.
 * @dev: pointer to the PCI device
 * The return value is used by the AER core's do_recovery policy.
 */
static pci_ers_result_t xen_pcibk_mmio_enabled(struct pci_dev *dev)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_RECOVERED;
	dev_dbg(&dev->dev, "xen_pcibk_mmio_enabled(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		goto end;
	}
	result = common_process(psdev, 1, XEN_PCI_OP_aer_mmio, result);

	if (result == PCI_ERS_RESULT_NONE ||
		result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER mmio_enabled service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}

/*
 * xen_pcibk_error_detected: send the error_detected request to pcifront in
 * case the device driver there provides this service, and then wait for the
 * pcifront ack.
 * @dev: pointer to the PCI device
 * @error: the current PCI connection state
 * The return value is used by the AER core's do_recovery policy.
 */
static pci_ers_result_t xen_pcibk_error_detected(struct pci_dev *dev,
						 pci_channel_state_t error)
{
	struct pcistub_device *psdev;
	pci_ers_result_t result;

	result = PCI_ERS_RESULT_CAN_RECOVER;
	dev_dbg(&dev->dev, "xen_pcibk_error_detected(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	/* Guest owns the device yet no AER handler is registered, kill the guest */
	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_dbg(&dev->dev, "guest may have no aer driver, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}
	result = common_process(psdev, error, XEN_PCI_OP_aer_detected, result);

	if (result == PCI_ERS_RESULT_NONE ||
		result == PCI_ERS_RESULT_DISCONNECT) {
		dev_dbg(&dev->dev,
			"No AER error_detected service or disconnected!\n");
		kill_domain_by_device(psdev);
	}
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return result;
}

/*
 * xen_pcibk_error_resume: send the error_resume request to pcifront in case
 * the device driver there provides this service, and then wait for the
 * pcifront ack.
 * @dev: pointer to the PCI device
 */
static void xen_pcibk_error_resume(struct pci_dev *dev)
{
	struct pcistub_device *psdev;

	dev_dbg(&dev->dev, "xen_pcibk_error_resume(bus:%x,devfn:%x)\n",
		dev->bus->number, dev->devfn);

	down_write(&pcistub_sem);
	psdev = pcistub_device_find(pci_domain_nr(dev->bus),
				    dev->bus->number,
				    PCI_SLOT(dev->devfn),
				    PCI_FUNC(dev->devfn));

	if (!psdev || !psdev->pdev) {
		dev_err(&dev->dev,
			DRV_NAME " device is not found/assigned\n");
		goto end;
	}

	if (!psdev->pdev->sh_info) {
		dev_err(&dev->dev, DRV_NAME " device is not connected or owned"
			" by HVM, kill it\n");
		kill_domain_by_device(psdev);
		goto end;
	}

	if (!test_bit(_XEN_PCIB_AERHANDLER,
		(unsigned long *)&psdev->pdev->sh_info->flags)) {
		dev_err(&dev->dev,
			"guest with no AER driver should have been killed\n");
		kill_domain_by_device(psdev);
		goto end;
	}
	common_process(psdev, 1, XEN_PCI_OP_aer_resume,
		       PCI_ERS_RESULT_RECOVERED);
end:
	if (psdev)
		pcistub_device_put(psdev);
	up_write(&pcistub_sem);
	return;
}

/* add xen_pcibk AER handling */
static const struct pci_error_handlers xen_pcibk_error_handler = {
	.error_detected = xen_pcibk_error_detected,
	.mmio_enabled = xen_pcibk_mmio_enabled,
	.slot_reset = xen_pcibk_slot_reset,
	.resume = xen_pcibk_error_resume,
};

/*
 * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
 * for a normal device. I don't want it to be loaded automatically.
 */
static struct pci_driver xen_pcibk_pci_driver = {
	/* The name should be xen_pciback, but until the tools are updated
	 * we will keep it as pciback. */
	.name = "pciback",
	.id_table = pcistub_ids,
	.probe = pcistub_probe,
	.remove = pcistub_remove,
	.err_handler = &xen_pcibk_error_handler,
};
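
/* Parse a device specifier of the form [DDDD:]BB:SS.F.  The function may be
 * given as "*" (returned as -1), or slot and function together as "*.*"
 * (both returned as -1).  A missing domain defaults to 0.  Returns 0 on
 * success, -EINVAL otherwise.
 */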
static inline int str_to_slot(const char *buf, int *domain, int *bus,
			      int *slot, int *func)
{
	int parsed = 0;

	switch (sscanf(buf, " %x:%x:%x.%x %n", domain, bus, slot, func,
		       &parsed)) {
	case 3:
		*func = -1;
		sscanf(buf, " %x:%x:%x.* %n", domain, bus, slot, &parsed);
		break;
	case 2:
		*slot = *func = -1;
		sscanf(buf, " %x:%x:*.* %n", domain, bus, &parsed);
		break;
	}
	if (parsed && !buf[parsed])
		return 0;

	/* try again without domain */
	*domain = 0;
	switch (sscanf(buf, " %x:%x.%x %n", bus, slot, func, &parsed)) {
	case 2:
		*func = -1;
		sscanf(buf, " %x:%x.* %n", bus, slot, &parsed);
		break;
	case 1:
		*slot = *func = -1;
		sscanf(buf, " %x:*.* %n", bus, &parsed);
		break;
	}
	if (parsed && !buf[parsed])
		return 0;

	return -EINVAL;
}

static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
			       *slot, int *func, int *reg, int *size, int *mask)
{
	int parsed = 0;

	sscanf(buf, " %x:%x:%x.%x-%x:%x:%x %n", domain, bus, slot, func,
	       reg, size, mask, &parsed);
	if (parsed && !buf[parsed])
		return 0;

	/* try again without domain */
	*domain = 0;
	sscanf(buf, " %x:%x.%x-%x:%x:%x %n", bus, slot, func, reg, size,
	       mask, &parsed);
	if (parsed && !buf[parsed])
		return 0;

	return -EINVAL;
}
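
/* Add a BDF to the list of devices to seize.  A negative slot or function
 * acts as a wildcard: the call recurses over all 32 slots or all 8 functions
 * respectively.
 */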
static int pcistub_device_id_add(int domain, int bus, int slot, int func)
{
	struct pcistub_device_id *pci_dev_id;
	unsigned long flags;
	int rc = 0, devfn = PCI_DEVFN(slot, func);

	if (slot < 0) {
		for (slot = 0; !rc && slot < 32; ++slot)
			rc = pcistub_device_id_add(domain, bus, slot, func);
		return rc;
	}

	if (func < 0) {
		for (func = 0; !rc && func < 8; ++func)
			rc = pcistub_device_id_add(domain, bus, slot, func);
		return rc;
	}

	if ((
#if !defined(MODULE) /* pci_domains_supported is not being exported */ \
    || !defined(CONFIG_PCI_DOMAINS)
	     !pci_domains_supported ? domain :
#endif
	     domain < 0 || domain > 0xffff)
	    || bus < 0 || bus > 0xff
	    || PCI_SLOT(devfn) != slot
	    || PCI_FUNC(devfn) != func)
		return -EINVAL;

	pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
	if (!pci_dev_id)
		return -ENOMEM;

	pci_dev_id->domain = domain;
	pci_dev_id->bus = bus;
	pci_dev_id->devfn = devfn;

	pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%d\n",
		 domain, bus, slot, func);

	spin_lock_irqsave(&device_ids_lock, flags);
	list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return 0;
}

static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
{
	struct pcistub_device_id *pci_dev_id, *t;
	int err = -ENOENT;
	unsigned long flags;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids,
				 slot_list) {
		if (pci_dev_id->domain == domain && pci_dev_id->bus == bus
		    && (slot < 0 || PCI_SLOT(pci_dev_id->devfn) == slot)
		    && (func < 0 || PCI_FUNC(pci_dev_id->devfn) == func)) {
			/* Don't break; here because it's possible the same
			 * slot could be in the list more than once
			 */
			list_del(&pci_dev_id->slot_list);
			kfree(pci_dev_id);

			err = 0;

			pr_debug(DRV_NAME ": removed %04x:%02x:%02x.%d from "
				 "seize list\n", domain, bus, slot, func);
		}
	}
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return err;
}

static int pcistub_reg_add(int domain, int bus, int slot, int func,
			   unsigned int reg, unsigned int size,
			   unsigned int mask)
{
	int err = 0;
	struct pcistub_device *psdev;
	struct pci_dev *dev;
	struct config_field *field;

	if (reg > 0xfff || (size < 4 && (mask >> (size * 8))))
		return -EINVAL;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENODEV;
		goto out;
	}
	dev = psdev->dev;

	field = kzalloc(sizeof(*field), GFP_ATOMIC);
	if (!field) {
		err = -ENOMEM;
		goto out;
	}

	field->offset = reg;
	field->size = size;
	field->mask = mask;
	field->init = NULL;
	field->reset = NULL;
	field->release = NULL;
	field->clean = xen_pcibk_config_field_free;

	err = xen_pcibk_config_quirks_add_field(dev, field);
	if (err)
		kfree(field);
out:
	if (psdev)
		pcistub_device_put(psdev);
	return err;
}
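
/* sysfs interface under /sys/bus/pci/drivers/pciback/: writing a slot
 * specifier (see str_to_slot()) to "new_slot" adds it to the seize list,
 * writing to "remove_slot" takes it off again, and reading "slots" lists
 * the current entries, e.g.:
 *	echo 0000:01:00.0 > /sys/bus/pci/drivers/pciback/new_slot
 */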
static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
				size_t count)
{
	int domain, bus, slot, func;
	int err;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		goto out;

	err = pcistub_device_id_add(domain, bus, slot, func);

out:
	if (!err)
		err = count;
	return err;
}
static DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);

static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
				   size_t count)
{
	int domain, bus, slot, func;
	int err;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		goto out;

	err = pcistub_device_id_remove(domain, bus, slot, func);

out:
	if (!err)
		err = count;
	return err;
}
static DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);

static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device_id *pci_dev_id;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
		if (count >= PAGE_SIZE)
			break;

		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "%04x:%02x:%02x.%d\n",
				   pci_dev_id->domain, pci_dev_id->bus,
				   PCI_SLOT(pci_dev_id->devfn),
				   PCI_FUNC(pci_dev_id->devfn));
	}
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return count;
}
static DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);

static ssize_t pcistub_irq_handler_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (count >= PAGE_SIZE)
			break;
		if (!psdev->dev)
			continue;
		dev_data = pci_get_drvdata(psdev->dev);
		if (!dev_data)
			continue;
		count +=
		    scnprintf(buf + count, PAGE_SIZE - count,
			      "%s:%s:%sing:%ld\n",
			      pci_name(psdev->dev),
			      dev_data->isr_on ? "on" : "off",
			      dev_data->ack_intr ? "ack" : "not ack",
			      dev_data->handled);
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return count;
}
static DRIVER_ATTR(irq_handlers, S_IRUSR, pcistub_irq_handler_show, NULL);
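
/* Writing a slot specifier to "irq_handler_state" toggles the fake IRQ
 * handler for that device (and re-enables interrupt acknowledgement when
 * switching it on); "irq_handlers" above shows the current state per device.
 */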
static ssize_t pcistub_irq_handler_switch(struct device_driver *drv,
					  const char *buf,
					  size_t count)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	int domain, bus, slot, func;
	int err = -ENOENT;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		return err;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev)
		goto out;

	dev_data = pci_get_drvdata(psdev->dev);
	if (!dev_data)
		goto out;

	dev_dbg(&psdev->dev->dev, "%s fake irq handler: %d->%d\n",
		dev_data->irq_name, dev_data->isr_on,
		!dev_data->isr_on);

	dev_data->isr_on = !(dev_data->isr_on);
	if (dev_data->isr_on)
		dev_data->ack_intr = 1;
out:
	if (psdev)
		pcistub_device_put(psdev);
	if (!err)
		err = count;
	return err;
}
static DRIVER_ATTR(irq_handler_state, S_IWUSR, NULL,
		   pcistub_irq_handler_switch);
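
/* The "quirks" attribute: writing "[DDDD:]BB:SS.F-reg:size:mask" (see
 * str_to_quirk()) registers an extra config-space field (offset, size,
 * write mask) with the quirk handling for the given device; reading it
 * dumps each quirk together with the fields registered for that device.
 */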
static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
				 size_t count)
{
	int domain, bus, slot, func, reg, size, mask;
	int err;

	err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
			   &mask);
	if (err)
		goto out;

	err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);

out:
	if (!err)
		err = count;
	return err;
}

static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
{
	int count = 0;
	unsigned long flags;
	struct xen_pcibk_config_quirk *quirk;
	struct xen_pcibk_dev_data *dev_data;
	const struct config_field *field;
	const struct config_field_entry *cfg_entry;

	spin_lock_irqsave(&device_ids_lock, flags);
	list_for_each_entry(quirk, &xen_pcibk_quirks, quirks_list) {
		if (count >= PAGE_SIZE)
			goto out;

		count += scnprintf(buf + count, PAGE_SIZE - count,
				   "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
				   quirk->pdev->bus->number,
				   PCI_SLOT(quirk->pdev->devfn),
				   PCI_FUNC(quirk->pdev->devfn),
				   quirk->devid.vendor, quirk->devid.device,
				   quirk->devid.subvendor,
				   quirk->devid.subdevice);

		dev_data = pci_get_drvdata(quirk->pdev);

		list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
			field = cfg_entry->field;
			if (count >= PAGE_SIZE)
				goto out;

			count += scnprintf(buf + count, PAGE_SIZE - count,
					   "\t\t%08x:%01x:%08x\n",
					   cfg_entry->base_offset +
					   field->offset, field->size,
					   field->mask);
		}
	}

out:
	spin_unlock_irqrestore(&device_ids_lock, flags);

	return count;
}
static DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show,
		   pcistub_quirk_add);
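
/* Writing a slot specifier to "permissive" puts that device into permissive
 * mode, relaxing the filtering of the guest's config-space writes; reading
 * the attribute lists the devices currently in permissive mode, e.g.:
 *	echo 0000:01:00.0 > /sys/bus/pci/drivers/pciback/permissive
 */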
static ssize_t permissive_add(struct device_driver *drv, const char *buf,
			      size_t count)
{
	int domain, bus, slot, func;
	int err;
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;

	err = str_to_slot(buf, &domain, &bus, &slot, &func);
	if (err)
		goto out;

	psdev = pcistub_device_find(domain, bus, slot, func);
	if (!psdev) {
		err = -ENODEV;
		goto out;
	}

	dev_data = pci_get_drvdata(psdev->dev);
	/* the driver data for a device should never be null at this point */
	if (!dev_data) {
		err = -ENXIO;
		goto release;
	}
	if (!dev_data->permissive) {
		dev_data->permissive = 1;
		/* Let user know that what they're doing could be unsafe */
		dev_warn(&psdev->dev->dev, "enabling permissive mode "
			 "configuration space accesses!\n");
		dev_warn(&psdev->dev->dev,
			 "permissive mode is potentially unsafe!\n");
	}
release:
	pcistub_device_put(psdev);
out:
	if (!err)
		err = count;
	return err;
}

static ssize_t permissive_show(struct device_driver *drv, char *buf)
{
	struct pcistub_device *psdev;
	struct xen_pcibk_dev_data *dev_data;
	size_t count = 0;
	unsigned long flags;

	spin_lock_irqsave(&pcistub_devices_lock, flags);
	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
		if (count >= PAGE_SIZE)
			break;
		if (!psdev->dev)
			continue;
		dev_data = pci_get_drvdata(psdev->dev);
		if (!dev_data || !dev_data->permissive)
			continue;
		count +=
		    scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
			      pci_name(psdev->dev));
	}
	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
	return count;
}
static DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show,
		   permissive_add);

static void pcistub_exit(void)
{
	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_new_slot);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_remove_slot);
	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_slots);
	driver_remove_file(&xen_pcibk_pci_driver.driver, &driver_attr_quirks);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_permissive);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_irq_handlers);
	driver_remove_file(&xen_pcibk_pci_driver.driver,
			   &driver_attr_irq_handler_state);
	pci_unregister_driver(&xen_pcibk_pci_driver);
}
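
/* Parse the "hide" parameter (a list of parenthesized slot specifiers such
 * as "(0000:03:00.0)(04:02.*)", mirroring the str_to_slot() syntax: the
 * domain may be omitted, slot and function may be wildcarded with '*'),
 * register the stub PCI driver and create its sysfs attributes.
 */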
static int __init pcistub_init(void)
{
	int pos = 0;
	int err = 0;
	int domain, bus, slot, func;
	int parsed;

	if (pci_devs_to_hide && *pci_devs_to_hide) {
		do {
			parsed = 0;

			err = sscanf(pci_devs_to_hide + pos,
				     " (%x:%x:%x.%x) %n",
				     &domain, &bus, &slot, &func, &parsed);
			switch (err) {
			case 3:
				func = -1;
				sscanf(pci_devs_to_hide + pos,
				       " (%x:%x:%x.*) %n",
				       &domain, &bus, &slot, &parsed);
				break;
			case 2:
				slot = func = -1;
				sscanf(pci_devs_to_hide + pos,
				       " (%x:%x:*.*) %n",
				       &domain, &bus, &parsed);
				break;
			}

			if (!parsed) {
				domain = 0;
				err = sscanf(pci_devs_to_hide + pos,
					     " (%x:%x.%x) %n",
					     &bus, &slot, &func, &parsed);
				switch (err) {
				case 2:
					func = -1;
					sscanf(pci_devs_to_hide + pos,
					       " (%x:%x.*) %n",
					       &bus, &slot, &parsed);
					break;
				case 1:
					slot = func = -1;
					sscanf(pci_devs_to_hide + pos,
					       " (%x:*.*) %n",
					       &bus, &parsed);
					break;
				}
			}

			if (parsed <= 0)
				goto parse_error;

			err = pcistub_device_id_add(domain, bus, slot, func);
			if (err)
				goto out;

			pos += parsed;
		} while (pci_devs_to_hide[pos]);
	}

	/* If we're the first PCI Device Driver to register, we're the
	 * first one to get offered PCI devices as they become
	 * available (and thus we can be the first to grab them)
	 */
	err = pci_register_driver(&xen_pcibk_pci_driver);
	if (err < 0)
		goto out;

	err = driver_create_file(&xen_pcibk_pci_driver.driver,
				 &driver_attr_new_slot);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_remove_slot);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_slots);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_quirks);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_permissive);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_irq_handlers);
	if (!err)
		err = driver_create_file(&xen_pcibk_pci_driver.driver,
					 &driver_attr_irq_handler_state);
	if (err)
		pcistub_exit();

out:
	return err;

parse_error:
	printk(KERN_ERR DRV_NAME ": Error parsing pci_devs_to_hide at \"%s\"\n",
	       pci_devs_to_hide + pos);
	return -EINVAL;
}

#ifndef MODULE
/*
 * fs_initcall happens before device_initcall
 * so xen_pcibk *should* get called first (b/c we
 * want to suck up any device before other drivers
 * get a chance by being the first pci device
 * driver to register)
 */
fs_initcall(pcistub_init);
#endif

static int __init xen_pcibk_init(void)
{
	int err;

	if (!xen_initial_domain())
		return -ENODEV;

	err = xen_pcibk_config_init();
	if (err)
		return err;

#ifdef MODULE
	err = pcistub_init();
	if (err < 0)
		return err;
#endif

	pcistub_init_devices_late();
	err = xen_pcibk_xenbus_register();
	if (err)
		pcistub_exit();

	return err;
}

static void __exit xen_pcibk_cleanup(void)
{
	xen_pcibk_xenbus_unregister();
	pcistub_exit();
}

module_init(xen_pcibk_init);
module_exit(xen_pcibk_cleanup);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:pci");