/*
 * drivers/pci/iov.c
 *
 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
 *
 * PCI Express I/O Virtualization (IOV) support.
 *   Single Root IOV 1.0
 *   Address Translation Service 1.0
 */

#include <linux/pci.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/delay.h>
#include "pci.h"

#define VIRTFN_ID_LEN 16
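
/*
 * Per the SR-IOV spec, a Virtual Function's Routing ID is derived from the
 * Physical Function's Routing ID plus the First VF Offset and a multiple of
 * the VF Stride read from the SR-IOV capability.  The helpers below split
 * that Routing ID into its bus number (upper 8 bits) and devfn (lower
 * 8 bits) for a zero-based VF index.
 */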

static inline u8 virtfn_bus(struct pci_dev *dev, int id)
{
        return dev->bus->number + ((dev->devfn + dev->sriov->offset +
                                    dev->sriov->stride * id) >> 8);
}

static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
{
        return (dev->devfn + dev->sriov->offset +
                dev->sriov->stride * id) & 0xff;
}

static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
        int rc;
        struct pci_bus *child;

        if (bus->number == busnr)
                return bus;

        child = pci_find_bus(pci_domain_nr(bus), busnr);
        if (child)
                return child;

        child = pci_add_new_bus(bus, NULL, busnr);
        if (!child)
                return NULL;

        child->subordinate = busnr;
        child->dev.parent = bus->bridge;
        rc = pci_bus_add_child(child);
        if (rc) {
                pci_remove_bus(child);
                return NULL;
        }

        return child;
}

static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
{
        struct pci_bus *child;

        if (bus->number == busnr)
                return;

        child = pci_find_bus(pci_domain_nr(bus), busnr);
        BUG_ON(!child);

        if (list_empty(&child->devices))
                pci_remove_bus(child);
}
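
/*
 * virtfn_add() creates the pci_dev for a single Virtual Function: it places
 * the VF on the right (possibly newly added) bus, reads the VF Device ID
 * from the SR-IOV capability, carves the VF's slice out of each of the PF's
 * IOV resources (each VF gets resource_size / TotalVFs of a BAR), optionally
 * resets the function, and registers the device along with the "virtfnN" and
 * "physfn" sysfs links.  virtfn_remove() tears all of this down again.
 */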

static int virtfn_add(struct pci_dev *dev, int id, int reset)
{
        int i;
        int rc;
        u64 size;
        char buf[VIRTFN_ID_LEN];
        struct pci_dev *virtfn;
        struct resource *res;
        struct pci_sriov *iov = dev->sriov;

        virtfn = alloc_pci_dev();
        if (!virtfn)
                return -ENOMEM;

        mutex_lock(&iov->dev->sriov->lock);
        virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
        if (!virtfn->bus) {
                kfree(virtfn);
                mutex_unlock(&iov->dev->sriov->lock);
                return -ENOMEM;
        }
        virtfn->devfn = virtfn_devfn(dev, id);
        virtfn->vendor = dev->vendor;
        pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
        pci_setup_device(virtfn);
        virtfn->dev.parent = dev->dev.parent;

        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = dev->resource + PCI_IOV_RESOURCES + i;
                if (!res->parent)
                        continue;
                virtfn->resource[i].name = pci_name(virtfn);
                virtfn->resource[i].flags = res->flags;
                size = resource_size(res);
                do_div(size, iov->total);
                virtfn->resource[i].start = res->start + size * id;
                virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
                rc = request_resource(res, &virtfn->resource[i]);
                BUG_ON(rc);
        }

        if (reset)
                pci_execute_reset_function(virtfn);

        pci_device_add(virtfn, virtfn->bus);
        mutex_unlock(&iov->dev->sriov->lock);

        virtfn->physfn = pci_dev_get(dev);
        virtfn->is_virtfn = 1;

        rc = pci_bus_add_device(virtfn);
        if (rc)
                goto failed1;
        sprintf(buf, "virtfn%u", id);
        rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
        if (rc)
                goto failed1;
        rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
        if (rc)
                goto failed2;

        kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);

        return 0;

failed2:
        sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
        pci_dev_put(dev);
        mutex_lock(&iov->dev->sriov->lock);
        pci_remove_bus_device(virtfn);
        virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
        mutex_unlock(&iov->dev->sriov->lock);

        return rc;
}

static void virtfn_remove(struct pci_dev *dev, int id, int reset)
{
        char buf[VIRTFN_ID_LEN];
        struct pci_bus *bus;
        struct pci_dev *virtfn;
        struct pci_sriov *iov = dev->sriov;

        bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
        if (!bus)
                return;

        virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
        if (!virtfn)
                return;

        pci_dev_put(virtfn);

        if (reset) {
                device_release_driver(&virtfn->dev);
                pci_execute_reset_function(virtfn);
        }

        sprintf(buf, "virtfn%u", id);
        sysfs_remove_link(&dev->dev.kobj, buf);
        sysfs_remove_link(&virtfn->dev.kobj, "physfn");

        mutex_lock(&iov->dev->sriov->lock);
        pci_remove_bus_device(virtfn);
        virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
        mutex_unlock(&iov->dev->sriov->lock);

        pci_dev_put(dev);
}

static int sriov_migration(struct pci_dev *dev)
{
        u16 status;
        struct pci_sriov *iov = dev->sriov;

        if (!iov->nr_virtfn)
                return 0;

        if (!(iov->cap & PCI_SRIOV_CAP_VFM))
                return 0;

        pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
        if (!(status & PCI_SRIOV_STATUS_VFM))
                return 0;

        schedule_work(&iov->mtask);

        return 1;
}
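
/*
 * The VF Migration state array mapped at iov->mstate holds one state byte
 * per VF.  The work item below scans the VFs beyond InitialVFs: a VF found
 * in the migrate-in state (PCI_SRIOV_VFM_MI) is acknowledged by writing
 * PCI_SRIOV_VFM_AV and, if that state sticks, hot-added with a function
 * reset; a VF in the migrate-out state (PCI_SRIOV_VFM_MO) is removed,
 * marked PCI_SRIOV_VFM_UA, and re-added without a reset if the device
 * reports it available again.  The VF Migration Status bit is cleared once
 * the scan completes.
 */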

static void sriov_migration_task(struct work_struct *work)
{
        int i;
        u8 state;
        u16 status;
        struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);

        for (i = iov->initial; i < iov->nr_virtfn; i++) {
                state = readb(iov->mstate + i);
                if (state == PCI_SRIOV_VFM_MI) {
                        writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
                        state = readb(iov->mstate + i);
                        if (state == PCI_SRIOV_VFM_AV)
                                virtfn_add(iov->self, i, 1);
                } else if (state == PCI_SRIOV_VFM_MO) {
                        virtfn_remove(iov->self, i, 1);
                        writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
                        state = readb(iov->mstate + i);
                        if (state == PCI_SRIOV_VFM_AV)
                                virtfn_add(iov->self, i, 0);
                }
        }

        pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
        status &= ~PCI_SRIOV_STATUS_VFM;
        pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
}

static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
{
        int bir;
        u32 table;
        resource_size_t pa;
        struct pci_sriov *iov = dev->sriov;

        if (nr_virtfn <= iov->initial)
                return 0;

        pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
        bir = PCI_SRIOV_VFM_BIR(table);
        if (bir > PCI_STD_RESOURCE_END)
                return -EIO;

        table = PCI_SRIOV_VFM_OFFSET(table);
        if (table + nr_virtfn > pci_resource_len(dev, bir))
                return -EIO;

        pa = pci_resource_start(dev, bir) + table;
        iov->mstate = ioremap(pa, nr_virtfn);
        if (!iov->mstate)
                return -ENOMEM;

        INIT_WORK(&iov->mtask, sriov_migration_task);

        iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

        return 0;
}

static void sriov_disable_migration(struct pci_dev *dev)
{
        struct pci_sriov *iov = dev->sriov;

        iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

        cancel_work_sync(&iov->mtask);
        iounmap(iov->mstate);
}
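
/*
 * sriov_enable() programs NumVFs, re-reads First VF Offset and VF Stride
 * (which may change with NumVFs and ARI), checks that the IOV BARs were
 * assigned and that the last VF's bus number still fits below the bridge's
 * subordinate bus, records the "dep_link" dependency when the capability
 * points at another function, and only then sets VF Enable and VF MSE.
 * The msleep(100) after setting VF Enable is the settling time the SR-IOV
 * spec allows the device before its VFs must be accessible.
 */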

static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
        int rc;
        int i, j;
        int nres;
        u16 offset, stride, initial;
        struct resource *res;
        struct pci_dev *pdev;
        struct pci_sriov *iov = dev->sriov;

        if (!nr_virtfn)
                return 0;

        if (iov->nr_virtfn)
                return -EINVAL;

        pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
        if (initial > iov->total ||
            (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
                return -EIO;

        if (nr_virtfn < 0 || nr_virtfn > iov->total ||
            (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
                return -EINVAL;

        pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
        pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
        if (!offset || (nr_virtfn > 1 && !stride))
                return -EIO;

        nres = 0;
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = dev->resource + PCI_IOV_RESOURCES + i;
                if (res->parent)
                        nres++;
        }
        if (nres != iov->nres) {
                dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
                return -ENOMEM;
        }

        iov->offset = offset;
        iov->stride = stride;

        if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
                dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
                return -ENOMEM;
        }

        if (iov->link != dev->devfn) {
                pdev = pci_get_slot(dev->bus, iov->link);
                if (!pdev)
                        return -ENODEV;

                pci_dev_put(pdev);

                if (!pdev->is_physfn)
                        return -ENODEV;

                rc = sysfs_create_link(&dev->dev.kobj,
                                       &pdev->dev.kobj, "dep_link");
                if (rc)
                        return rc;
        }

        iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
        pci_block_user_cfg_access(dev);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
        msleep(100);
        pci_unblock_user_cfg_access(dev);

        iov->initial = initial;
        if (nr_virtfn < initial)
                initial = nr_virtfn;

        for (i = 0; i < initial; i++) {
                rc = virtfn_add(dev, i, 0);
                if (rc)
                        goto failed;
        }

        if (iov->cap & PCI_SRIOV_CAP_VFM) {
                rc = sriov_enable_migration(dev, nr_virtfn);
                if (rc)
                        goto failed;
        }

        kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
        iov->nr_virtfn = nr_virtfn;

        return 0;

failed:
        for (j = 0; j < i; j++)
                virtfn_remove(dev, j, 0);

        iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
        pci_block_user_cfg_access(dev);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
        ssleep(1);
        pci_unblock_user_cfg_access(dev);

        if (iov->link != dev->devfn)
                sysfs_remove_link(&dev->dev.kobj, "dep_link");

        return rc;
}

static void sriov_disable(struct pci_dev *dev)
{
        int i;
        struct pci_sriov *iov = dev->sriov;

        if (!iov->nr_virtfn)
                return;

        if (iov->cap & PCI_SRIOV_CAP_VFM)
                sriov_disable_migration(dev);

        for (i = 0; i < iov->nr_virtfn; i++)
                virtfn_remove(dev, i, 0);

        iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
        pci_block_user_cfg_access(dev);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
        ssleep(1);
        pci_unblock_user_cfg_access(dev);

        if (iov->link != dev->devfn)
                sysfs_remove_link(&dev->dev.kobj, "dep_link");

        iov->nr_virtfn = 0;
}
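
/*
 * sriov_init() parses the SR-IOV capability found at config offset pos
 * during enumeration: it clears a stale VF Enable, reads TotalVFs,
 * sanity-checks First VF Offset and VF Stride with NumVFs set to TotalVFs,
 * selects a System Page Size compatible with PAGE_SIZE, sizes each IOV BAR
 * to hold TotalVFs copies of the per-VF aperture, and allocates the
 * struct pci_sriov that marks the device as a Physical Function.
 */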

static int sriov_init(struct pci_dev *dev, int pos)
{
        int i;
        int rc;
        int nres;
        u32 pgsz;
        u16 ctrl, total, offset, stride;
        struct pci_sriov *iov;
        struct resource *res;
        struct pci_dev *pdev;

        if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
            dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
                return -ENODEV;

        pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
        if (ctrl & PCI_SRIOV_CTRL_VFE) {
                pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
                ssleep(1);
        }

        pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
        if (!total)
                return 0;

        ctrl = 0;
        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
                if (pdev->is_physfn)
                        goto found;

        pdev = NULL;
        if (pci_ari_enabled(dev->bus))
                ctrl |= PCI_SRIOV_CTRL_ARI;

found:
        pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
        pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
        pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
        if (!offset || (total > 1 && !stride))
                return -EIO;

        pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
        i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
        pgsz &= ~((1 << i) - 1);
        if (!pgsz)
                return -EIO;

        pgsz &= ~(pgsz - 1);
        pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

        nres = 0;
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = dev->resource + PCI_IOV_RESOURCES + i;
                i += __pci_read_base(dev, pci_bar_unknown, res,
                                     pos + PCI_SRIOV_BAR + i * 4);
                if (!res->flags)
                        continue;
                if (resource_size(res) & (PAGE_SIZE - 1)) {
                        rc = -EIO;
                        goto failed;
                }
                res->end = res->start + resource_size(res) * total - 1;
                nres++;
        }

        iov = kzalloc(sizeof(*iov), GFP_KERNEL);
        if (!iov) {
                rc = -ENOMEM;
                goto failed;
        }

        iov->pos = pos;
        iov->nres = nres;
        iov->ctrl = ctrl;
        iov->total = total;
        iov->offset = offset;
        iov->stride = stride;
        iov->pgsz = pgsz;
        iov->self = dev;
        pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
        pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

        if (pdev)
                iov->dev = pci_dev_get(pdev);
        else
                iov->dev = dev;

        mutex_init(&iov->lock);

        dev->sriov = iov;
        dev->is_physfn = 1;

        return 0;

failed:
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = dev->resource + PCI_IOV_RESOURCES + i;
                res->flags = 0;
        }

        return rc;
}

static void sriov_release(struct pci_dev *dev)
{
        BUG_ON(dev->sriov->nr_virtfn);

        if (dev != dev->sriov->dev)
                pci_dev_put(dev->sriov->dev);

        mutex_destroy(&dev->sriov->lock);

        kfree(dev->sriov);
        dev->sriov = NULL;
}

static void sriov_restore_state(struct pci_dev *dev)
{
        int i;
        u16 ctrl;
        struct pci_sriov *iov = dev->sriov;

        pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
        if (ctrl & PCI_SRIOV_CTRL_VFE)
                return;

        for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
                pci_update_resource(dev, i);

        pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
        if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
                msleep(100);
}

/**
 * pci_iov_init - initialize the IOV capability
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_iov_init(struct pci_dev *dev)
{
        int pos;

        if (!dev->is_pcie)
                return -ENODEV;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
        if (pos)
                return sriov_init(dev, pos);

        return -ENODEV;
}

/**
 * pci_iov_release - release resources used by the IOV capability
 * @dev: the PCI device
 */
void pci_iov_release(struct pci_dev *dev)
{
        if (dev->is_physfn)
                sriov_release(dev);
}

/**
 * pci_iov_resource_bar - get position of the SR-IOV BAR
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns the position of the BAR encapsulated in the SR-IOV capability.
 */
int pci_iov_resource_bar(struct pci_dev *dev, int resno,
                         enum pci_bar_type *type)
{
        if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
                return 0;

        BUG_ON(!dev->is_physfn);

        *type = pci_bar_unknown;

        return dev->sriov->pos + PCI_SRIOV_BAR +
                4 * (resno - PCI_IOV_RESOURCES);
}

/**
 * pci_restore_iov_state - restore the state of the IOV capability
 * @dev: the PCI device
 */
void pci_restore_iov_state(struct pci_dev *dev)
{
        if (dev->is_physfn)
                sriov_restore_state(dev);
}

/**
 * pci_iov_bus_range - find bus range used by Virtual Functions
 * @bus: the PCI bus
 *
 * Returns the max number of buses (excluding the current one) used by
 * Virtual Functions.
 */
int pci_iov_bus_range(struct pci_bus *bus)
{
        int max = 0;
        u8 busnr;
        struct pci_dev *dev;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                if (!dev->is_physfn)
                        continue;
                busnr = virtfn_bus(dev, dev->sriov->total - 1);
                if (busnr > max)
                        max = busnr;
        }

        return max ? max - bus->number : 0;
}

/**
 * pci_enable_sriov - enable the SR-IOV capability
 * @dev: the PCI device
 * @nr_virtfn: number of virtual functions to enable
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
        might_sleep();

        if (!dev->is_physfn)
                return -ENODEV;

        return sriov_enable(dev, nr_virtfn);
}
EXPORT_SYMBOL_GPL(pci_enable_sriov);

/**
 * pci_disable_sriov - disable the SR-IOV capability
 * @dev: the PCI device
 */
void pci_disable_sriov(struct pci_dev *dev)
{
        might_sleep();

        if (!dev->is_physfn)
                return;

        sriov_disable(dev);
}
EXPORT_SYMBOL_GPL(pci_disable_sriov);
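
/*
 * Typical use of the two exports above, shown only as a minimal sketch: a
 * Physical Function driver enables its Virtual Functions once the PF itself
 * is up, and disables them again on removal.  The foo_*() names and the
 * num_vfs parameter are hypothetical, not taken from any real driver.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err;
 *
 *		err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *
 *		... bring up the PF itself ...
 *
 *		err = pci_enable_sriov(pdev, num_vfs);
 *		if (err)
 *			dev_warn(&pdev->dev, "failed to enable %d VFs: %d\n",
 *				 num_vfs, err);
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_disable_sriov(pdev);
 *		pci_disable_device(pdev);
 *	}
 */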

/**
 * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
 * @dev: the PCI device
 *
 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
 *
 * The Physical Function driver is responsible for registering an IRQ
 * handler using the VF Migration Interrupt Message Number, and for calling
 * this function when the hardware generates the interrupt.
 */
irqreturn_t pci_sriov_migration(struct pci_dev *dev)
{
        if (!dev->is_physfn)
                return IRQ_NONE;

        return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);
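
/*
 * A minimal sketch of what the comment above expects from a Physical
 * Function driver: request the interrupt vector identified by the VF
 * Migration Interrupt Message Number in the SR-IOV capability and forward
 * it here so the migration work can be scheduled.  The vfm_irq variable
 * and the foo_vfm_irq() handler name are hypothetical.
 *
 *	static irqreturn_t foo_vfm_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		return pci_sriov_migration(pdev);
 *	}
 *
 *	rc = request_irq(vfm_irq, foo_vfm_irq, 0, "foo-vfm", pdev);
 */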

static int ats_alloc_one(struct pci_dev *dev, int ps)
{
        int pos;
        u16 cap;
        struct pci_ats *ats;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
        if (!pos)
                return -ENODEV;

        ats = kzalloc(sizeof(*ats), GFP_KERNEL);
        if (!ats)
                return -ENOMEM;

        ats->pos = pos;
        ats->stu = ps;
        pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
        ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
                                            PCI_ATS_MAX_QDEP;
        dev->ats = ats;

        return 0;
}

static void ats_free_one(struct pci_dev *dev)
{
        kfree(dev->ats);
        dev->ats = NULL;
}

/**
 * pci_enable_ats - enable the ATS capability
 * @dev: the PCI device
 * @ps: the IOMMU page shift
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_ats(struct pci_dev *dev, int ps)
{
        int rc;
        u16 ctrl;

        BUG_ON(dev->ats && dev->ats->is_enabled);

        if (ps < PCI_ATS_MIN_STU)
                return -EINVAL;

        if (dev->is_physfn || dev->is_virtfn) {
                struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;

                mutex_lock(&pdev->sriov->lock);
                if (pdev->ats)
                        rc = pdev->ats->stu == ps ? 0 : -EINVAL;
                else
                        rc = ats_alloc_one(pdev, ps);

                if (!rc)
                        pdev->ats->ref_cnt++;
                mutex_unlock(&pdev->sriov->lock);
                if (rc)
                        return rc;
        }

        if (!dev->is_physfn) {
                rc = ats_alloc_one(dev, ps);
                if (rc)
                        return rc;
        }

        ctrl = PCI_ATS_CTRL_ENABLE;
        if (!dev->is_virtfn)
                ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
        pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);

        dev->ats->is_enabled = 1;

        return 0;
}

/**
 * pci_disable_ats - disable the ATS capability
 * @dev: the PCI device
 */
void pci_disable_ats(struct pci_dev *dev)
{
        u16 ctrl;

        BUG_ON(!dev->ats || !dev->ats->is_enabled);

        pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
        ctrl &= ~PCI_ATS_CTRL_ENABLE;
        pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);

        dev->ats->is_enabled = 0;

        if (dev->is_physfn || dev->is_virtfn) {
                struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;

                mutex_lock(&pdev->sriov->lock);
                pdev->ats->ref_cnt--;
                if (!pdev->ats->ref_cnt)
                        ats_free_one(pdev);
                mutex_unlock(&pdev->sriov->lock);
        }

        if (!dev->is_physfn)
                ats_free_one(dev);
}
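
/*
 * Rough usage sketch for the ATS pair above, as seen from an IOMMU driver
 * that wants a device to cache translations; the choice of PAGE_SHIFT as
 * the page shift is an assumption (it must be at least PCI_ATS_MIN_STU),
 * not something taken from a particular driver.
 *
 *	int err = pci_enable_ats(pdev, PAGE_SHIFT);
 *	if (err)
 *		dev_err(&pdev->dev, "cannot enable ATS (%d)\n", err);
 *	...
 *	pci_disable_ats(pdev);
 */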

/**
 * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
 * @dev: the PCI device
 *
 * Returns the queue depth on success, or negative on failure.
 *
 * The ATS spec uses 0 in the Invalidate Queue Depth field to
 * indicate that the function can accept 32 Invalidate Requests.
 * But here we use the real values (i.e. 1~32) for the Queue
 * Depth; 0 indicates that the function shares the Queue with
 * other functions (it doesn't exclusively own a Queue).
 */
int pci_ats_queue_depth(struct pci_dev *dev)
{
        int pos;
        u16 cap;

        if (dev->is_virtfn)
                return 0;

        if (dev->ats)
                return dev->ats->qdep;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
        if (!pos)
                return -ENODEV;

        pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);

        return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
                                       PCI_ATS_MAX_QDEP;
}