/* drivers/pci/iov.c */
  1. /*
  2. * drivers/pci/iov.c
  3. *
  4. * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
  5. *
  6. * PCI Express I/O Virtualization (IOV) support.
  7. * Single Root IOV 1.0
  8. * Address Translation Service 1.0
  9. */
  10. #include <linux/pci.h>
  11. #include <linux/mutex.h>
  12. #include <linux/string.h>
  13. #include <linux/delay.h>
  14. #include "pci.h"
  15. #define VIRTFN_ID_LEN 16
  16. static inline u8 virtfn_bus(struct pci_dev *dev, int id)
  17. {
  18. return dev->bus->number + ((dev->devfn + dev->sriov->offset +
  19. dev->sriov->stride * id) >> 8);
  20. }
  21. static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
  22. {
  23. return (dev->devfn + dev->sriov->offset +
  24. dev->sriov->stride * id) & 0xff;
  25. }
  26. static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
  27. {
  28. int rc;
  29. struct pci_bus *child;
  30. if (bus->number == busnr)
  31. return bus;
  32. child = pci_find_bus(pci_domain_nr(bus), busnr);
  33. if (child)
  34. return child;
  35. child = pci_add_new_bus(bus, NULL, busnr);
  36. if (!child)
  37. return NULL;
  38. child->subordinate = busnr;
  39. child->dev.parent = bus->bridge;
  40. rc = pci_bus_add_child(child);
  41. if (rc) {
  42. pci_remove_bus(child);
  43. return NULL;
  44. }
  45. return child;
  46. }
  47. static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
  48. {
  49. struct pci_bus *child;
  50. if (bus->number == busnr)
  51. return;
  52. child = pci_find_bus(pci_domain_nr(bus), busnr);
  53. BUG_ON(!child);
  54. if (list_empty(&child->devices))
  55. pci_remove_bus(child);
  56. }
  57. static int virtfn_add(struct pci_dev *dev, int id, int reset)
  58. {
  59. int i;
  60. int rc;
  61. u64 size;
  62. char buf[VIRTFN_ID_LEN];
  63. struct pci_dev *virtfn;
  64. struct resource *res;
  65. struct pci_sriov *iov = dev->sriov;
  66. virtfn = alloc_pci_dev();
  67. if (!virtfn)
  68. return -ENOMEM;
  69. mutex_lock(&iov->dev->sriov->lock);
  70. virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
  71. if (!virtfn->bus) {
  72. kfree(virtfn);
  73. mutex_unlock(&iov->dev->sriov->lock);
  74. return -ENOMEM;
  75. }
  76. virtfn->devfn = virtfn_devfn(dev, id);
  77. virtfn->vendor = dev->vendor;
  78. pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
  79. pci_setup_device(virtfn);
  80. virtfn->dev.parent = dev->dev.parent;
  81. for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
  82. res = dev->resource + PCI_IOV_RESOURCES + i;
  83. if (!res->parent)
  84. continue;
  85. virtfn->resource[i].name = pci_name(virtfn);
  86. virtfn->resource[i].flags = res->flags;
  87. size = resource_size(res);
  88. do_div(size, iov->total);
  89. virtfn->resource[i].start = res->start + size * id;
  90. virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
  91. rc = request_resource(res, &virtfn->resource[i]);
  92. BUG_ON(rc);
  93. }
  94. if (reset)
  95. __pci_reset_function(virtfn);
  96. pci_device_add(virtfn, virtfn->bus);
  97. mutex_unlock(&iov->dev->sriov->lock);
  98. virtfn->physfn = pci_dev_get(dev);
  99. virtfn->is_virtfn = 1;
  100. rc = pci_bus_add_device(virtfn);
  101. if (rc)
  102. goto failed1;
  103. sprintf(buf, "virtfn%u", id);
  104. rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
  105. if (rc)
  106. goto failed1;
  107. rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
  108. if (rc)
  109. goto failed2;
  110. kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);
  111. return 0;
  112. failed2:
  113. sysfs_remove_link(&dev->dev.kobj, buf);
  114. failed1:
  115. pci_dev_put(dev);
  116. mutex_lock(&iov->dev->sriov->lock);
  117. pci_remove_bus_device(virtfn);
  118. virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
  119. mutex_unlock(&iov->dev->sriov->lock);
  120. return rc;
  121. }
  122. static void virtfn_remove(struct pci_dev *dev, int id, int reset)
  123. {
  124. char buf[VIRTFN_ID_LEN];
  125. struct pci_bus *bus;
  126. struct pci_dev *virtfn;
  127. struct pci_sriov *iov = dev->sriov;
  128. bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
  129. if (!bus)
  130. return;
  131. virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
  132. if (!virtfn)
  133. return;
  134. pci_dev_put(virtfn);
  135. if (reset) {
  136. device_release_driver(&virtfn->dev);
  137. __pci_reset_function(virtfn);
  138. }
  139. sprintf(buf, "virtfn%u", id);
  140. sysfs_remove_link(&dev->dev.kobj, buf);
  141. sysfs_remove_link(&virtfn->dev.kobj, "physfn");
  142. mutex_lock(&iov->dev->sriov->lock);
  143. pci_remove_bus_device(virtfn);
  144. virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
  145. mutex_unlock(&iov->dev->sriov->lock);
  146. pci_dev_put(dev);
  147. }
/*
 * Check whether PF @dev has a pending VF Migration event and, if so,
 * schedule the migration worker.  Returns 1 when the event was claimed
 * (work scheduled), 0 otherwise.
 */
static int sriov_migration(struct pci_dev *dev)
{
	u16 status;
	struct pci_sriov *iov = dev->sriov;

	/* No VFs enabled -- nothing can be migrating. */
	if (!iov->nr_virtfn)
		return 0;

	/* Device does not implement VF Migration. */
	if (!(iov->cap & PCI_SRIOV_CAP_VFM))
		return 0;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
	if (!(status & PCI_SRIOV_STATUS_VFM))
		return 0;

	/* Defer the actual VF add/remove work to process context. */
	schedule_work(&iov->mtask);

	return 1;
}
/*
 * Workqueue handler for VF Migration events.  Walks the per-VF migration
 * state array (only VFs beyond the statically created ones can migrate)
 * and reacts to Migrate-In / Migrate-Out requests, then clears the VFM
 * status bit.  Re-reads the state after each write because the hardware
 * may reject the transition.
 */
static void sriov_migration_task(struct work_struct *work)
{
	int i;
	u8 state;
	u16 status;
	struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);

	for (i = iov->initial; i < iov->nr_virtfn; i++) {
		state = readb(iov->mstate + i);
		if (state == PCI_SRIOV_VFM_MI) {
			/* Migrate-In request: mark Available, then add VF. */
			writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
			state = readb(iov->mstate + i);
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 1);
		} else if (state == PCI_SRIOV_VFM_MO) {
			/* Migrate-Out request: remove VF, mark Unavailable. */
			virtfn_remove(iov->self, i, 1);
			writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
			state = readb(iov->mstate + i);
			/* Hardware may flip straight back to Available. */
			if (state == PCI_SRIOV_VFM_AV)
				virtfn_add(iov->self, i, 0);
		}
	}

	/* Acknowledge the migration event in the SR-IOV status register. */
	pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
	status &= ~PCI_SRIOV_STATUS_VFM;
	pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
}
/*
 * Set up VF Migration for PF @dev: locate and map the VF Migration State
 * Array (one byte per VF) described by the VFM register, initialize the
 * worker, and enable VF Migration plus its interrupt.  Returns 0 or a
 * negative errno.
 */
static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
{
	int bir;
	u32 table;
	resource_size_t pa;
	struct pci_sriov *iov = dev->sriov;

	/* Nothing to migrate if all requested VFs exist from the start. */
	if (nr_virtfn <= iov->initial)
		return 0;

	pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
	bir = PCI_SRIOV_VFM_BIR(table);
	/* The state array must live in one of the standard BARs. */
	if (bir > PCI_STD_RESOURCE_END)
		return -EIO;

	table = PCI_SRIOV_VFM_OFFSET(table);
	/* The array (nr_virtfn bytes) must fit inside that BAR. */
	if (table + nr_virtfn > pci_resource_len(dev, bir))
		return -EIO;

	pa = pci_resource_start(dev, bir) + table;
	iov->mstate = ioremap(pa, nr_virtfn);
	if (!iov->mstate)
		return -ENOMEM;

	INIT_WORK(&iov->mtask, sriov_migration_task);

	iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

	return 0;
}
  211. static void sriov_disable_migration(struct pci_dev *dev)
  212. {
  213. struct pci_sriov *iov = dev->sriov;
  214. iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
  215. pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
  216. cancel_work_sync(&iov->mtask);
  217. iounmap(iov->mstate);
  218. }
/*
 * Enable @nr_virtfn Virtual Functions on PF @dev: validate the request
 * against InitialVFs/TotalVFs, program NumVFs, check offset/stride and
 * the bus range, handle the function-dependency link, set VF Enable +
 * VF MSE, then instantiate the VF devices.  Returns 0 or negative errno.
 */
static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
	int rc;
	int i, j;
	int nres;
	u16 offset, stride, initial;
	struct resource *res;
	struct pci_dev *pdev;
	struct pci_sriov *iov = dev->sriov;

	if (!nr_virtfn)
		return 0;

	/* Already enabled -- caller must disable first. */
	if (iov->nr_virtfn)
		return -EINVAL;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
	/* Without VF Migration, InitialVFs must equal TotalVFs. */
	if (initial > iov->total ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
		return -EIO;

	if (nr_virtfn < 0 || nr_virtfn > iov->total ||
	    (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
		return -EINVAL;

	/* VF Offset/Stride may depend on NumVFs, so write NumVFs first. */
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (nr_virtfn > 1 && !stride))
		return -EIO;

	/* Every IOV BAR found at probe time must have been allocated. */
	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		if (res->parent)
			nres++;
	}
	if (nres != iov->nres) {
		dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
		return -ENOMEM;
	}

	iov->offset = offset;
	iov->stride = stride;

	/* The highest VF's bus number must fit under this bridge. */
	if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
		dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
		return -ENOMEM;
	}

	/* Function Dependency Link: the linked PF must exist and be a PF. */
	if (iov->link != dev->devfn) {
		pdev = pci_get_slot(dev->bus, iov->link);
		if (!pdev)
			return -ENODEV;

		pci_dev_put(pdev);

		/*
		 * NOTE(review): pdev is dereferenced after its reference was
		 * dropped above; this looks safe only while the sibling PF
		 * cannot be hot-removed concurrently -- confirm.
		 */
		if (!pdev->is_physfn)
			return -ENODEV;

		rc = sysfs_create_link(&dev->dev.kobj,
					&pdev->dev.kobj, "dep_link");
		if (rc)
			return rc;
	}

	iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
	pci_block_user_cfg_access(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	/* Give the device time to bring its VFs up after VF Enable. */
	msleep(100);
	pci_unblock_user_cfg_access(dev);

	iov->initial = initial;
	if (nr_virtfn < initial)
		initial = nr_virtfn;

	for (i = 0; i < initial; i++) {
		rc = virtfn_add(dev, i, 0);
		if (rc)
			goto failed;
	}

	/* VFs beyond InitialVFs are brought in via VF Migration. */
	if (iov->cap & PCI_SRIOV_CAP_VFM) {
		rc = sriov_enable_migration(dev, nr_virtfn);
		if (rc)
			goto failed;
	}

	kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
	iov->nr_virtfn = nr_virtfn;

	return 0;

failed:
	/* Tear down the VFs created so far, then disable VF Enable/MSE. */
	for (j = 0; j < i; j++)
		virtfn_remove(dev, j, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_block_user_cfg_access(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	ssleep(1);
	pci_unblock_user_cfg_access(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	return rc;
}
/*
 * Disable SR-IOV on PF @dev: stop VF Migration (if active), remove all
 * VF devices, clear VF Enable + VF MSE, and drop the dependency link.
 * No-op when SR-IOV is not currently enabled.
 */
static void sriov_disable(struct pci_dev *dev)
{
	int i;
	struct pci_sriov *iov = dev->sriov;

	if (!iov->nr_virtfn)
		return;

	if (iov->cap & PCI_SRIOV_CAP_VFM)
		sriov_disable_migration(dev);

	for (i = 0; i < iov->nr_virtfn; i++)
		virtfn_remove(dev, i, 0);

	iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
	pci_block_user_cfg_access(dev);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	/* Allow the device to settle after clearing VF Enable. */
	ssleep(1);
	pci_unblock_user_cfg_access(dev);

	if (iov->link != dev->devfn)
		sysfs_remove_link(&dev->dev.kobj, "dep_link");

	iov->nr_virtfn = 0;
}
/*
 * Probe-time initialization of the SR-IOV capability found at config
 * offset @pos of @dev: sanity-check the device type, quiesce a
 * firmware-enabled state, size the IOV BARs for TotalVFs, and allocate
 * and populate the pci_sriov bookkeeping.  Returns 0 (also when the
 * capability reports no VFs) or a negative errno.
 */
static int sriov_init(struct pci_dev *dev, int pos)
{
	int i;
	int rc;
	int nres;
	u32 pgsz;
	u16 ctrl, total, offset, stride;
	struct pci_sriov *iov;
	struct resource *res;
	struct pci_dev *pdev;

	/* Only Endpoints and Root Complex Integrated Endpoints have SR-IOV. */
	if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
	    dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
		return -ENODEV;

	/* If firmware left VF Enable set, clear it and let the device settle. */
	pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE) {
		pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
		ssleep(1);
	}

	pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
	if (!total)
		return 0;

	/*
	 * ARI Capable Hierarchy must be set consistently across all PFs on
	 * the bus; only the first PF decides, the rest inherit.
	 */
	ctrl = 0;
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev->is_physfn)
			goto found;

	pdev = NULL;
	if (pci_ari_enabled(dev->bus))
		ctrl |= PCI_SRIOV_CTRL_ARI;

found:
	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
	/* Program NumVFs = TotalVFs before reading offset/stride. */
	pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
	if (!offset || (total > 1 && !stride))
		return -EIO;

	pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
	/* Mask out supported page sizes smaller than the system PAGE_SIZE. */
	i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
	pgsz &= ~((1 << i) - 1);
	if (!pgsz)
		return -EIO;

	/* Keep only the lowest remaining bit: the smallest usable page size. */
	pgsz &= ~(pgsz - 1);
	pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

	nres = 0;
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		/* __pci_read_base() returns 1 extra for a 64-bit BAR pair. */
		i += __pci_read_base(dev, pci_bar_unknown, res,
				     pos + PCI_SRIOV_BAR + i * 4);
		if (!res->flags)
			continue;
		/* Each VF BAR must be page-aligned in size. */
		if (resource_size(res) & (PAGE_SIZE - 1)) {
			rc = -EIO;
			goto failed;
		}
		/* Grow the BAR to cover one slice per possible VF. */
		res->end = res->start + resource_size(res) * total - 1;
		nres++;
	}

	iov = kzalloc(sizeof(*iov), GFP_KERNEL);
	if (!iov) {
		rc = -ENOMEM;
		goto failed;
	}

	iov->pos = pos;
	iov->nres = nres;
	iov->ctrl = ctrl;
	iov->total = total;
	iov->offset = offset;
	iov->stride = stride;
	iov->pgsz = pgsz;
	iov->self = dev;
	pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
	pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
	/* RC integrated endpoints encode the link relative to the slot. */
	if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
		iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);

	/* iov->dev points at the first PF on the bus (lock owner), or self. */
	if (pdev)
		iov->dev = pci_dev_get(pdev);
	else
		iov->dev = dev;

	mutex_init(&iov->lock);

	dev->sriov = iov;
	dev->is_physfn = 1;

	return 0;

failed:
	for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
		res = dev->resource + PCI_IOV_RESOURCES + i;
		res->flags = 0;
	}

	return rc;
}
  412. static void sriov_release(struct pci_dev *dev)
  413. {
  414. BUG_ON(dev->sriov->nr_virtfn);
  415. if (dev != dev->sriov->dev)
  416. pci_dev_put(dev->sriov->dev);
  417. mutex_destroy(&dev->sriov->lock);
  418. kfree(dev->sriov);
  419. dev->sriov = NULL;
  420. }
/*
 * Re-program the SR-IOV capability of @dev after a reset or power
 * transition from the cached software state.  Bails out if VF Enable is
 * already set (state survived, nothing to restore).
 */
static void sriov_restore_state(struct pci_dev *dev)
{
	int i;
	u16 ctrl;
	struct pci_sriov *iov = dev->sriov;

	pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
	if (ctrl & PCI_SRIOV_CTRL_VFE)
		return;

	/* Re-write the IOV BAR addresses. */
	for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
		pci_update_resource(dev, i);

	pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
	pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
	/* Same settle delay as sriov_enable() when re-enabling VFs. */
	if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
		msleep(100);
}
  437. /**
  438. * pci_iov_init - initialize the IOV capability
  439. * @dev: the PCI device
  440. *
  441. * Returns 0 on success, or negative on failure.
  442. */
  443. int pci_iov_init(struct pci_dev *dev)
  444. {
  445. int pos;
  446. if (!dev->is_pcie)
  447. return -ENODEV;
  448. pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
  449. if (pos)
  450. return sriov_init(dev, pos);
  451. return -ENODEV;
  452. }
  453. /**
  454. * pci_iov_release - release resources used by the IOV capability
  455. * @dev: the PCI device
  456. */
  457. void pci_iov_release(struct pci_dev *dev)
  458. {
  459. if (dev->is_physfn)
  460. sriov_release(dev);
  461. }
  462. /**
  463. * pci_iov_resource_bar - get position of the SR-IOV BAR
  464. * @dev: the PCI device
  465. * @resno: the resource number
  466. * @type: the BAR type to be filled in
  467. *
  468. * Returns position of the BAR encapsulated in the SR-IOV capability.
  469. */
  470. int pci_iov_resource_bar(struct pci_dev *dev, int resno,
  471. enum pci_bar_type *type)
  472. {
  473. if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
  474. return 0;
  475. BUG_ON(!dev->is_physfn);
  476. *type = pci_bar_unknown;
  477. return dev->sriov->pos + PCI_SRIOV_BAR +
  478. 4 * (resno - PCI_IOV_RESOURCES);
  479. }
  480. /**
  481. * pci_restore_iov_state - restore the state of the IOV capability
  482. * @dev: the PCI device
  483. */
  484. void pci_restore_iov_state(struct pci_dev *dev)
  485. {
  486. if (dev->is_physfn)
  487. sriov_restore_state(dev);
  488. }
  489. /**
  490. * pci_iov_bus_range - find bus range used by Virtual Function
  491. * @bus: the PCI bus
  492. *
  493. * Returns max number of buses (exclude current one) used by Virtual
  494. * Functions.
  495. */
  496. int pci_iov_bus_range(struct pci_bus *bus)
  497. {
  498. int max = 0;
  499. u8 busnr;
  500. struct pci_dev *dev;
  501. list_for_each_entry(dev, &bus->devices, bus_list) {
  502. if (!dev->is_physfn)
  503. continue;
  504. busnr = virtfn_bus(dev, dev->sriov->total - 1);
  505. if (busnr > max)
  506. max = busnr;
  507. }
  508. return max ? max - bus->number : 0;
  509. }
  510. /**
  511. * pci_enable_sriov - enable the SR-IOV capability
  512. * @dev: the PCI device
  513. * @nr_virtfn: number of virtual functions to enable
  514. *
  515. * Returns 0 on success, or negative on failure.
  516. */
  517. int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
  518. {
  519. might_sleep();
  520. if (!dev->is_physfn)
  521. return -ENODEV;
  522. return sriov_enable(dev, nr_virtfn);
  523. }
  524. EXPORT_SYMBOL_GPL(pci_enable_sriov);
  525. /**
  526. * pci_disable_sriov - disable the SR-IOV capability
  527. * @dev: the PCI device
  528. */
  529. void pci_disable_sriov(struct pci_dev *dev)
  530. {
  531. might_sleep();
  532. if (!dev->is_physfn)
  533. return;
  534. sriov_disable(dev);
  535. }
  536. EXPORT_SYMBOL_GPL(pci_disable_sriov);
  537. /**
  538. * pci_sriov_migration - notify SR-IOV core of Virtual Function Migration
  539. * @dev: the PCI device
  540. *
  541. * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
  542. *
  543. * Physical Function driver is responsible to register IRQ handler using
  544. * VF Migration Interrupt Message Number, and call this function when the
  545. * interrupt is generated by the hardware.
  546. */
  547. irqreturn_t pci_sriov_migration(struct pci_dev *dev)
  548. {
  549. if (!dev->is_physfn)
  550. return IRQ_NONE;
  551. return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
  552. }
  553. EXPORT_SYMBOL_GPL(pci_sriov_migration);
  554. static int ats_alloc_one(struct pci_dev *dev, int ps)
  555. {
  556. int pos;
  557. u16 cap;
  558. struct pci_ats *ats;
  559. pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
  560. if (!pos)
  561. return -ENODEV;
  562. ats = kzalloc(sizeof(*ats), GFP_KERNEL);
  563. if (!ats)
  564. return -ENOMEM;
  565. ats->pos = pos;
  566. ats->stu = ps;
  567. pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
  568. ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
  569. PCI_ATS_MAX_QDEP;
  570. dev->ats = ats;
  571. return 0;
  572. }
  573. static void ats_free_one(struct pci_dev *dev)
  574. {
  575. kfree(dev->ats);
  576. dev->ats = NULL;
  577. }
/**
 * pci_enable_ats - enable the ATS capability
 * @dev: the PCI device
 * @ps: the IOMMU page shift
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_ats(struct pci_dev *dev, int ps)
{
	int rc;
	u16 ctrl;

	BUG_ON(dev->ats && dev->ats->is_enabled);

	if (ps < PCI_ATS_MIN_STU)
		return -EINVAL;

	/*
	 * A PF and its VFs share the PF's pci_ats, which is refcounted and
	 * protected by the PF's sriov lock; every user must request the
	 * same Smallest Translation Unit.
	 */
	if (dev->is_physfn || dev->is_virtfn) {
		struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;

		mutex_lock(&pdev->sriov->lock);
		if (pdev->ats)
			rc = pdev->ats->stu == ps ? 0 : -EINVAL;
		else
			rc = ats_alloc_one(pdev, ps);

		if (!rc)
			pdev->ats->ref_cnt++;
		mutex_unlock(&pdev->sriov->lock);
		if (rc)
			return rc;
	}

	/*
	 * Non-PF devices (plain endpoints and VFs) also carry their own
	 * pci_ats; a PF already got one in the branch above (pdev == dev).
	 */
	if (!dev->is_physfn) {
		rc = ats_alloc_one(dev, ps);
		if (rc)
			return rc;
	}

	ctrl = PCI_ATS_CTRL_ENABLE;
	/* VFs inherit the STU from their PF; only non-VFs program it. */
	if (!dev->is_virtfn)
		ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
	pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);

	dev->ats->is_enabled = 1;

	return 0;
}
/**
 * pci_disable_ats - disable the ATS capability
 * @dev: the PCI device
 */
void pci_disable_ats(struct pci_dev *dev)
{
	u16 ctrl;

	BUG_ON(!dev->ats || !dev->ats->is_enabled);

	pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
	ctrl &= ~PCI_ATS_CTRL_ENABLE;
	pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);

	dev->ats->is_enabled = 0;

	/* Drop our reference on the PF's shared pci_ats; free it when last. */
	if (dev->is_physfn || dev->is_virtfn) {
		struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;

		mutex_lock(&pdev->sriov->lock);
		pdev->ats->ref_cnt--;
		if (!pdev->ats->ref_cnt)
			ats_free_one(pdev);
		mutex_unlock(&pdev->sriov->lock);
	}

	/* Non-PF devices own a private pci_ats too; release it. */
	if (!dev->is_physfn)
		ats_free_one(dev);
}
  640. /**
  641. * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
  642. * @dev: the PCI device
  643. *
  644. * Returns the queue depth on success, or negative on failure.
  645. *
  646. * The ATS spec uses 0 in the Invalidate Queue Depth field to
  647. * indicate that the function can accept 32 Invalidate Request.
  648. * But here we use the `real' values (i.e. 1~32) for the Queue
  649. * Depth; and 0 indicates the function shares the Queue with
  650. * other functions (doesn't exclusively own a Queue).
  651. */
  652. int pci_ats_queue_depth(struct pci_dev *dev)
  653. {
  654. int pos;
  655. u16 cap;
  656. if (dev->is_virtfn)
  657. return 0;
  658. if (dev->ats)
  659. return dev->ats->qdep;
  660. pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
  661. if (!pos)
  662. return -ENODEV;
  663. pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
  664. return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
  665. PCI_ATS_MAX_QDEP;
  666. }