/*
 * drivers/pci/iov.c
 *
 * Copyright (C) 2009 Intel Corporation, Yu Zhao <yu.zhao@intel.com>
 *
 * PCI Express I/O Virtualization (IOV) support.
 *   Single Root IOV 1.0
 *   Address Translation Service 1.0
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/delay.h>
#include "pci.h"

#define VIRTFN_ID_LEN	16
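
/*
 * Virtual Functions are addressed relative to their parent PF, per the
 * SR-IOV spec: VF routing ID = PF routing ID + First VF Offset +
 * id * VF Stride, where a routing ID is (bus << 8) | devfn.  For
 * example, a PF at devfn 0x00 on bus 0x01 with offset 0x80 and stride 1
 * places VF 2 at devfn (0x00 + 0x80 + 2) & 0xff = 0x82 (01:10.2); any
 * carry out of the low 8 bits moves the VF onto a higher bus number,
 * which is what virtfn_bus() below accounts for.
 */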

static inline u8 virtfn_bus(struct pci_dev *dev, int id)
{
        return dev->bus->number + ((dev->devfn + dev->sriov->offset +
                                    dev->sriov->stride * id) >> 8);
}

static inline u8 virtfn_devfn(struct pci_dev *dev, int id)
{
        return (dev->devfn + dev->sriov->offset +
                dev->sriov->stride * id) & 0xff;
}

static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr)
{
        int rc;
        struct pci_bus *child;

        if (bus->number == busnr)
                return bus;

        child = pci_find_bus(pci_domain_nr(bus), busnr);
        if (child)
                return child;

        child = pci_add_new_bus(bus, NULL, busnr);
        if (!child)
                return NULL;

        child->subordinate = busnr;
        child->dev.parent = bus->bridge;
        rc = pci_bus_add_child(child);
        if (rc) {
                pci_remove_bus(child);
                return NULL;
        }

        return child;
}

static void virtfn_remove_bus(struct pci_bus *bus, int busnr)
{
        struct pci_bus *child;

        if (bus->number == busnr)
                return;

        child = pci_find_bus(pci_domain_nr(bus), busnr);
        BUG_ON(!child);

        if (list_empty(&child->devices))
                pci_remove_bus(child);
}

static int virtfn_add(struct pci_dev *dev, int id, int reset)
{
        int i;
        int rc;
        u64 size;
        char buf[VIRTFN_ID_LEN];
        struct pci_dev *virtfn;
        struct resource *res;
        struct pci_sriov *iov = dev->sriov;

        virtfn = alloc_pci_dev();
        if (!virtfn)
                return -ENOMEM;

        mutex_lock(&iov->dev->sriov->lock);
        virtfn->bus = virtfn_add_bus(dev->bus, virtfn_bus(dev, id));
        if (!virtfn->bus) {
                kfree(virtfn);
                mutex_unlock(&iov->dev->sriov->lock);
                return -ENOMEM;
        }
        virtfn->devfn = virtfn_devfn(dev, id);
        virtfn->vendor = dev->vendor;
        pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device);
        pci_setup_device(virtfn);
        virtfn->dev.parent = dev->dev.parent;

        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = dev->resource + PCI_IOV_RESOURCES + i;
                if (!res->parent)
                        continue;
                virtfn->resource[i].name = pci_name(virtfn);
                virtfn->resource[i].flags = res->flags;
                size = resource_size(res);
                do_div(size, iov->total);
                virtfn->resource[i].start = res->start + size * id;
                virtfn->resource[i].end = virtfn->resource[i].start + size - 1;
                rc = request_resource(res, &virtfn->resource[i]);
                BUG_ON(rc);
        }

        if (reset)
                __pci_reset_function(virtfn);

        pci_device_add(virtfn, virtfn->bus);
        mutex_unlock(&iov->dev->sriov->lock);

        virtfn->physfn = pci_dev_get(dev);
        virtfn->is_virtfn = 1;

        rc = pci_bus_add_device(virtfn);
        if (rc)
                goto failed1;
        sprintf(buf, "virtfn%u", id);
        rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf);
        if (rc)
                goto failed1;
        rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn");
        if (rc)
                goto failed2;

        kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE);

        return 0;

failed2:
        sysfs_remove_link(&dev->dev.kobj, buf);
failed1:
        pci_dev_put(dev);
        mutex_lock(&iov->dev->sriov->lock);
        pci_remove_bus_device(virtfn);
        virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
        mutex_unlock(&iov->dev->sriov->lock);

        return rc;
}

static void virtfn_remove(struct pci_dev *dev, int id, int reset)
{
        char buf[VIRTFN_ID_LEN];
        struct pci_bus *bus;
        struct pci_dev *virtfn;
        struct pci_sriov *iov = dev->sriov;

        bus = pci_find_bus(pci_domain_nr(dev->bus), virtfn_bus(dev, id));
        if (!bus)
                return;

        virtfn = pci_get_slot(bus, virtfn_devfn(dev, id));
        if (!virtfn)
                return;

        pci_dev_put(virtfn);

        if (reset) {
                device_release_driver(&virtfn->dev);
                __pci_reset_function(virtfn);
        }

        sprintf(buf, "virtfn%u", id);
        sysfs_remove_link(&dev->dev.kobj, buf);
        sysfs_remove_link(&virtfn->dev.kobj, "physfn");

        mutex_lock(&iov->dev->sriov->lock);
        pci_remove_bus_device(virtfn);
        virtfn_remove_bus(dev->bus, virtfn_bus(dev, id));
        mutex_unlock(&iov->dev->sriov->lock);

        pci_dev_put(dev);
}

static int sriov_migration(struct pci_dev *dev)
{
        u16 status;
        struct pci_sriov *iov = dev->sriov;

        if (!iov->nr_virtfn)
                return 0;

        if (!(iov->cap & PCI_SRIOV_CAP_VFM))
                return 0;

        pci_read_config_word(dev, iov->pos + PCI_SRIOV_STATUS, &status);
        if (!(status & PCI_SRIOV_STATUS_VFM))
                return 0;

        schedule_work(&iov->mtask);

        return 1;
}

static void sriov_migration_task(struct work_struct *work)
{
        int i;
        u8 state;
        u16 status;
        struct pci_sriov *iov = container_of(work, struct pci_sriov, mtask);

        for (i = iov->initial; i < iov->nr_virtfn; i++) {
                state = readb(iov->mstate + i);
                if (state == PCI_SRIOV_VFM_MI) {
                        writeb(PCI_SRIOV_VFM_AV, iov->mstate + i);
                        state = readb(iov->mstate + i);
                        if (state == PCI_SRIOV_VFM_AV)
                                virtfn_add(iov->self, i, 1);
                } else if (state == PCI_SRIOV_VFM_MO) {
                        virtfn_remove(iov->self, i, 1);
                        writeb(PCI_SRIOV_VFM_UA, iov->mstate + i);
                        state = readb(iov->mstate + i);
                        if (state == PCI_SRIOV_VFM_AV)
                                virtfn_add(iov->self, i, 0);
                }
        }

        pci_read_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, &status);
        status &= ~PCI_SRIOV_STATUS_VFM;
        pci_write_config_word(iov->self, iov->pos + PCI_SRIOV_STATUS, status);
}

static int sriov_enable_migration(struct pci_dev *dev, int nr_virtfn)
{
        int bir;
        u32 table;
        resource_size_t pa;
        struct pci_sriov *iov = dev->sriov;

        if (nr_virtfn <= iov->initial)
                return 0;

        pci_read_config_dword(dev, iov->pos + PCI_SRIOV_VFM, &table);
        bir = PCI_SRIOV_VFM_BIR(table);
        if (bir > PCI_STD_RESOURCE_END)
                return -EIO;

        table = PCI_SRIOV_VFM_OFFSET(table);
        if (table + nr_virtfn > pci_resource_len(dev, bir))
                return -EIO;

        pa = pci_resource_start(dev, bir) + table;
        iov->mstate = ioremap(pa, nr_virtfn);
        if (!iov->mstate)
                return -ENOMEM;

        INIT_WORK(&iov->mtask, sriov_migration_task);

        iov->ctrl |= PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR;
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

        return 0;
}

static void sriov_disable_migration(struct pci_dev *dev)
{
        struct pci_sriov *iov = dev->sriov;

        iov->ctrl &= ~(PCI_SRIOV_CTRL_VFM | PCI_SRIOV_CTRL_INTR);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);

        cancel_work_sync(&iov->mtask);
        iounmap(iov->mstate);
}

static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
{
        int rc;
        int i, j;
        int nres;
        u16 offset, stride, initial;
        struct resource *res;
        struct pci_dev *pdev;
        struct pci_sriov *iov = dev->sriov;

        if (!nr_virtfn)
                return 0;

        if (iov->nr_virtfn)
                return -EINVAL;

        pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial);
        if (initial > iov->total ||
            (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total)))
                return -EIO;

        if (nr_virtfn < 0 || nr_virtfn > iov->total ||
            (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
                return -EINVAL;

        pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
        pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
        if (!offset || (nr_virtfn > 1 && !stride))
                return -EIO;

        nres = 0;
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = dev->resource + PCI_IOV_RESOURCES + i;
                if (res->parent)
                        nres++;
        }
        if (nres != iov->nres) {
                dev_err(&dev->dev, "not enough MMIO resources for SR-IOV\n");
                return -ENOMEM;
        }

        iov->offset = offset;
        iov->stride = stride;

        if (virtfn_bus(dev, nr_virtfn - 1) > dev->bus->subordinate) {
                dev_err(&dev->dev, "SR-IOV: bus number out of range\n");
                return -ENOMEM;
        }

        if (iov->link != dev->devfn) {
                pdev = pci_get_slot(dev->bus, iov->link);
                if (!pdev)
                        return -ENODEV;

                pci_dev_put(pdev);

                if (!pdev->is_physfn)
                        return -ENODEV;

                rc = sysfs_create_link(&dev->dev.kobj,
                                       &pdev->dev.kobj, "dep_link");
                if (rc)
                        return rc;
        }

        iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
        pci_block_user_cfg_access(dev);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
        msleep(100);
        pci_unblock_user_cfg_access(dev);

        iov->initial = initial;
        if (nr_virtfn < initial)
                initial = nr_virtfn;

        for (i = 0; i < initial; i++) {
                rc = virtfn_add(dev, i, 0);
                if (rc)
                        goto failed;
        }

        if (iov->cap & PCI_SRIOV_CAP_VFM) {
                rc = sriov_enable_migration(dev, nr_virtfn);
                if (rc)
                        goto failed;
        }

        kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE);
        iov->nr_virtfn = nr_virtfn;

        return 0;

failed:
        for (j = 0; j < i; j++)
                virtfn_remove(dev, j, 0);

        iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
        pci_block_user_cfg_access(dev);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
        ssleep(1);
        pci_unblock_user_cfg_access(dev);

        if (iov->link != dev->devfn)
                sysfs_remove_link(&dev->dev.kobj, "dep_link");

        return rc;
}

static void sriov_disable(struct pci_dev *dev)
{
        int i;
        struct pci_sriov *iov = dev->sriov;

        if (!iov->nr_virtfn)
                return;

        if (iov->cap & PCI_SRIOV_CAP_VFM)
                sriov_disable_migration(dev);

        for (i = 0; i < iov->nr_virtfn; i++)
                virtfn_remove(dev, i, 0);

        iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
        pci_block_user_cfg_access(dev);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
        ssleep(1);
        pci_unblock_user_cfg_access(dev);

        if (iov->link != dev->devfn)
                sysfs_remove_link(&dev->dev.kobj, "dep_link");

        iov->nr_virtfn = 0;
}

static int sriov_init(struct pci_dev *dev, int pos)
{
        int i;
        int rc;
        int nres;
        u32 pgsz;
        u16 ctrl, total, offset, stride;
        struct pci_sriov *iov;
        struct resource *res;
        struct pci_dev *pdev;

        if (dev->pcie_type != PCI_EXP_TYPE_RC_END &&
            dev->pcie_type != PCI_EXP_TYPE_ENDPOINT)
                return -ENODEV;

        pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl);
        if (ctrl & PCI_SRIOV_CTRL_VFE) {
                pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0);
                ssleep(1);
        }

        pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total);
        if (!total)
                return 0;

        ctrl = 0;
        list_for_each_entry(pdev, &dev->bus->devices, bus_list)
                if (pdev->is_physfn)
                        goto found;

        pdev = NULL;
        if (pci_ari_enabled(dev->bus))
                ctrl |= PCI_SRIOV_CTRL_ARI;

found:
        pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
        pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
        pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
        if (!offset || (total > 1 && !stride))
                return -EIO;

        pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz);
        i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0;
        pgsz &= ~((1 << i) - 1);
        if (!pgsz)
                return -EIO;

        pgsz &= ~(pgsz - 1);
        pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz);

        nres = 0;
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = dev->resource + PCI_IOV_RESOURCES + i;
                i += __pci_read_base(dev, pci_bar_unknown, res,
                                     pos + PCI_SRIOV_BAR + i * 4);
                if (!res->flags)
                        continue;
                if (resource_size(res) & (PAGE_SIZE - 1)) {
                        rc = -EIO;
                        goto failed;
                }
                res->end = res->start + resource_size(res) * total - 1;
                nres++;
        }

        iov = kzalloc(sizeof(*iov), GFP_KERNEL);
        if (!iov) {
                rc = -ENOMEM;
                goto failed;
        }

        iov->pos = pos;
        iov->nres = nres;
        iov->ctrl = ctrl;
        iov->total = total;
        iov->offset = offset;
        iov->stride = stride;
        iov->pgsz = pgsz;
        iov->self = dev;
        pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap);
        pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
        if (dev->pcie_type == PCI_EXP_TYPE_RC_END)
                iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link);

        if (pdev)
                iov->dev = pci_dev_get(pdev);
        else
                iov->dev = dev;

        mutex_init(&iov->lock);

        dev->sriov = iov;
        dev->is_physfn = 1;

        return 0;

failed:
        for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
                res = dev->resource + PCI_IOV_RESOURCES + i;
                res->flags = 0;
        }

        return rc;
}

static void sriov_release(struct pci_dev *dev)
{
        BUG_ON(dev->sriov->nr_virtfn);

        if (dev != dev->sriov->dev)
                pci_dev_put(dev->sriov->dev);

        mutex_destroy(&dev->sriov->lock);

        kfree(dev->sriov);
        dev->sriov = NULL;
}

static void sriov_restore_state(struct pci_dev *dev)
{
        int i;
        u16 ctrl;
        struct pci_sriov *iov = dev->sriov;

        pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl);
        if (ctrl & PCI_SRIOV_CTRL_VFE)
                return;

        for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++)
                pci_update_resource(dev, i);

        pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, iov->nr_virtfn);
        pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
        if (iov->ctrl & PCI_SRIOV_CTRL_VFE)
                msleep(100);
}

/**
 * pci_iov_init - initialize the IOV capability
 * @dev: the PCI device
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_iov_init(struct pci_dev *dev)
{
        int pos;

        if (!pci_is_pcie(dev))
                return -ENODEV;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
        if (pos)
                return sriov_init(dev, pos);

        return -ENODEV;
}

/**
 * pci_iov_release - release resources used by the IOV capability
 * @dev: the PCI device
 */
void pci_iov_release(struct pci_dev *dev)
{
        if (dev->is_physfn)
                sriov_release(dev);
}

/**
 * pci_iov_resource_bar - get position of the SR-IOV BAR
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns position of the BAR encapsulated in the SR-IOV capability.
 */
int pci_iov_resource_bar(struct pci_dev *dev, int resno,
                         enum pci_bar_type *type)
{
        if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
                return 0;

        BUG_ON(!dev->is_physfn);

        *type = pci_bar_unknown;

        return dev->sriov->pos + PCI_SRIOV_BAR +
                4 * (resno - PCI_IOV_RESOURCES);
}

/**
 * pci_sriov_resource_alignment - get resource alignment for VF BAR
 * @dev: the PCI device
 * @resno: the resource number
 *
 * Returns the alignment of the VF BAR found in the SR-IOV capability.
 * This is not the same as the resource size which is defined as
 * the VF BAR size multiplied by the number of VFs.  The alignment
 * is just the VF BAR size.
 */
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno)
{
        struct resource tmp;
        enum pci_bar_type type;
        int reg = pci_iov_resource_bar(dev, resno, &type);

        if (!reg)
                return 0;

        __pci_read_base(dev, type, &tmp, reg);

        return resource_alignment(&tmp);
}

/**
 * pci_restore_iov_state - restore the state of the IOV capability
 * @dev: the PCI device
 */
void pci_restore_iov_state(struct pci_dev *dev)
{
        if (dev->is_physfn)
                sriov_restore_state(dev);
}

/**
 * pci_iov_bus_range - find the bus range used by Virtual Functions
 * @bus: the PCI bus
 *
 * Returns the maximum number of buses (excluding the current one)
 * used by Virtual Functions.
 */
int pci_iov_bus_range(struct pci_bus *bus)
{
        int max = 0;
        u8 busnr;
        struct pci_dev *dev;

        list_for_each_entry(dev, &bus->devices, bus_list) {
                if (!dev->is_physfn)
                        continue;
                busnr = virtfn_bus(dev, dev->sriov->total - 1);
                if (busnr > max)
                        max = busnr;
        }

        return max ? max - bus->number : 0;
}

/**
 * pci_enable_sriov - enable the SR-IOV capability
 * @dev: the PCI device
 * @nr_virtfn: number of virtual functions to enable
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
{
        might_sleep();

        if (!dev->is_physfn)
                return -ENODEV;

        return sriov_enable(dev, nr_virtfn);
}
EXPORT_SYMBOL_GPL(pci_enable_sriov);

/**
 * pci_disable_sriov - disable the SR-IOV capability
 * @dev: the PCI device
 */
void pci_disable_sriov(struct pci_dev *dev)
{
        might_sleep();

        if (!dev->is_physfn)
                return;

        sriov_disable(dev);
}
EXPORT_SYMBOL_GPL(pci_disable_sriov);
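
/*
 * Typical use by a Physical Function driver (an illustrative sketch only;
 * the "foo" driver, its callbacks, and the choice of four VFs are
 * hypothetical, and error handling is abbreviated):
 *
 *      static int foo_probe(struct pci_dev *pdev,
 *                           const struct pci_device_id *id)
 *      {
 *              int err = pci_enable_device(pdev);
 *
 *              if (err)
 *                      return err;
 *
 *              err = pci_enable_sriov(pdev, 4);
 *              if (err)
 *                      dev_warn(&pdev->dev, "failed to enable VFs: %d\n", err);
 *              return 0;
 *      }
 *
 *      static void foo_remove(struct pci_dev *pdev)
 *      {
 *              pci_disable_sriov(pdev);
 *              pci_disable_device(pdev);
 *      }
 */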

/**
 * pci_sriov_migration - notify the SR-IOV core of Virtual Function Migration
 * @dev: the PCI device
 *
 * Returns IRQ_HANDLED if the IRQ is handled, or IRQ_NONE if not.
 *
 * The Physical Function driver is responsible for registering an IRQ
 * handler using the VF Migration Interrupt Message Number, and for
 * calling this function when the interrupt is generated by the hardware.
 */
irqreturn_t pci_sriov_migration(struct pci_dev *dev)
{
        if (!dev->is_physfn)
                return IRQ_NONE;

        return sriov_migration(dev) ? IRQ_HANDLED : IRQ_NONE;
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);
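
/*
 * The VF Migration Interrupt Message Number is reported in the SR-IOV
 * capability; the PF driver requests that vector and forwards the
 * interrupt here.  An illustrative sketch (the "foo" handler name is
 * hypothetical, and how the vector is looked up is left out):
 *
 *      static irqreturn_t foo_vfm_irq(int irq, void *data)
 *      {
 *              struct pci_dev *pdev = data;
 *
 *              return pci_sriov_migration(pdev);
 *      }
 */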

/**
 * pci_num_vf - return number of VFs associated with a PF
 * @dev: the PCI device
 *
 * Returns number of VFs, or 0 if SR-IOV is not enabled.
 */
int pci_num_vf(struct pci_dev *dev)
{
        if (!dev || !dev->is_physfn)
                return 0;
        else
                return dev->sriov->nr_virtfn;
}
EXPORT_SYMBOL_GPL(pci_num_vf);

static int ats_alloc_one(struct pci_dev *dev, int ps)
{
        int pos;
        u16 cap;
        struct pci_ats *ats;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
        if (!pos)
                return -ENODEV;

        ats = kzalloc(sizeof(*ats), GFP_KERNEL);
        if (!ats)
                return -ENOMEM;

        ats->pos = pos;
        ats->stu = ps;
        pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);
        ats->qdep = PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
                                            PCI_ATS_MAX_QDEP;
        dev->ats = ats;

        return 0;
}

static void ats_free_one(struct pci_dev *dev)
{
        kfree(dev->ats);
        dev->ats = NULL;
}

/**
 * pci_enable_ats - enable the ATS capability
 * @dev: the PCI device
 * @ps: the IOMMU page shift
 *
 * Returns 0 on success, or negative on failure.
 */
int pci_enable_ats(struct pci_dev *dev, int ps)
{
        int rc;
        u16 ctrl;

        BUG_ON(dev->ats && dev->ats->is_enabled);

        if (ps < PCI_ATS_MIN_STU)
                return -EINVAL;

        if (dev->is_physfn || dev->is_virtfn) {
                struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;

                mutex_lock(&pdev->sriov->lock);
                if (pdev->ats)
                        rc = pdev->ats->stu == ps ? 0 : -EINVAL;
                else
                        rc = ats_alloc_one(pdev, ps);

                if (!rc)
                        pdev->ats->ref_cnt++;
                mutex_unlock(&pdev->sriov->lock);
                if (rc)
                        return rc;
        }

        if (!dev->is_physfn) {
                rc = ats_alloc_one(dev, ps);
                if (rc)
                        return rc;
        }

        ctrl = PCI_ATS_CTRL_ENABLE;
        if (!dev->is_virtfn)
                ctrl |= PCI_ATS_CTRL_STU(ps - PCI_ATS_MIN_STU);
        pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);

        dev->ats->is_enabled = 1;

        return 0;
}

/**
 * pci_disable_ats - disable the ATS capability
 * @dev: the PCI device
 */
void pci_disable_ats(struct pci_dev *dev)
{
        u16 ctrl;

        BUG_ON(!dev->ats || !dev->ats->is_enabled);

        pci_read_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, &ctrl);
        ctrl &= ~PCI_ATS_CTRL_ENABLE;
        pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);

        dev->ats->is_enabled = 0;

        if (dev->is_physfn || dev->is_virtfn) {
                struct pci_dev *pdev = dev->is_physfn ? dev : dev->physfn;

                mutex_lock(&pdev->sriov->lock);
                pdev->ats->ref_cnt--;
                if (!pdev->ats->ref_cnt)
                        ats_free_one(pdev);
                mutex_unlock(&pdev->sriov->lock);
        }

        if (!dev->is_physfn)
                ats_free_one(dev);
}
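
/*
 * ATS is normally driven by an IOMMU driver rather than the endpoint's
 * own driver: when a device is attached to a translation domain, ATS is
 * enabled with the IOMMU's page shift (PCI_ATS_MIN_STU, i.e. 12,
 * corresponds to the minimum 4 KB translation unit), and disabled again
 * on detach.  An illustrative sketch, assuming PAGE_SHIFT-sized IOMMU
 * pages:
 *
 *      if (pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
 *              ... issue translated DMA, send invalidations as needed ...
 *              pci_disable_ats(pdev);
 *      }
 */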

/**
 * pci_ats_queue_depth - query the ATS Invalidate Queue Depth
 * @dev: the PCI device
 *
 * Returns the queue depth on success, or negative on failure.
 *
 * The ATS spec uses 0 in the Invalidate Queue Depth field to
 * indicate that the function can accept 32 Invalidate Requests.
 * But here we use the `real' values (i.e. 1~32) for the Queue
 * Depth; and 0 indicates the function shares the Queue with
 * other functions (doesn't exclusively own a Queue).
 */
int pci_ats_queue_depth(struct pci_dev *dev)
{
        int pos;
        u16 cap;

        if (dev->is_virtfn)
                return 0;

        if (dev->ats)
                return dev->ats->qdep;

        pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS);
        if (!pos)
                return -ENODEV;

        pci_read_config_word(dev, pos + PCI_ATS_CAP, &cap);

        return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) :
                                       PCI_ATS_MAX_QDEP;
}