mic_virtio.c 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700
  1. /*
  2. * Intel MIC Platform Software Stack (MPSS)
  3. *
  4. * Copyright(c) 2013 Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License, version 2, as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * The full GNU General Public License is included in this distribution in
  16. * the file called "COPYING".
  17. *
  18. * Intel MIC Host driver.
  19. *
  20. */
  21. #include <linux/pci.h>
  22. #include <linux/sched.h>
  23. #include <linux/uaccess.h>
  24. #include <linux/mic_common.h>
  25. #include "../common/mic_dev.h"
  26. #include "mic_device.h"
  27. #include "mic_smpt.h"
  28. #include "mic_virtio.h"
  29. /*
  30. * Initiates the copies across the PCIe bus from card memory to
  31. * a user space buffer.
  32. */
  33. static int mic_virtio_copy_to_user(struct mic_vdev *mvdev,
  34. void __user *ubuf, size_t len, u64 addr)
  35. {
  36. int err;
  37. void __iomem *dbuf = mvdev->mdev->aper.va + addr;
  38. /*
  39. * We are copying from IO below an should ideally use something
  40. * like copy_to_user_fromio(..) if it existed.
  41. */
  42. if (copy_to_user(ubuf, dbuf, len)) {
  43. err = -EFAULT;
  44. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  45. __func__, __LINE__, err);
  46. goto err;
  47. }
  48. mvdev->in_bytes += len;
  49. err = 0;
  50. err:
  51. return err;
  52. }
  53. /*
  54. * Initiates copies across the PCIe bus from a user space
  55. * buffer to card memory.
  56. */
  57. static int mic_virtio_copy_from_user(struct mic_vdev *mvdev,
  58. void __user *ubuf, size_t len, u64 addr)
  59. {
  60. int err;
  61. void __iomem *dbuf = mvdev->mdev->aper.va + addr;
  62. /*
  63. * We are copying to IO below and should ideally use something
  64. * like copy_from_user_toio(..) if it existed.
  65. */
  66. if (copy_from_user(dbuf, ubuf, len)) {
  67. err = -EFAULT;
  68. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  69. __func__, __LINE__, err);
  70. goto err;
  71. }
  72. mvdev->out_bytes += len;
  73. err = 0;
  74. err:
  75. return err;
  76. }
  77. #define MIC_VRINGH_READ true
  78. /* The function to call to notify the card about added buffers */
  79. static void mic_notify(struct vringh *vrh)
  80. {
  81. struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh);
  82. struct mic_vdev *mvdev = mvrh->mvdev;
  83. s8 db = mvdev->dc->h2c_vdev_db;
  84. if (db != -1)
  85. mvdev->mdev->ops->send_intr(mvdev->mdev, db);
  86. }
  87. /* Determine the total number of bytes consumed in a VRINGH KIOV */
  88. static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
  89. {
  90. int i;
  91. u32 total = iov->consumed;
  92. for (i = 0; i < iov->i; i++)
  93. total += iov->iov[i].iov_len;
  94. return total;
  95. }
/*
 * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
 * This API is heavily based on the vringh_iov_xfer(..) implementation
 * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
 * and vringh_iov_push_kern(..) directly is because there is no
 * way to override the VRINGH xfer(..) routines as of v3.10.
 *
 * @mvdev: virtio device whose aperture the copies go through
 * @iov: KIOV whose iov_base entries are used as u64 card-side addresses
 *       (aperture offsets), not dereferenced as kernel pointers
 * @ubuf: user space buffer to copy to (read) or from (write)
 * @len: maximum number of bytes to transfer
 * @read: true copies card memory to @ubuf, false copies @ubuf to card
 * @out_len: set to the number of bytes actually transferred
 *
 * Returns 0 on success or the negative error from the first failed copy;
 * *out_len is valid in either case.
 */
static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
	void __user *ubuf, size_t len, bool read, size_t *out_len)
{
	int ret = 0;
	size_t partlen, tot_len = 0;

	while (len && iov->i < iov->used) {
		/* Transfer no more than the current KIOV element holds. */
		partlen = min(iov->iov[iov->i].iov_len, len);
		if (read)
			ret = mic_virtio_copy_to_user(mvdev,
				ubuf, partlen,
				(u64)iov->iov[iov->i].iov_base);
		else
			ret = mic_virtio_copy_from_user(mvdev,
				ubuf, partlen,
				(u64)iov->iov[iov->i].iov_base);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		/* Advance both the user buffer and the KIOV element. */
		len -= partlen;
		ubuf += partlen;
		tot_len += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;
		if (!iov->iov[iov->i].iov_len) {
			/*
			 * Fix up old iov element then increment: restore the
			 * element to its original len/base (consumed holds
			 * exactly what this element absorbed) so that
			 * mic_vringh_iov_consumed(..) can total it later,
			 * then move on to the next element.
			 */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;
			iov->consumed = 0;
			iov->i++;
		}
	}
	*out_len = tot_len;
	return ret;
}
/*
 * Use the standard VRINGH infrastructure in the kernel to fetch new
 * descriptors, initiate the copies and update the used ring.
 *
 * Walks the user supplied array of copy->iovcnt iovecs, pairing each one
 * first against the read KIOV (riov) and then the write KIOV (wiov)
 * fetched from the vring, until either side is exhausted. copy->out_len
 * accumulates the bytes transferred. If the user asked for a used-ring
 * update and something was copied, the descriptor chain is returned to
 * the ring and the card notified.
 *
 * Must be called with the corresponding vr_mutex held (see
 * mic_virtio_copy_desc). Returns 0 on success, negative on error.
 */
static int _mic_virtio_copy(struct mic_vdev *mvdev,
	struct mic_copy_desc *copy)
{
	int ret = 0, iovcnt = copy->iovcnt;
	struct iovec iov;
	struct iovec __user *u_iov = copy->iov;
	void __user *ubuf = NULL;
	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
	struct vringh_kiov *riov = &mvr->riov;
	struct vringh_kiov *wiov = &mvr->wiov;
	struct vringh *vrh = &mvr->vrh;
	u16 *head = &mvr->head;
	struct mic_vring *vr = &mvr->vring;
	size_t len = 0, out_len;

	copy->out_len = 0;
	/* Fetch a new IOVEC if all previous elements have been processed */
	if (riov->i == riov->used && wiov->i == wiov->used) {
		ret = vringh_getdesc_kern(vrh, riov, wiov,
				head, GFP_KERNEL);
		/* Check if there are available descriptors */
		if (ret <= 0)
			return ret;
	}
	while (iovcnt) {
		if (!len) {
			/* Copy over a new iovec from user space. */
			ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
			if (ret) {
				ret = -EINVAL;
				dev_err(mic_dev(mvdev), "%s %d err %d\n",
					__func__, __LINE__, ret);
				break;
			}
			len = iov.iov_len;
			ubuf = iov.iov_base;
		}
		/* Issue all the read descriptors first */
		ret = mic_vringh_copy(mvdev, riov, ubuf, len,
			MIC_VRINGH_READ, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		/* Issue the write descriptors next */
		ret = mic_vringh_copy(mvdev, wiov, ubuf, len,
			!MIC_VRINGH_READ, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		if (!len) {
			/* One user space iovec is now completed */
			iovcnt--;
			u_iov++;
		}
		/* Exit loop if all elements in KIOVs have been processed. */
		if (riov->i == riov->used && wiov->i == wiov->used)
			break;
	}
	/*
	 * Update the used ring if a descriptor was available and some data was
	 * copied in/out and the user asked for a used ring update.
	 */
	if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
		u32 total = 0;

		/* Determine the total data consumed */
		total += mic_vringh_iov_consumed(riov);
		total += mic_vringh_iov_consumed(wiov);
		vringh_complete_kern(vrh, *head, total);
		/* USHRT_MAX marks "no descriptor chain currently fetched". */
		*head = USHRT_MAX;
		if (vringh_need_notify_kern(vrh) > 0)
			vringh_notify(vrh);
		vringh_kiov_cleanup(riov);
		vringh_kiov_cleanup(wiov);
		/* Update avail idx for user space */
		vr->info->avail_idx = vrh->last_avail_idx;
	}
	return ret;
}
  231. static inline int mic_verify_copy_args(struct mic_vdev *mvdev,
  232. struct mic_copy_desc *copy)
  233. {
  234. if (copy->vr_idx >= mvdev->dd->num_vq) {
  235. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  236. __func__, __LINE__, -EINVAL);
  237. return -EINVAL;
  238. }
  239. return 0;
  240. }
  241. /* Copy a specified number of virtio descriptors in a chain */
  242. int mic_virtio_copy_desc(struct mic_vdev *mvdev,
  243. struct mic_copy_desc *copy)
  244. {
  245. int err;
  246. struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
  247. err = mic_verify_copy_args(mvdev, copy);
  248. if (err)
  249. return err;
  250. mutex_lock(&mvr->vr_mutex);
  251. if (!mic_vdevup(mvdev)) {
  252. err = -ENODEV;
  253. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  254. __func__, __LINE__, err);
  255. goto err;
  256. }
  257. err = _mic_virtio_copy(mvdev, copy);
  258. if (err) {
  259. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  260. __func__, __LINE__, err);
  261. }
  262. err:
  263. mutex_unlock(&mvr->vr_mutex);
  264. return err;
  265. }
  266. static void mic_virtio_init_post(struct mic_vdev *mvdev)
  267. {
  268. struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
  269. int i;
  270. for (i = 0; i < mvdev->dd->num_vq; i++) {
  271. if (!le64_to_cpu(vqconfig[i].used_address)) {
  272. dev_warn(mic_dev(mvdev), "used_address zero??\n");
  273. continue;
  274. }
  275. mvdev->mvr[i].vrh.vring.used =
  276. mvdev->mdev->aper.va +
  277. le64_to_cpu(vqconfig[i].used_address);
  278. }
  279. mvdev->dc->used_address_updated = 0;
  280. dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
  281. __func__, mvdev->virtio_id);
  282. }
/*
 * Reset a single virtio device: quiesce all its vrings under their
 * mutexes, clear the device status and reset flags, ack the reset to
 * the card via host_ack, and rewind every vring to its initial state.
 */
static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
{
	int i;

	dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
		__func__, mvdev->dd->status, mvdev->virtio_id);

	for (i = 0; i < mvdev->dd->num_vq; i++)
		/*
		 * Avoid lockdep false positive. The + 1 is for the mic
		 * mutex which is held in the reset devices code path.
		 */
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);

	/* 0 status means "reset" */
	mvdev->dd->status = 0;
	mvdev->dc->vdev_reset = 0;
	/* Signal the card that the reset has been processed. */
	mvdev->dc->host_ack = 1;

	/* Rewind all vrings: nothing available, nothing completed. */
	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct vringh *vrh = &mvdev->mvr[i].vrh;
		mvdev->mvr[i].vring.info->avail_idx = 0;
		vrh->completed = 0;
		vrh->last_avail_idx = 0;
		vrh->last_used_idx = 0;
	}

	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
}
  308. void mic_virtio_reset_devices(struct mic_device *mdev)
  309. {
  310. struct list_head *pos, *tmp;
  311. struct mic_vdev *mvdev;
  312. dev_dbg(mdev->sdev->parent, "%s\n", __func__);
  313. list_for_each_safe(pos, tmp, &mdev->vdev_list) {
  314. mvdev = list_entry(pos, struct mic_vdev, list);
  315. mic_virtio_device_reset(mvdev);
  316. mvdev->poll_wake = 1;
  317. wake_up(&mvdev->waitq);
  318. }
  319. }
  320. void mic_bh_handler(struct work_struct *work)
  321. {
  322. struct mic_vdev *mvdev = container_of(work, struct mic_vdev,
  323. virtio_bh_work);
  324. if (mvdev->dc->used_address_updated)
  325. mic_virtio_init_post(mvdev);
  326. if (mvdev->dc->vdev_reset)
  327. mic_virtio_device_reset(mvdev);
  328. mvdev->poll_wake = 1;
  329. wake_up(&mvdev->waitq);
  330. }
  331. static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
  332. {
  333. struct mic_vdev *mvdev = data;
  334. struct mic_device *mdev = mvdev->mdev;
  335. mdev->ops->ack_interrupt(mdev);
  336. schedule_work(&mvdev->virtio_bh_work);
  337. return IRQ_HANDLED;
  338. }
/*
 * Copy a new virtio config space from user space into the device page,
 * interrupt the card, and poll (up to ~10s) for the card to ack the
 * config change.
 *
 * Returns -EIO if there is no config doorbell or the device slot was
 * removed, -EFAULT if the user buffer is bad. NOTE(review): on a
 * successful ack, ret holds the positive remaining-jiffies value from
 * wait_event_timeout(..), not 0 — confirm callers treat > 0 as success.
 */
int mic_virtio_config_change(struct mic_vdev *mvdev,
			void __user *argp)
{
	/*
	 * NOTE(review): nothing ever wakes this on-stack waitqueue, so
	 * each wait_event_timeout(..) below simply re-checks guest_ack
	 * every 100 ms.
	 */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int ret = 0, retry = 100, i;
	struct mic_bootparam *bootparam = mvdev->mdev->dp;
	s8 db = bootparam->h2c_config_db;

	mutex_lock(&mvdev->mdev->mic_mutex);
	/* The + 1 subclass avoids a lockdep false positive vs mic_mutex. */
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);
	/* db == -1: no doorbell; type == -1: device slot marked removed. */
	if (db == -1 || mvdev->dd->type == -1) {
		ret = -EIO;
		goto exit;
	}
	if (copy_from_user(mic_vq_configspace(mvdev->dd),
		argp, mvdev->dd->config_len)) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		ret = -EFAULT;
		goto exit;
	}
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
	mvdev->mdev->ops->send_intr(mvdev->mdev, db);
	for (i = retry; i--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}
	dev_dbg(mic_dev(mvdev),
		"%s %d retry: %d\n", __func__, __LINE__, retry);
	/* Clear the handshake flags whether or not the guest acked. */
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
exit:
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
	mutex_unlock(&mvdev->mdev->mic_mutex);
	return ret;
}
/*
 * Validate a user supplied virtio device descriptor and copy it into
 * the first free slot of the device page.
 *
 * On success *type holds the requested device type (the slot's own type
 * field stays 0 until the caller finishes initialization and publishes
 * it) and *devpage points at the new in-device-page descriptor.
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 */
static int mic_copy_dp_entry(struct mic_vdev *mvdev,
	void __user *argp,
	__u8 *type,
	struct mic_device_desc **devpage)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc dd, *dd_config, *devp;
	struct mic_vqconfig *vqconfig;
	int ret = 0, i;
	bool slot_found = false;

	/* First fetch just the fixed-size header to learn the full size. */
	if (copy_from_user(&dd, argp, sizeof(dd))) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		return -EFAULT;
	}
	/* Sanity-check the sizes before trusting them for the allocation. */
	if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
		dd.num_vq > MIC_MAX_VRINGS) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EINVAL);
		return -EINVAL;
	}
	dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
	if (dd_config == NULL) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -ENOMEM);
		return -ENOMEM;
	}
	/*
	 * Fetch the complete descriptor.
	 * NOTE(review): this is a second copy of the same user buffer; a
	 * racing user thread could change fields between the two fetches
	 * (classic double-fetch). The loop below bounds itself with the
	 * first-fetch dd.num_vq, but dd_config's own header fields are
	 * used later — confirm all derived sizes are re-validated.
	 */
	if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
		ret = -EFAULT;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}
	vqconfig = mic_vq_config(dd_config);
	for (i = 0; i < dd.num_vq; i++) {
		if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
			ret = -EINVAL;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto exit;
		}
	}
	/* Find the first free device page entry */
	for (i = mic_aligned_size(struct mic_bootparam);
		i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
		i += mic_total_desc_size(devp)) {
		devp = mdev->dp + i;
		/* type 0 = never used; type -1 = previously removed. */
		if (devp->type == 0 || devp->type == -1) {
			slot_found = true;
			break;
		}
	}
	if (!slot_found) {
		ret = -EINVAL;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}
	/*
	 * Save off the type before doing the memcpy. Type will be set in the
	 * end after completing all initialization for the new device.
	 */
	*type = dd_config->type;
	dd_config->type = 0;
	memcpy(devp, dd_config, mic_desc_size(dd_config));

	*devpage = devp;
exit:
	kfree(dd_config);
	return ret;
}
  448. static void mic_init_device_ctrl(struct mic_vdev *mvdev,
  449. struct mic_device_desc *devpage)
  450. {
  451. struct mic_device_ctrl *dc;
  452. dc = (void *)devpage + mic_aligned_desc_size(devpage);
  453. dc->config_change = 0;
  454. dc->guest_ack = 0;
  455. dc->vdev_reset = 0;
  456. dc->host_ack = 0;
  457. dc->used_address_updated = 0;
  458. dc->c2h_vdev_db = -1;
  459. dc->h2c_vdev_db = -1;
  460. mvdev->dc = dc;
  461. }
/*
 * Register a new virtio device with the MIC card: copy the device
 * descriptor into the device page, allocate and DMA-map one host vring
 * per virtqueue, set up the host-side vringh state, hook up the doorbell
 * interrupt, and finally publish the device by writing its type (the
 * card scans the device page for non-zero types).
 *
 * Returns 0 on success or a negative error, with any partially created
 * vrings torn down.
 */
int mic_virtio_add_device(struct mic_vdev *mvdev,
			void __user *argp)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc *dd;
	struct mic_vqconfig *vqconfig;
	int vr_size, i, j, ret;
	u8 type;
	s8 db;
	/*
	 * NOTE(review): 10 bytes is too small for e.g. "mic0virtio0"
	 * (11 chars + NUL); snprintf truncates the IRQ name silently —
	 * consider enlarging.
	 */
	char irqname[10];
	struct mic_bootparam *bootparam = mdev->dp;
	u16 num;

	mutex_lock(&mdev->mic_mutex);

	/* Validate the descriptor and reserve a device page slot. */
	ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
	if (ret) {
		mutex_unlock(&mdev->mic_mutex);
		return ret;
	}

	mic_init_device_ctrl(mvdev, dd);

	mvdev->dd = dd;
	mvdev->virtio_id = type;
	vqconfig = mic_vq_config(dd);
	INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);

	for (i = 0; i < dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];
		struct mic_vring *vr = &mvdev->mvr[i].vring;

		num = le16_to_cpu(vqconfig[i].num);
		mutex_init(&mvr->vr_mutex);
		/* Ring plus the trailing _mic_vring_info, page aligned. */
		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
			sizeof(struct _mic_vring_info));
		vr->va = (void *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(vr_size));
		if (!vr->va) {
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vr->len = vr_size;
		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
		vr->info->magic = MIC_MAGIC + mvdev->virtio_id + i;
		/* DMA-map the ring so the card can reach it over PCIe. */
		vqconfig[i].address = mic_map_single(mdev,
			vr->va, vr_size);
		if (mic_map_error(vqconfig[i].address)) {
			free_pages((unsigned long)vr->va, get_order(vr_size));
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vqconfig[i].address = cpu_to_le64(vqconfig[i].address);

		vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
		ret = vringh_init_kern(&mvr->vrh,
			*(u32 *)mic_vq_features(mvdev->dd), num, false,
			vr->vr.desc, vr->vr.avail, vr->vr.used);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vringh_kiov_init(&mvr->riov, NULL, 0);
		vringh_kiov_init(&mvr->wiov, NULL, 0);
		/* USHRT_MAX marks "no descriptor chain currently fetched". */
		mvr->head = USHRT_MAX;
		mvr->mvdev = mvdev;
		mvr->vrh.notify = mic_notify;
		dev_dbg(mdev->sdev->parent,
			"%s %d index %d va %p info %p vr_size 0x%x\n",
			__func__, __LINE__, i, vr->va, vr->info, vr_size);
	}

	snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
		mvdev->virtio_id);
	mvdev->virtio_db = mic_next_db(mdev);
	mvdev->virtio_cookie = mic_request_irq(mdev, mic_virtio_intr_handler,
			irqname, mvdev, mvdev->virtio_db, MIC_INTR_DB);
	if (IS_ERR(mvdev->virtio_cookie)) {
		ret = PTR_ERR(mvdev->virtio_cookie);
		dev_dbg(mdev->sdev->parent, "request irq failed\n");
		goto err;
	}

	mvdev->dc->c2h_vdev_db = mvdev->virtio_db;

	list_add_tail(&mvdev->list, &mdev->vdev_list);
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	dd->type = type;

	dev_dbg(mdev->sdev->parent, "Added virtio device id %d\n", dd->type);

	/* Ring the config doorbell (if assigned) so the card rescans. */
	db = bootparam->h2c_config_db;
	if (db != -1)
		mdev->ops->send_intr(mdev, db);
	mutex_unlock(&mdev->mic_mutex);
	return 0;
err:
	/* Unwind the 0..i-1 fully initialized vrings. */
	vqconfig = mic_vq_config(dd);
	for (j = 0; j < i; j++) {
		struct mic_vringh *mvr = &mvdev->mvr[j];
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
			mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			get_order(mvr->vring.len));
	}
	mutex_unlock(&mdev->mic_mutex);
	return ret;
}
/*
 * Hot-remove a virtio device: ask the card to release it (when a config
 * doorbell exists), free the interrupt, flush the bottom half, release
 * vring DMA mappings and pages, unlink it from the device list, and
 * mark its device page slot removed (-1) so it can be reused.
 */
void mic_virtio_del_device(struct mic_vdev *mvdev)
{
	struct list_head *pos, *tmp;
	struct mic_vdev *tmp_mvdev;
	struct mic_device *mdev = mvdev->mdev;
	/*
	 * NOTE(review): nothing wakes this on-stack waitqueue; each
	 * wait_event_timeout(..) below just re-checks guest_ack every
	 * 100 ms (up to ~10s total).
	 */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int i, ret, retry = 100;
	struct mic_vqconfig *vqconfig;
	struct mic_bootparam *bootparam = mdev->dp;
	s8 db;

	mutex_lock(&mdev->mic_mutex);
	db = bootparam->h2c_config_db;
	/* No doorbell: cannot negotiate removal with the card. */
	if (db == -1)
		goto skip_hot_remove;
	dev_dbg(mdev->sdev->parent,
		"Requesting hot remove id %d\n", mvdev->virtio_id);
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
	mdev->ops->send_intr(mdev, db);
	for (i = retry; i--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}
	dev_dbg(mdev->sdev->parent,
		"Device id %d config_change %d guest_ack %d\n",
		mvdev->virtio_id, mvdev->dc->config_change,
		mvdev->dc->guest_ack);
	/* Clear the handshake flags whether or not the guest acked. */
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
skip_hot_remove:
	mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
	/* Ensure no bottom half is still running against this device. */
	flush_work(&mvdev->virtio_bh_work);
	vqconfig = mic_vq_config(mvdev->dd);
	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];
		vringh_kiov_cleanup(&mvr->riov);
		vringh_kiov_cleanup(&mvr->wiov);
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
			mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			get_order(mvr->vring.len));
	}

	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
		tmp_mvdev = list_entry(pos, struct mic_vdev, list);
		if (tmp_mvdev == mvdev) {
			list_del(pos);
			dev_dbg(mdev->sdev->parent,
				"Removing virtio device id %d\n",
				mvdev->virtio_id);
			break;
		}
	}
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	mvdev->dd->type = -1;
	mutex_unlock(&mdev->mic_mutex);
}