/* mic_virtio.c */
  1. /*
  2. * Intel MIC Platform Software Stack (MPSS)
  3. *
  4. * Copyright(c) 2013 Intel Corporation.
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License, version 2, as
  8. * published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * The full GNU General Public License is included in this distribution in
  16. * the file called "COPYING".
  17. *
  18. * Intel MIC Host driver.
  19. *
  20. */
  21. #include <linux/pci.h>
  22. #include <linux/sched.h>
  23. #include <linux/uaccess.h>
  24. #include <linux/mic_common.h>
  25. #include "../common/mic_device.h"
  26. #include "mic_device.h"
  27. #include "mic_smpt.h"
  28. #include "mic_virtio.h"
  29. /*
  30. * Initiates the copies across the PCIe bus from card memory to
  31. * a user space buffer.
  32. */
  33. static int mic_virtio_copy_to_user(struct mic_vdev *mvdev,
  34. void __user *ubuf, size_t len, u64 addr)
  35. {
  36. int err;
  37. void __iomem *dbuf = mvdev->mdev->aper.va + addr;
  38. /*
  39. * We are copying from IO below an should ideally use something
  40. * like copy_to_user_fromio(..) if it existed.
  41. */
  42. if (copy_to_user(ubuf, dbuf, len)) {
  43. err = -EFAULT;
  44. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  45. __func__, __LINE__, err);
  46. goto err;
  47. }
  48. mvdev->in_bytes += len;
  49. err = 0;
  50. err:
  51. return err;
  52. }
  53. /*
  54. * Initiates copies across the PCIe bus from a user space
  55. * buffer to card memory.
  56. */
  57. static int mic_virtio_copy_from_user(struct mic_vdev *mvdev,
  58. void __user *ubuf, size_t len, u64 addr)
  59. {
  60. int err;
  61. void __iomem *dbuf = mvdev->mdev->aper.va + addr;
  62. /*
  63. * We are copying to IO below and should ideally use something
  64. * like copy_from_user_toio(..) if it existed.
  65. */
  66. if (copy_from_user(dbuf, ubuf, len)) {
  67. err = -EFAULT;
  68. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  69. __func__, __LINE__, err);
  70. goto err;
  71. }
  72. mvdev->out_bytes += len;
  73. err = 0;
  74. err:
  75. return err;
  76. }
  77. #define MIC_VRINGH_READ true
  78. /* The function to call to notify the card about added buffers */
  79. static void mic_notify(struct vringh *vrh)
  80. {
  81. struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh);
  82. struct mic_vdev *mvdev = mvrh->mvdev;
  83. s8 db = mvdev->dc->h2c_vdev_db;
  84. if (db != -1)
  85. mvdev->mdev->ops->send_intr(mvdev->mdev, db);
  86. }
  87. /* Determine the total number of bytes consumed in a VRINGH KIOV */
  88. static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
  89. {
  90. int i;
  91. u32 total = iov->consumed;
  92. for (i = 0; i < iov->i; i++)
  93. total += iov->iov[i].iov_len;
  94. return total;
  95. }
/*
 * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
 * This API is heavily based on the vringh_iov_xfer(..) implementation
 * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
 * and vringh_iov_push_kern(..) directly is because there is no
 * way to override the VRINGH xfer(..) routines as of v3.10.
 *
 * @mvdev:   the virtio device whose aperture the copies go through
 * @iov:     kernel iovec describing card-side buffers; its elements are
 *           consumed in place and restored when fully used (see below)
 * @ubuf:    user space buffer to copy to (read) or from (write)
 * @len:     number of bytes available/wanted in ubuf
 * @read:    true => card memory -> user buffer; false => user -> card
 * @out_len: set to the total number of bytes actually transferred
 *
 * Returns 0 on success or the -errno from the failing copy. Partially
 * completed transfers are reflected in *out_len and in the KIOV state.
 */
static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
	void __user *ubuf, size_t len, bool read, size_t *out_len)
{
	int ret = 0;
	size_t partlen, tot_len = 0;

	/* Walk the KIOV elements until ubuf or the KIOV is exhausted. */
	while (len && iov->i < iov->used) {
		partlen = min(iov->iov[iov->i].iov_len, len);
		/* iov_base holds a card-side offset, not a host pointer. */
		if (read)
			ret = mic_virtio_copy_to_user(mvdev,
				ubuf, partlen,
				(u64)iov->iov[iov->i].iov_base);
		else
			ret = mic_virtio_copy_from_user(mvdev,
				ubuf, partlen,
				(u64)iov->iov[iov->i].iov_base);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= partlen;
		ubuf += partlen;
		tot_len += partlen;
		/* Advance the current element past the bytes just copied. */
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;
		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;
			iov->consumed = 0;
			iov->i++;
		}
	}
	*out_len = tot_len;
	return ret;
}
/*
 * Use the standard VRINGH infrastructure in the kernel to fetch new
 * descriptors, initiate the copies and update the used ring.
 *
 * Copies the user-supplied iovec array in copy->iov across the PCIe
 * bus: read descriptors (riov) fill the user buffers, write descriptors
 * (wiov) are filled from them. copy->out_len accumulates the total
 * bytes moved. If a descriptor was consumed, data moved, and the caller
 * asked for it, the used ring is completed and the card notified.
 *
 * Returns 0 or a negative errno; also returns the (<= 0) result of
 * vringh_getdesc_kern() when no descriptor is available.
 */
static int _mic_virtio_copy(struct mic_vdev *mvdev,
	struct mic_copy_desc *copy)
{
	int ret = 0, iovcnt = copy->iovcnt;
	struct iovec iov;
	struct iovec __user *u_iov = copy->iov;
	void __user *ubuf = NULL;
	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
	struct vringh_kiov *riov = &mvr->riov;
	struct vringh_kiov *wiov = &mvr->wiov;
	struct vringh *vrh = &mvr->vrh;
	u16 *head = &mvr->head;
	struct mic_vring *vr = &mvr->vring;
	size_t len = 0, out_len;

	copy->out_len = 0;
	/* Fetch a new IOVEC if all previous elements have been processed */
	if (riov->i == riov->used && wiov->i == wiov->used) {
		ret = vringh_getdesc_kern(vrh, riov, wiov,
			head, GFP_KERNEL);
		/* Check if there are available descriptors */
		if (ret <= 0)
			return ret;
	}
	while (iovcnt) {
		if (!len) {
			/* Copy over a new iovec from user space. */
			ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
			if (ret) {
				ret = -EINVAL;
				dev_err(mic_dev(mvdev), "%s %d err %d\n",
					__func__, __LINE__, ret);
				break;
			}
			len = iov.iov_len;
			ubuf = iov.iov_base;
		}
		/* Issue all the read descriptors first */
		ret = mic_vringh_copy(mvdev, riov, ubuf, len,
			MIC_VRINGH_READ, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		/* Issue the write descriptors next */
		ret = mic_vringh_copy(mvdev, wiov, ubuf, len,
			!MIC_VRINGH_READ, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		if (!len) {
			/* One user space iovec is now completed */
			iovcnt--;
			u_iov++;
		}
		/* Exit loop if all elements in KIOVs have been processed. */
		if (riov->i == riov->used && wiov->i == wiov->used)
			break;
	}
	/*
	 * Update the used ring if a descriptor was available and some data was
	 * copied in/out and the user asked for a used ring update.
	 */
	if (*head != USHRT_MAX && copy->out_len &&
		copy->update_used) {
		u32 total = 0;
		/* Determine the total data consumed */
		total += mic_vringh_iov_consumed(riov);
		total += mic_vringh_iov_consumed(wiov);
		vringh_complete_kern(vrh, *head, total);
		/* USHRT_MAX marks "no descriptor outstanding". */
		*head = USHRT_MAX;
		if (vringh_need_notify_kern(vrh) > 0)
			vringh_notify(vrh);
		vringh_kiov_cleanup(riov);
		vringh_kiov_cleanup(wiov);
		/* Update avail idx for user space */
		vr->info->avail_idx = vrh->last_avail_idx;
	}
	return ret;
}
  232. static inline int mic_verify_copy_args(struct mic_vdev *mvdev,
  233. struct mic_copy_desc *copy)
  234. {
  235. if (copy->vr_idx >= mvdev->dd->num_vq) {
  236. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  237. __func__, __LINE__, -EINVAL);
  238. return -EINVAL;
  239. }
  240. return 0;
  241. }
  242. /* Copy a specified number of virtio descriptors in a chain */
  243. int mic_virtio_copy_desc(struct mic_vdev *mvdev,
  244. struct mic_copy_desc *copy)
  245. {
  246. int err;
  247. struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
  248. err = mic_verify_copy_args(mvdev, copy);
  249. if (err)
  250. return err;
  251. mutex_lock(&mvr->vr_mutex);
  252. if (!mic_vdevup(mvdev)) {
  253. err = -ENODEV;
  254. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  255. __func__, __LINE__, err);
  256. goto err;
  257. }
  258. err = _mic_virtio_copy(mvdev, copy);
  259. if (err) {
  260. dev_err(mic_dev(mvdev), "%s %d err %d\n",
  261. __func__, __LINE__, err);
  262. }
  263. err:
  264. mutex_unlock(&mvr->vr_mutex);
  265. return err;
  266. }
  267. static void mic_virtio_init_post(struct mic_vdev *mvdev)
  268. {
  269. struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
  270. int i;
  271. for (i = 0; i < mvdev->dd->num_vq; i++) {
  272. if (!le64_to_cpu(vqconfig[i].used_address)) {
  273. dev_warn(mic_dev(mvdev), "used_address zero??\n");
  274. continue;
  275. }
  276. mvdev->mvr[i].vrh.vring.used =
  277. mvdev->mdev->aper.va +
  278. le64_to_cpu(vqconfig[i].used_address);
  279. }
  280. mvdev->dc->used_address_updated = 0;
  281. dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
  282. __func__, mvdev->virtio_id);
  283. }
/*
 * Reset one virtio device: with every vring mutex held, clear the
 * device status and reset control-page flags, then rewind all host
 * and user-space ring indices to zero.
 */
static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
{
	int i;

	dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
		__func__, mvdev->dd->status, mvdev->virtio_id);

	for (i = 0; i < mvdev->dd->num_vq; i++)
		/*
		 * Avoid lockdep false positive. The + 1 is for the mic
		 * mutex which is held in the reset devices code path.
		 */
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);

	/* 0 status means "reset" */
	mvdev->dd->status = 0;
	mvdev->dc->vdev_reset = 0;
	/* Tell the card the reset request has been handled. */
	mvdev->dc->host_ack = 1;

	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct vringh *vrh = &mvdev->mvr[i].vrh;
		/* Rewind the user-space visible avail index too. */
		mvdev->mvr[i].vring.info->avail_idx = 0;
		vrh->completed = 0;
		vrh->last_avail_idx = 0;
		vrh->last_used_idx = 0;
	}

	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
}
  309. void mic_virtio_reset_devices(struct mic_device *mdev)
  310. {
  311. struct list_head *pos, *tmp;
  312. struct mic_vdev *mvdev;
  313. dev_dbg(mdev->sdev->parent, "%s\n", __func__);
  314. list_for_each_safe(pos, tmp, &mdev->vdev_list) {
  315. mvdev = list_entry(pos, struct mic_vdev, list);
  316. mic_virtio_device_reset(mvdev);
  317. mvdev->poll_wake = 1;
  318. wake_up(&mvdev->waitq);
  319. }
  320. }
  321. void mic_bh_handler(struct work_struct *work)
  322. {
  323. struct mic_vdev *mvdev = container_of(work, struct mic_vdev,
  324. virtio_bh_work);
  325. if (mvdev->dc->used_address_updated)
  326. mic_virtio_init_post(mvdev);
  327. if (mvdev->dc->vdev_reset)
  328. mic_virtio_device_reset(mvdev);
  329. mvdev->poll_wake = 1;
  330. wake_up(&mvdev->waitq);
  331. }
  332. static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
  333. {
  334. struct mic_vdev *mvdev = data;
  335. struct mic_device *mdev = mvdev->mdev;
  336. mdev->ops->ack_interrupt(mdev);
  337. schedule_work(&mvdev->virtio_bh_work);
  338. return IRQ_HANDLED;
  339. }
/*
 * Push a config-space change from user space to the card and wait for
 * the guest to acknowledge it.
 *
 * @mvdev: the virtio device whose config space is being updated
 * @argp:  user pointer to config_len bytes of new config data
 *
 * Returns 0 on success, -EIO if the card has no config doorbell or the
 * device type is invalid, -EFAULT on a bad user pointer.
 */
int mic_virtio_config_change(struct mic_vdev *mvdev,
	void __user *argp)
{
	/*
	 * NOTE(review): this on-stack waitqueue is never woken by anyone;
	 * wait_event_timeout() on it effectively polls dc->guest_ack every
	 * 100 ms, up to ~10 s total — confirm this is intentional.
	 */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int ret = 0, retry = 100, i;
	struct mic_bootparam *bootparam = mvdev->mdev->dp;
	s8 db = bootparam->h2c_config_db;

	/* Same nested lock ordering as mic_virtio_device_reset(). */
	mutex_lock(&mvdev->mdev->mic_mutex);
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);

	if (db == -1 || mvdev->dd->type == -1) {
		ret = -EIO;
		goto exit;
	}

	if (copy_from_user(mic_vq_configspace(mvdev->dd),
		argp, mvdev->dd->config_len)) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		ret = -EFAULT;
		goto exit;
	}

	/* Tell the card the config changed and ring its doorbell. */
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
	mvdev->mdev->ops->send_intr(mvdev->mdev, db);

	/* Poll for guest_ack (see NOTE above about the waitqueue). */
	for (i = retry; i--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}

	dev_dbg(mic_dev(mvdev),
		"%s %d retry: %d\n", __func__, __LINE__, retry);
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
exit:
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
	mutex_unlock(&mvdev->mdev->mic_mutex);
	return ret;
}
/*
 * Copy a user-supplied device descriptor into the first free slot of
 * the device page.
 *
 * @mvdev:   the virtio device being added
 * @argp:    user pointer to a struct mic_device_desc (plus trailing
 *           vqconfig/features/config data of mic_desc_size() bytes)
 * @type:    out: the descriptor's device type, saved before it is
 *           cleared in the page copy (set last by the caller so the
 *           card never sees a half-initialized entry)
 * @devpage: out: pointer to the slot written in the device page
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 * dd_config is a local bounce buffer and is always freed here.
 */
static int mic_copy_dp_entry(struct mic_vdev *mvdev,
	void __user *argp,
	__u8 *type,
	struct mic_device_desc **devpage)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc dd, *dd_config, *devp;
	struct mic_vqconfig *vqconfig;
	int ret = 0, i;
	bool slot_found = false;

	/* First copy just the fixed header to learn the full size. */
	if (copy_from_user(&dd, argp, sizeof(dd))) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		return -EFAULT;
	}

	/* Sanity-check user-controlled sizes before allocating. */
	if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE
		|| dd.num_vq > MIC_MAX_VRINGS) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EINVAL);
		return -EINVAL;
	}

	dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
	if (dd_config == NULL) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -ENOMEM);
		return -ENOMEM;
	}

	/* Now copy the whole descriptor including trailing data. */
	if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
		ret = -EFAULT;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}

	/* Reject vrings larger than the supported maximum. */
	vqconfig = mic_vq_config(dd_config);
	for (i = 0; i < dd.num_vq; i++) {
		if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
			ret = -EINVAL;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto exit;
		}
	}

	/* Find the first free device page entry */
	for (i = mic_aligned_size(struct mic_bootparam);
		i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
		i += mic_total_desc_size(devp)) {
		devp = mdev->dp + i;
		/* type 0 = never used, type -1 = removed device. */
		if (devp->type == 0 || devp->type == -1) {
			slot_found = true;
			break;
		}
	}
	if (!slot_found) {
		ret = -EINVAL;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}

	/*
	 * Save off the type before doing the memcpy. Type will be set in the
	 * end after completing all initialization for the new device.
	 */
	*type = dd_config->type;
	dd_config->type = 0;
	memcpy(devp, dd_config, mic_desc_size(dd_config));

	*devpage = devp;
exit:
	kfree(dd_config);
	return ret;
}
  449. static void mic_init_device_ctrl(struct mic_vdev *mvdev,
  450. struct mic_device_desc *devpage)
  451. {
  452. struct mic_device_ctrl *dc;
  453. dc = mvdev->dc = (void *)devpage + mic_aligned_desc_size(devpage);
  454. dc->config_change = 0;
  455. dc->guest_ack = 0;
  456. dc->vdev_reset = 0;
  457. dc->host_ack = 0;
  458. dc->used_address_updated = 0;
  459. dc->c2h_vdev_db = -1;
  460. dc->h2c_vdev_db = -1;
  461. }
/*
 * Add a new virtio device: copy its descriptor into the device page,
 * allocate and map every vring, register the doorbell interrupt, and
 * only then publish the device type so the card can discover it.
 *
 * @mvdev: host-side state for the new device
 * @argp:  user pointer to the device descriptor (see mic_copy_dp_entry)
 *
 * Returns 0 on success or a negative errno; on failure all vrings
 * allocated so far are unmapped and freed.
 */
int mic_virtio_add_device(struct mic_vdev *mvdev,
	void __user *argp)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc *dd;
	struct mic_vqconfig *vqconfig;
	int vr_size, i, j, ret;
	u8 type;
	s8 db;
	char irqname[10];
	struct mic_bootparam *bootparam = mdev->dp;
	u16 num;

	mutex_lock(&mdev->mic_mutex);

	/* Copy the descriptor into the device page; type is held back. */
	ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
	if (ret) {
		mutex_unlock(&mdev->mic_mutex);
		return ret;
	}

	mic_init_device_ctrl(mvdev, dd);

	mvdev->dd = dd;
	mvdev->virtio_id = type;
	vqconfig = mic_vq_config(dd);
	INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);

	/* Allocate, DMA-map and initialize one vring per virtqueue. */
	for (i = 0; i < dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];
		struct mic_vring *vr = &mvdev->mvr[i].vring;
		num = le16_to_cpu(vqconfig[i].num);
		mutex_init(&mvr->vr_mutex);
		/* Ring plus the shared _mic_vring_info, page aligned. */
		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
			sizeof(struct _mic_vring_info));
		vr->va = (void *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(vr_size));
		if (!vr->va) {
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vr->len = vr_size;
		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
		vr->info->magic = MIC_MAGIC + mvdev->virtio_id + i;
		/* Map the ring so the card can reach it over PCIe. */
		vqconfig[i].address = mic_map_single(mdev,
			vr->va, vr_size);
		if (mic_map_error(vqconfig[i].address)) {
			free_pages((unsigned long)vr->va,
				get_order(vr_size));
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vqconfig[i].address = cpu_to_le64(vqconfig[i].address);

		vring_init(&vr->vr, num,
			vr->va, MIC_VIRTIO_RING_ALIGN);
		ret = vringh_init_kern(&mvr->vrh,
			*(u32 *)mic_vq_features(mvdev->dd), num, false,
			vr->vr.desc, vr->vr.avail, vr->vr.used);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vringh_kiov_init(&mvr->riov, NULL, 0);
		vringh_kiov_init(&mvr->wiov, NULL, 0);
		/* USHRT_MAX = no descriptor outstanding on this ring. */
		mvr->head = USHRT_MAX;
		mvr->mvdev = mvdev;
		mvr->vrh.notify = mic_notify;
		dev_dbg(mdev->sdev->parent,
			"%s %d index %d va %p info %p vr_size 0x%x\n",
			__func__, __LINE__, i, vr->va, vr->info, vr_size);
	}

	snprintf(irqname, sizeof(irqname),
		"mic%dvirtio%d", mdev->id, mvdev->virtio_id);
	mvdev->virtio_db = mic_next_db(mdev);
	mvdev->virtio_cookie = mic_request_irq(mdev, mic_virtio_intr_handler,
		irqname, mvdev, mvdev->virtio_db, MIC_INTR_DB);
	if (IS_ERR(mvdev->virtio_cookie)) {
		ret = PTR_ERR(mvdev->virtio_cookie);
		dev_dbg(mdev->sdev->parent, "request irq failed\n");
		goto err;
	}

	mvdev->dc->c2h_vdev_db = mvdev->virtio_db;

	list_add_tail(&mvdev->list, &mdev->vdev_list);
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	dd->type = type;

	dev_dbg(mdev->sdev->parent, "Added virtio device id %d\n", dd->type);

	/* Ring the card's config doorbell so it rescans the device page. */
	db = bootparam->h2c_config_db;
	if (db != -1)
		mdev->ops->send_intr(mdev, db);
	mutex_unlock(&mdev->mic_mutex);
	return 0;
err:
	/* Unwind: unmap and free only the i vrings completed so far. */
	vqconfig = mic_vq_config(dd);
	for (j = 0; j < i; j++) {
		struct mic_vringh *mvr = &mvdev->mvr[j];
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
			mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			get_order(mvr->vring.len));
	}
	mutex_unlock(&mdev->mic_mutex);
	return ret;
}
/*
 * Remove a virtio device: request a hot remove from the card and wait
 * for the guest ack (best effort), then free the interrupt, flush
 * pending bottom-half work, tear down every vring, unlink the device,
 * and finally mark the device page entry as removed (-1).
 */
void mic_virtio_del_device(struct mic_vdev *mvdev)
{
	struct list_head *pos, *tmp;
	struct mic_vdev *tmp_mvdev;
	struct mic_device *mdev = mvdev->mdev;
	/*
	 * NOTE(review): as in mic_virtio_config_change(), this on-stack
	 * waitqueue is never woken; the loop polls guest_ack every 100 ms.
	 */
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int i, ret, retry = 100;
	struct mic_vqconfig *vqconfig;
	struct mic_bootparam *bootparam = mdev->dp;
	s8 db;

	mutex_lock(&mdev->mic_mutex);
	db = bootparam->h2c_config_db;
	/* No config doorbell means the card cannot be asked to remove. */
	if (db == -1)
		goto skip_hot_remove;
	dev_dbg(mdev->sdev->parent,
		"Requesting hot remove id %d\n", mvdev->virtio_id);
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
	mdev->ops->send_intr(mdev, db);
	for (i = retry; i--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}
	dev_dbg(mdev->sdev->parent,
		"Device id %d config_change %d guest_ack %d\n",
		mvdev->virtio_id, mvdev->dc->config_change,
		mvdev->dc->guest_ack);
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
skip_hot_remove:
	/* Free the IRQ before flushing work it could schedule. */
	mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
	flush_work(&mvdev->virtio_bh_work);
	vqconfig = mic_vq_config(mvdev->dd);
	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];
		vringh_kiov_cleanup(&mvr->riov);
		vringh_kiov_cleanup(&mvr->wiov);
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
			mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			get_order(mvr->vring.len));
	}

	/* Unlink this device from the parent's device list. */
	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
		tmp_mvdev = list_entry(pos, struct mic_vdev, list);
		if (tmp_mvdev == mvdev) {
			list_del(pos);
			dev_dbg(mdev->sdev->parent,
				"Removing virtio device id %d\n",
				mvdev->virtio_id);
			break;
		}
	}
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	mvdev->dd->type = -1;
	mutex_unlock(&mdev->mic_mutex);
}