virtio_ccw.c

/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *  Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>

/*
 * virtio related functions
 */

struct vq_config_block {
        __u16 index;
        __u16 num;
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */

struct virtio_ccw_device {
        struct virtio_device vdev;
        __u8 *status;
        __u8 config[VIRTIO_CCW_CONFIG_SIZE];
        struct ccw_device *cdev;
        __u32 curr_io;
        int err;
        wait_queue_head_t wait_q;
        spinlock_t lock;
        struct list_head virtqueues;
        unsigned long indicators;
        unsigned long indicators2;
        struct vq_config_block *config_block;
};
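
/*
 * A short orientation for the fields above, as they are used in this file:
 * curr_io/err track the channel I/O currently in flight and its result,
 * wait_q is where ccw_io_helper() sleeps until the interrupt handler
 * completes that I/O, indicators holds one bit per virtqueue, and bit 0
 * of indicators2 signals a configuration change from the host.
 */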

struct vq_info_block {
        __u64 queue;
        __u32 align;
        __u16 index;
        __u16 num;
} __packed;

struct virtio_feature_desc {
        __u32 features;
        __u8 index;
} __packed;

struct virtio_ccw_vq_info {
        struct virtqueue *vq;
        int num;
        void *queue;
        struct vq_info_block *info_block;
        struct list_head node;
        long cookie;
};

#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32

#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000

#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
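
/*
 * How the interruption parameter is used in this file: the upper halfword
 * of the intparm passed to ccw_device_start() carries one of the
 * VIRTIO_CCW_DOING_* flags above (extracted via VIRTIO_CCW_INTPARM_MASK),
 * while the lower halfword may carry a queue index, as in
 * "VIRTIO_CCW_DOING_SET_VQ | index" when (de)registering a virtqueue.
 */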

static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
        return container_of(vdev, struct virtio_ccw_device, vdev);
}

static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
        unsigned long flags;
        __u32 ret;

        spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
        if (vcdev->err)
                ret = 0;
        else
                ret = vcdev->curr_io & flag;
        spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
        return ret;
}

static int ccw_io_helper(struct virtio_ccw_device *vcdev,
                         struct ccw1 *ccw, __u32 intparm)
{
        int ret;
        unsigned long flags;
        int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

        do {
                spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
                ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
                if (!ret) {
                        if (!vcdev->curr_io)
                                vcdev->err = 0;
                        vcdev->curr_io |= flag;
                }
                spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
                cpu_relax();
        } while (ret == -EBUSY);
        wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
        return ret ? ret : vcdev->err;
}
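
/*
 * The basic I/O pattern of this transport, roughly: build a one-command
 * channel program (a single struct ccw1), start it on the ccw device under
 * the ccw device lock (retrying while the subchannel is busy), then sleep
 * on wait_q until the interrupt handler clears the corresponding
 * VIRTIO_CCW_DOING_* flag from curr_io.  Any error seen by the interrupt
 * handler is reported back through vcdev->err.
 */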

static inline long do_kvm_notify(struct subchannel_id schid,
                                 unsigned long queue_index,
                                 long cookie)
{
        register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
        register struct subchannel_id __schid asm("2") = schid;
        register unsigned long __index asm("3") = queue_index;
        register long __rc asm("2");
        register long __cookie asm("4") = cookie;

        asm volatile ("diag 2,4,0x500\n"
                      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
                      "d"(__cookie)
                      : "memory", "cc");
        return __rc;
}
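
/*
 * Queue notifications are hypercalls: the guest issues DIAGNOSE 0x500 with
 * the notify function code in gr1, the subchannel id in gr2, the queue
 * index in gr3 and a cookie in gr4; the host's return value/cookie comes
 * back in gr2.  This mirrors the register constraints in the inline
 * assembly above.
 */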

static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
        struct virtio_ccw_vq_info *info = vq->priv;
        struct virtio_ccw_device *vcdev;
        struct subchannel_id schid;

        vcdev = to_vc_device(info->vq->vdev);
        ccw_device_get_schid(vcdev->cdev, &schid);
        info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
        if (info->cookie < 0)
                return false;
        return true;
}

static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
                                   struct ccw1 *ccw, int index)
{
        vcdev->config_block->index = index;
        ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
        ccw->flags = 0;
        ccw->count = sizeof(struct vq_config_block);
        ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
        return vcdev->config_block->num;
}

static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
        struct virtio_ccw_vq_info *info = vq->priv;
        unsigned long flags;
        unsigned long size;
        int ret;
        unsigned int index = vq->index;

        /* Remove from our list. */
        spin_lock_irqsave(&vcdev->lock, flags);
        list_del(&info->node);
        spin_unlock_irqrestore(&vcdev->lock, flags);

        /* Release from host. */
        info->info_block->queue = 0;
        info->info_block->align = 0;
        info->info_block->index = index;
        info->info_block->num = 0;
        ccw->cmd_code = CCW_CMD_SET_VQ;
        ccw->flags = 0;
        ccw->count = sizeof(*info->info_block);
        ccw->cda = (__u32)(unsigned long)(info->info_block);
        ret = ccw_io_helper(vcdev, ccw,
                            VIRTIO_CCW_DOING_SET_VQ | index);
        /*
         * -ENODEV isn't considered an error: The device is gone anyway.
         * This may happen on device detach.
         */
        if (ret && (ret != -ENODEV))
                dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
                         ret, index);

        vring_del_virtqueue(vq);
        size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
        free_pages_exact(info->queue, size);
        kfree(info->info_block);
        kfree(info);
}

static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
        struct virtqueue *vq, *n;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        list_for_each_entry_safe(vq, n, &vdev->vqs, list)
                virtio_ccw_del_vq(vq, ccw);

        kfree(ccw);
}

static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
                                             int i, vq_callback_t *callback,
                                             const char *name,
                                             struct ccw1 *ccw)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        int err;
        struct virtqueue *vq = NULL;
        struct virtio_ccw_vq_info *info;
        unsigned long size = 0; /* silence the compiler */
        unsigned long flags;

        /* Allocate queue. */
        info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
        if (!info) {
                dev_warn(&vcdev->cdev->dev, "no info\n");
                err = -ENOMEM;
                goto out_err;
        }
        info->info_block = kzalloc(sizeof(*info->info_block),
                                   GFP_DMA | GFP_KERNEL);
        if (!info->info_block) {
                dev_warn(&vcdev->cdev->dev, "no info block\n");
                err = -ENOMEM;
                goto out_err;
        }
        info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
        size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
        info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
        if (info->queue == NULL) {
                dev_warn(&vcdev->cdev->dev, "no queue\n");
                err = -ENOMEM;
                goto out_err;
        }

        vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
                                 true, info->queue, virtio_ccw_kvm_notify,
                                 callback, name);
        if (!vq) {
                /* For now, we fail if we can't get the requested size. */
                dev_warn(&vcdev->cdev->dev, "no vq\n");
                err = -ENOMEM;
                goto out_err;
        }

        /* Register it with the host. */
        info->info_block->queue = (__u64)info->queue;
        info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN;
        info->info_block->index = i;
        info->info_block->num = info->num;
        ccw->cmd_code = CCW_CMD_SET_VQ;
        ccw->flags = 0;
        ccw->count = sizeof(*info->info_block);
        ccw->cda = (__u32)(unsigned long)(info->info_block);
        err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
        if (err) {
                dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
                goto out_err;
        }

        info->vq = vq;
        vq->priv = info;

        /* Save it to our list. */
        spin_lock_irqsave(&vcdev->lock, flags);
        list_add(&info->node, &vcdev->virtqueues);
        spin_unlock_irqrestore(&vcdev->lock, flags);

        return vq;

out_err:
        if (vq)
                vring_del_virtqueue(vq);
        if (info) {
                if (info->queue)
                        free_pages_exact(info->queue, size);
                kfree(info->info_block);
        }
        kfree(info);
        return ERR_PTR(err);
}
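
/*
 * Queue setup, in short: ask the host for the queue size with
 * CCW_CMD_READ_VQ_CONF, allocate page-aligned ring memory, create the
 * vring with virtio_ccw_kvm_notify() as the kick callback, and finally
 * tell the host about the ring's guest address, alignment, index and size
 * with CCW_CMD_SET_VQ.  The info/config blocks are allocated with GFP_DMA
 * because the data addressed by a CCW has to live below 2G.
 */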

static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                               struct virtqueue *vqs[],
                               vq_callback_t *callbacks[],
                               const char *names[])
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        unsigned long *indicatorp = NULL;
        int ret, i;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return -ENOMEM;

        for (i = 0; i < nvqs; ++i) {
                vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
                                             ccw);
                if (IS_ERR(vqs[i])) {
                        ret = PTR_ERR(vqs[i]);
                        vqs[i] = NULL;
                        goto out;
                }
        }
        ret = -ENOMEM;
        /* We need a data area under 2G to communicate. */
        indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
        if (!indicatorp)
                goto out;
        *indicatorp = (unsigned long) &vcdev->indicators;
        /* Register queue indicators with host. */
        vcdev->indicators = 0;
        ccw->cmd_code = CCW_CMD_SET_IND;
        ccw->flags = 0;
        ccw->count = sizeof(vcdev->indicators);
        ccw->cda = (__u32)(unsigned long) indicatorp;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
        if (ret)
                goto out;
        /* Register indicators2 with host for config changes */
        *indicatorp = (unsigned long) &vcdev->indicators2;
        vcdev->indicators2 = 0;
        ccw->cmd_code = CCW_CMD_SET_CONF_IND;
        ccw->flags = 0;
        ccw->count = sizeof(vcdev->indicators2);
        ccw->cda = (__u32)(unsigned long) indicatorp;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
        if (ret)
                goto out;

        kfree(indicatorp);
        kfree(ccw);
        return 0;

out:
        kfree(indicatorp);
        kfree(ccw);
        virtio_ccw_del_vqs(vdev);
        return ret;
}
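
/*
 * After all queues exist, the guest hands the host two indicator
 * addresses: CCW_CMD_SET_IND points at vcdev->indicators (one bit per
 * virtqueue) and CCW_CMD_SET_CONF_IND points at vcdev->indicators2 (bit 0
 * flags a configuration change).  The payload transferred by the CCW is
 * the *address* of the indicator word, which is why it is staged in a
 * GFP_DMA buffer below 2G.
 */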

static void virtio_ccw_reset(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        /* Zero status bits. */
        *vcdev->status = 0;

        /* Send a reset ccw on device. */
        ccw->cmd_code = CCW_CMD_VDEV_RESET;
        ccw->flags = 0;
        ccw->count = 0;
        ccw->cda = 0;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
        kfree(ccw);
}

static u32 virtio_ccw_get_features(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct virtio_feature_desc *features;
        int ret, rc;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return 0;

        features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
        if (!features) {
                rc = 0;
                goto out_free;
        }
        /* Read the feature bits from the host. */
        /* TODO: Features > 32 bits */
        features->index = 0;
        ccw->cmd_code = CCW_CMD_READ_FEAT;
        ccw->flags = 0;
        ccw->count = sizeof(*features);
        ccw->cda = (__u32)(unsigned long)features;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
        if (ret) {
                rc = 0;
                goto out_free;
        }

        rc = le32_to_cpu(features->features);
out_free:
        kfree(features);
        kfree(ccw);
        return rc;
}

static void virtio_ccw_finalize_features(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct virtio_feature_desc *features;
        int i;
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
        if (!features)
                goto out_free;

        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);

        for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features);
             i++) {
                int highbits = i % 2 ? 32 : 0;
                features->index = i;
                features->features = cpu_to_le32(vdev->features[i / 2]
                                                 >> highbits);
                /* Write the feature bits to the host. */
                ccw->cmd_code = CCW_CMD_WRITE_FEAT;
                ccw->flags = 0;
                ccw->count = sizeof(*features);
                ccw->cda = (__u32)(unsigned long)features;
                ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
        }
out_free:
        kfree(features);
        kfree(ccw);
}

static void virtio_ccw_get_config(struct virtio_device *vdev,
                                  unsigned int offset, void *buf, unsigned len)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        int ret;
        struct ccw1 *ccw;
        void *config_area;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
        if (!config_area)
                goto out_free;

        /* Read the config area from the host. */
        ccw->cmd_code = CCW_CMD_READ_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
        ccw->cda = (__u32)(unsigned long)config_area;
        ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
        if (ret)
                goto out_free;

        memcpy(vcdev->config, config_area, sizeof(vcdev->config));
        memcpy(buf, &vcdev->config[offset], len);

out_free:
        kfree(config_area);
        kfree(ccw);
}

static void virtio_ccw_set_config(struct virtio_device *vdev,
                                  unsigned int offset, const void *buf,
                                  unsigned len)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;
        void *config_area;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
        if (!config_area)
                goto out_free;

        memcpy(&vcdev->config[offset], buf, len);
        /* Write the config area to the host. */
        memcpy(config_area, vcdev->config, sizeof(vcdev->config));
        ccw->cmd_code = CCW_CMD_WRITE_CONF;
        ccw->flags = 0;
        ccw->count = offset + len;
        ccw->cda = (__u32)(unsigned long)config_area;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
        kfree(config_area);
        kfree(ccw);
}

static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);

        return *vcdev->status;
}

static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
        struct virtio_ccw_device *vcdev = to_vc_device(vdev);
        struct ccw1 *ccw;

        ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
        if (!ccw)
                return;

        /* Write the status to the host. */
        *vcdev->status = status;
        ccw->cmd_code = CCW_CMD_WRITE_STATUS;
        ccw->flags = 0;
        ccw->count = sizeof(status);
        ccw->cda = (__u32)(unsigned long)vcdev->status;
        ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
        kfree(ccw);
}

static struct virtio_config_ops virtio_ccw_config_ops = {
        .get_features = virtio_ccw_get_features,
        .finalize_features = virtio_ccw_finalize_features,
        .get = virtio_ccw_get_config,
        .set = virtio_ccw_set_config,
        .get_status = virtio_ccw_get_status,
        .set_status = virtio_ccw_set_status,
        .reset = virtio_ccw_reset,
        .find_vqs = virtio_ccw_find_vqs,
        .del_vqs = virtio_ccw_del_vqs,
};
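
/*
 * These ops are the transport's side of the generic virtio_config_ops
 * interface: the virtio core and the individual virtio drivers (net, blk,
 * console, ...) call them without having to know that each access is
 * turned into a channel program on an s390 subchannel.
 */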

/*
 * ccw bus driver related functions
 */

static void virtio_ccw_release_dev(struct device *_d)
{
        struct virtio_device *dev = container_of(_d, struct virtio_device,
                                                 dev);
        struct virtio_ccw_device *vcdev = to_vc_device(dev);

        kfree(vcdev->status);
        kfree(vcdev->config_block);
        kfree(vcdev);
}

static int irb_is_error(struct irb *irb)
{
        if (scsw_cstat(&irb->scsw) != 0)
                return 1;
        if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
                return 1;
        if (scsw_cc(&irb->scsw) != 0)
                return 1;
        return 0;
}

static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
                                              int index)
{
        struct virtio_ccw_vq_info *info;
        unsigned long flags;
        struct virtqueue *vq;

        vq = NULL;
        spin_lock_irqsave(&vcdev->lock, flags);
        list_for_each_entry(info, &vcdev->virtqueues, node) {
                if (info->vq->index == index) {
                        vq = info->vq;
                        break;
                }
        }
        spin_unlock_irqrestore(&vcdev->lock, flags);
        return vq;
}

static void virtio_ccw_int_handler(struct ccw_device *cdev,
                                   unsigned long intparm,
                                   struct irb *irb)
{
        __u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
        int i;
        struct virtqueue *vq;
        struct virtio_driver *drv;

        /* Check if it's a notification from the host. */
        if ((intparm == 0) &&
            (scsw_stctl(&irb->scsw) ==
             (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
                /* OK */
        }
        if (irb_is_error(irb))
                vcdev->err = -EIO; /* XXX - use real error */
        if (vcdev->curr_io & activity) {
                switch (activity) {
                case VIRTIO_CCW_DOING_READ_FEAT:
                case VIRTIO_CCW_DOING_WRITE_FEAT:
                case VIRTIO_CCW_DOING_READ_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_CONFIG:
                case VIRTIO_CCW_DOING_WRITE_STATUS:
                case VIRTIO_CCW_DOING_SET_VQ:
                case VIRTIO_CCW_DOING_SET_IND:
                case VIRTIO_CCW_DOING_SET_CONF_IND:
                case VIRTIO_CCW_DOING_RESET:
                case VIRTIO_CCW_DOING_READ_VQ_CONF:
                        vcdev->curr_io &= ~activity;
                        wake_up(&vcdev->wait_q);
                        break;
                default:
                        /* don't know what to do... */
                        dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
                                 activity);
                        WARN_ON(1);
                        break;
                }
        }
        for_each_set_bit(i, &vcdev->indicators,
                         sizeof(vcdev->indicators) * BITS_PER_BYTE) {
                /* The bit clear must happen before the vring kick. */
                clear_bit(i, &vcdev->indicators);
                barrier();
                vq = virtio_ccw_vq_by_ind(vcdev, i);
                vring_interrupt(0, vq);
        }
        if (test_bit(0, &vcdev->indicators2)) {
                drv = container_of(vcdev->vdev.dev.driver,
                                   struct virtio_driver, driver);
                if (drv && drv->config_changed)
                        drv->config_changed(&vcdev->vdev);
                clear_bit(0, &vcdev->indicators2);
        }
}
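
/*
 * Interrupt flow, in short: every I/O interrupt for the subchannel ends up
 * here.  If it completes a channel program we started, the matching
 * VIRTIO_CCW_DOING_* bit is cleared from curr_io and ccw_io_helper() is
 * woken up.  Independently of that, the handler walks the queue indicator
 * bits the host may have set and calls vring_interrupt() for each, and it
 * forwards a set bit 0 in indicators2 to the driver's config_changed()
 * callback.
 */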

/*
 * We usually want to auto-online all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
                     (8*sizeof(long)))

static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

static char *no_auto = "";
module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
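
/*
 * The no_auto parameter takes comma-separated bus ids or bus id ranges in
 * the usual cssid.ssid.devno notation.  Purely as an illustration of the
 * format parsed below, something like
 *   virtio_ccw.no_auto=0.0.0042,0.1.1000-0.1.10ff
 * would keep those devices from being set online automatically.
 */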

static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
        struct ccw_dev_id id;

        ccw_device_get_id(cdev, &id);
        if (test_bit(id.devno, devs_no_auto[id.ssid]))
                return 0;
        return 1;
}

static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
        struct ccw_device *cdev = data;
        int ret;

        ret = ccw_device_set_online(cdev);
        if (ret)
                dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}

static int virtio_ccw_probe(struct ccw_device *cdev)
{
        cdev->handler = virtio_ccw_int_handler;
        if (virtio_ccw_check_autoonline(cdev))
                async_schedule(virtio_ccw_auto_online, cdev);
        return 0;
}

static void virtio_ccw_remove(struct ccw_device *cdev)
{
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

        if (cdev->online) {
                unregister_virtio_device(&vcdev->vdev);
                dev_set_drvdata(&cdev->dev, NULL);
        }
        cdev->handler = NULL;
}

static int virtio_ccw_offline(struct ccw_device *cdev)
{
        struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

        unregister_virtio_device(&vcdev->vdev);
        dev_set_drvdata(&cdev->dev, NULL);
        return 0;
}

static int virtio_ccw_online(struct ccw_device *cdev)
{
        int ret;
        struct virtio_ccw_device *vcdev;

        vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
        if (!vcdev) {
                dev_warn(&cdev->dev, "Could not get memory for virtio\n");
                ret = -ENOMEM;
                goto out_free;
        }
        vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
                                      GFP_DMA | GFP_KERNEL);
        if (!vcdev->config_block) {
                ret = -ENOMEM;
                goto out_free;
        }
        vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
        if (!vcdev->status) {
                ret = -ENOMEM;
                goto out_free;
        }
        vcdev->vdev.dev.parent = &cdev->dev;
        vcdev->vdev.dev.release = virtio_ccw_release_dev;
        vcdev->vdev.config = &virtio_ccw_config_ops;
        vcdev->cdev = cdev;
        init_waitqueue_head(&vcdev->wait_q);
        INIT_LIST_HEAD(&vcdev->virtqueues);
        spin_lock_init(&vcdev->lock);
        dev_set_drvdata(&cdev->dev, vcdev);
        vcdev->vdev.id.vendor = cdev->id.cu_type;
        vcdev->vdev.id.device = cdev->id.cu_model;
        ret = register_virtio_device(&vcdev->vdev);
        if (ret) {
                dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
                         ret);
                goto out_put;
        }
        return 0;

out_put:
        dev_set_drvdata(&cdev->dev, NULL);
        put_device(&vcdev->vdev.dev);
        return ret;
out_free:
        if (vcdev) {
                kfree(vcdev->status);
                kfree(vcdev->config_block);
        }
        kfree(vcdev);
        return ret;
}
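
/*
 * Setting the ccw device online is what actually creates the virtio
 * device: the status byte and the vq config block are allocated from
 * GFP_DMA memory (they are handed to the host by address), and the
 * device's control unit type/model are reused as the virtio vendor and
 * device id before the device is registered with the virtio core, which
 * in turn triggers matching against the virtio drivers.
 */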

static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
        /* TODO: Check whether we need special handling here. */
        return 0;
}

static struct ccw_device_id virtio_ids[] = {
        { CCW_DEVICE(0x3832, 0) },
        {},
};
MODULE_DEVICE_TABLE(ccw, virtio_ids);
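
/*
 * The driver binds to control unit type 0x3832, the type a virtio-ccw
 * capable host (QEMU, for instance) presents for virtio devices; the
 * control unit model then identifies the virtio device type, as used in
 * virtio_ccw_online() above.
 */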

static struct ccw_driver virtio_ccw_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name = "virtio_ccw",
        },
        .ids = virtio_ids,
        .probe = virtio_ccw_probe,
        .remove = virtio_ccw_remove,
        .set_offline = virtio_ccw_offline,
        .set_online = virtio_ccw_online,
        .notify = virtio_ccw_cio_notify,
        .int_class = IRQIO_VIR,
};

static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
                           int max_digit, int max_val)
{
        int diff;

        diff = 0;
        *val = 0;

        while (diff <= max_digit) {
                int value = hex_to_bin(**cp);

                if (value < 0)
                        break;
                *val = *val * 16 + value;
                (*cp)++;
                diff++;
        }

        if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
                return 1;

        return 0;
}

static int __init parse_busid(char *str, unsigned int *cssid,
                              unsigned int *ssid, unsigned int *devno)
{
        char *str_work;
        int rc, ret;

        rc = 1;
        if (*str == '\0')
                goto out;

        str_work = str;
        ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
        if (ret || (str_work[0] != '.'))
                goto out;
        str_work++;
        ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
        if (ret || (str_work[0] != '.'))
                goto out;
        str_work++;
        ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
        if (ret || (str_work[0] != '\0'))
                goto out;

        rc = 0;
out:
        return rc;
}

static void __init no_auto_parse(void)
{
        unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
        char *parm, *str;
        int rc;

        str = no_auto;
        while ((parm = strsep(&str, ","))) {
                rc = parse_busid(strsep(&parm, "-"), &from_cssid,
                                 &from_ssid, &from);
                if (rc)
                        continue;
                if (parm != NULL) {
                        rc = parse_busid(parm, &to_cssid,
                                         &to_ssid, &to);
                        if ((from_ssid > to_ssid) ||
                            ((from_ssid == to_ssid) && (from > to)))
                                rc = -EINVAL;
                } else {
                        to_cssid = from_cssid;
                        to_ssid = from_ssid;
                        to = from;
                }
                if (rc)
                        continue;
                while ((from_ssid < to_ssid) ||
                       ((from_ssid == to_ssid) && (from <= to))) {
                        set_bit(from, devs_no_auto[from_ssid]);
                        from++;
                        if (from > __MAX_SUBCHANNEL) {
                                from_ssid++;
                                from = 0;
                        }
                }
        }
}

static int __init virtio_ccw_init(void)
{
        /* parse no_auto string before we do anything further */
        no_auto_parse();
        return ccw_driver_register(&virtio_ccw_driver);
}
module_init(virtio_ccw_init);

static void __exit virtio_ccw_exit(void)
{
        ccw_driver_unregister(&virtio_ccw_driver);
}
module_exit(virtio_ccw_exit);