virtio_ccw.c

/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>

/*
 * virtio related functions
 */
struct vq_config_block {
	__u16 index;
	__u16 num;
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */

struct virtio_ccw_device {
	struct virtio_device vdev;
	__u8 *status;
	__u8 config[VIRTIO_CCW_CONFIG_SIZE];
	struct ccw_device *cdev;
	__u32 curr_io;
	int err;
	wait_queue_head_t wait_q;
	spinlock_t lock;
	struct list_head virtqueues;
	unsigned long indicators;
	unsigned long indicators2;
	struct vq_config_block *config_block;
};

struct vq_info_block {
	__u64 queue;
	__u32 align;
	__u16 index;
	__u16 num;
} __packed;

struct virtio_feature_desc {
	__u32 features;
	__u8 index;
} __packed;

struct virtio_ccw_vq_info {
	struct virtqueue *vq;
	int num;
	void *queue;
	struct vq_info_block *info_block;
	struct list_head node;
	long cookie;
};

#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32

#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000

static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_ccw_device, vdev);
}

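/*
 * Channel I/O helpers: each transport operation is a single-CCW channel
 * program started on the ccw device. ccw_io_helper() retries
 * ccw_device_start() while the subchannel is busy, records the pending
 * operation in curr_io and sleeps until the interrupt handler clears the
 * corresponding bit again.
 */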
static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
	unsigned long flags;
	__u32 ret;

	spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
	if (vcdev->err)
		ret = 0;
	else
		ret = vcdev->curr_io & flag;
	spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
	return ret;
}

static int ccw_io_helper(struct virtio_ccw_device *vcdev,
			 struct ccw1 *ccw, __u32 intparm)
{
	int ret;
	unsigned long flags;
	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

	do {
		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
		if (!ret) {
			if (!vcdev->curr_io)
				vcdev->err = 0;
			vcdev->curr_io |= flag;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
		cpu_relax();
	} while (ret == -EBUSY);
	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
	return ret ? ret : vcdev->err;
}

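/*
 * Host notification ("kick") uses diagnose 0x500 with function code
 * KVM_S390_VIRTIO_CCW_NOTIFY in register 1: the subchannel id, queue index
 * and the cookie returned by the previous notification are passed in
 * registers 2-4, and the host's return value comes back in register 2.
 */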
static inline long do_kvm_notify(struct subchannel_id schid,
				 unsigned long queue_index,
				 long cookie)
{
	register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
	register struct subchannel_id __schid asm("2") = schid;
	register unsigned long __index asm("3") = queue_index;
	register long __rc asm("2");
	register long __cookie asm("4") = cookie;

	asm volatile ("diag 2,4,0x500\n"
		      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
		      "d"(__cookie)
		      : "memory", "cc");
	return __rc;
}

static void virtio_ccw_kvm_notify(struct virtqueue *vq)
{
	struct virtio_ccw_vq_info *info = vq->priv;
	struct virtio_ccw_device *vcdev;
	struct subchannel_id schid;

	vcdev = to_vc_device(info->vq->vdev);
	ccw_device_get_schid(vcdev->cdev, &schid);
	info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
}

static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
				   struct ccw1 *ccw, int index)
{
	vcdev->config_block->index = index;
	ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
	ccw->flags = 0;
	ccw->count = sizeof(struct vq_config_block);
	ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
	return vcdev->config_block->num;
}

static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
	struct virtio_ccw_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned long size;
	int ret;
	unsigned int index = vq->index;

	/* Remove from our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	/* Release from host. */
	info->info_block->queue = 0;
	info->info_block->align = 0;
	info->info_block->index = index;
	info->info_block->num = 0;
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->count = sizeof(*info->info_block);
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	ret = ccw_io_helper(vcdev, ccw,
			    VIRTIO_CCW_DOING_SET_VQ | index);
	/*
	 * -ENODEV isn't considered an error: The device is gone anyway.
	 * This may happen on device detach.
	 */
	if (ret && (ret != -ENODEV))
		dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
			 ret, index);

	vring_del_virtqueue(vq);
	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info->info_block);
	kfree(info);
}

static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_ccw_del_vq(vq, ccw);

	kfree(ccw);
}

static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
					     int i, vq_callback_t *callback,
					     const char *name,
					     struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int err;
	struct virtqueue *vq = NULL;
	struct virtio_ccw_vq_info *info;
	unsigned long size = 0; /* silence the compiler */
	unsigned long flags;

	/* Allocate queue. */
	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
	if (!info) {
		dev_warn(&vcdev->cdev->dev, "no info\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->info_block = kzalloc(sizeof(*info->info_block),
				   GFP_DMA | GFP_KERNEL);
	if (!info->info_block) {
		dev_warn(&vcdev->cdev->dev, "no info block\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
	info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (info->queue == NULL) {
		dev_warn(&vcdev->cdev->dev, "no queue\n");
		err = -ENOMEM;
		goto out_err;
	}

	vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
				 true, info->queue, virtio_ccw_kvm_notify,
				 callback, name);
	if (!vq) {
		/* For now, we fail if we can't get the requested size. */
		dev_warn(&vcdev->cdev->dev, "no vq\n");
		err = -ENOMEM;
		goto out_err;
	}

	/* Register it with the host. */
	info->info_block->queue = (__u64)info->queue;
	info->info_block->align = KVM_VIRTIO_CCW_RING_ALIGN;
	info->info_block->index = i;
	info->info_block->num = info->num;
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->count = sizeof(*info->info_block);
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
	if (err) {
		dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
		goto out_err;
	}

	info->vq = vq;
	vq->priv = info;

	/* Save it to our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_add(&info->node, &vcdev->virtqueues);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	return vq;

out_err:
	if (vq)
		vring_del_virtqueue(vq);
	if (info) {
		if (info->queue)
			free_pages_exact(info->queue, size);
		kfree(info->info_block);
	}
	kfree(info);
	return ERR_PTR(err);
}

static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			       struct virtqueue *vqs[],
			       vq_callback_t *callbacks[],
			       const char *names[])
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	unsigned long *indicatorp = NULL;
	int ret, i;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return -ENOMEM;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
					     ccw);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			vqs[i] = NULL;
			goto out;
		}
	}
	ret = -ENOMEM;
	/* We need a data area under 2G to communicate. */
	indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
	if (!indicatorp)
		goto out;
	*indicatorp = (unsigned long) &vcdev->indicators;
	/* Register queue indicators with host. */
	vcdev->indicators = 0;
	ccw->cmd_code = CCW_CMD_SET_IND;
	ccw->flags = 0;
	ccw->count = sizeof(vcdev->indicators);
	ccw->cda = (__u32)(unsigned long) indicatorp;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
	if (ret)
		goto out;
	/* Register indicators2 with host for config changes */
	*indicatorp = (unsigned long) &vcdev->indicators2;
	vcdev->indicators2 = 0;
	ccw->cmd_code = CCW_CMD_SET_CONF_IND;
	ccw->flags = 0;
	ccw->count = sizeof(vcdev->indicators2);
	ccw->cda = (__u32)(unsigned long) indicatorp;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
	if (ret)
		goto out;

	kfree(indicatorp);
	kfree(ccw);
	return 0;

out:
	kfree(indicatorp);
	kfree(ccw);
	virtio_ccw_del_vqs(vdev);
	return ret;
}

static void virtio_ccw_reset(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	/* Zero status bits. */
	*vcdev->status = 0;

	/* Send a reset ccw on device. */
	ccw->cmd_code = CCW_CMD_VDEV_RESET;
	ccw->flags = 0;
	ccw->count = 0;
	ccw->cda = 0;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
	kfree(ccw);
}

static u32 virtio_ccw_get_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int ret, rc;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return 0;

	features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
	if (!features) {
		rc = 0;
		goto out_free;
	}
	/* Read the feature bits from the host. */
	/* TODO: Features > 32 bits */
	features->index = 0;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret) {
		rc = 0;
		goto out_free;
	}

	rc = le32_to_cpu(features->features);

out_free:
	kfree(features);
	kfree(ccw);
	return rc;
}

static void virtio_ccw_finalize_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int i;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
	if (!features)
		goto out_free;

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	for (i = 0; i < sizeof(*vdev->features) / sizeof(features->features);
	     i++) {
		int highbits = i % 2 ? 32 : 0;
		features->index = i;
		features->features = cpu_to_le32(vdev->features[i / 2]
						 >> highbits);
		/* Write the feature bits to the host. */
		ccw->cmd_code = CCW_CMD_WRITE_FEAT;
		ccw->flags = 0;
		ccw->count = sizeof(*features);
		ccw->cda = (__u32)(unsigned long)features;
		ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
	}
out_free:
	kfree(features);
	kfree(ccw);
}

static void virtio_ccw_get_config(struct virtio_device *vdev,
				  unsigned int offset, void *buf, unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int ret;
	struct ccw1 *ccw;
	void *config_area;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
	if (!config_area)
		goto out_free;

	/* Read the config area from the host. */
	ccw->cmd_code = CCW_CMD_READ_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
	if (ret)
		goto out_free;

	memcpy(vcdev->config, config_area, sizeof(vcdev->config));
	memcpy(buf, &vcdev->config[offset], len);

out_free:
	kfree(config_area);
	kfree(ccw);
}

static void virtio_ccw_set_config(struct virtio_device *vdev,
				  unsigned int offset, const void *buf,
				  unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;
	void *config_area;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
	if (!config_area)
		goto out_free;

	memcpy(&vcdev->config[offset], buf, len);
	/* Write the config area to the host. */
	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
	ccw->cmd_code = CCW_CMD_WRITE_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
	kfree(config_area);
	kfree(ccw);
}

static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	return *vcdev->status;
}

static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	/* Write the status to the host. */
	*vcdev->status = status;
	ccw->cmd_code = CCW_CMD_WRITE_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(status);
	ccw->cda = (__u32)(unsigned long)vcdev->status;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
	kfree(ccw);
}

static struct virtio_config_ops virtio_ccw_config_ops = {
	.get_features = virtio_ccw_get_features,
	.finalize_features = virtio_ccw_finalize_features,
	.get = virtio_ccw_get_config,
	.set = virtio_ccw_set_config,
	.get_status = virtio_ccw_get_status,
	.set_status = virtio_ccw_set_status,
	.reset = virtio_ccw_reset,
	.find_vqs = virtio_ccw_find_vqs,
	.del_vqs = virtio_ccw_del_vqs,
};

/*
 * ccw bus driver related functions
 */
static void virtio_ccw_release_dev(struct device *_d)
{
	struct virtio_device *dev = container_of(_d, struct virtio_device,
						 dev);
	struct virtio_ccw_device *vcdev = to_vc_device(dev);

	kfree(vcdev->status);
	kfree(vcdev->config_block);
	kfree(vcdev);
}

static int irb_is_error(struct irb *irb)
{
	if (scsw_cstat(&irb->scsw) != 0)
		return 1;
	if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return 1;
	if (scsw_cc(&irb->scsw) != 0)
		return 1;
	return 0;
}

static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
					      int index)
{
	struct virtio_ccw_vq_info *info;
	unsigned long flags;
	struct virtqueue *vq;

	vq = NULL;
	spin_lock_irqsave(&vcdev->lock, flags);
	list_for_each_entry(info, &vcdev->virtqueues, node) {
		if (info->vq->index == index) {
			vq = info->vq;
			break;
		}
	}
	spin_unlock_irqrestore(&vcdev->lock, flags);
	return vq;
}

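/*
 * Interrupt handler: completions of our own channel programs are matched
 * against curr_io via the intparm and wake up ccw_io_helper(); bits set in
 * indicators are virtqueue notifications from the host, and bit 0 of
 * indicators2 signals a config change.
 */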
static void virtio_ccw_int_handler(struct ccw_device *cdev,
				   unsigned long intparm,
				   struct irb *irb)
{
	__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int i;
	struct virtqueue *vq;
	struct virtio_driver *drv;

	/* Check if it's a notification from the host. */
	if ((intparm == 0) &&
	    (scsw_stctl(&irb->scsw) ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
		/* OK */
	}
	if (irb_is_error(irb))
		vcdev->err = -EIO; /* XXX - use real error */
	if (vcdev->curr_io & activity) {
		switch (activity) {
		case VIRTIO_CCW_DOING_READ_FEAT:
		case VIRTIO_CCW_DOING_WRITE_FEAT:
		case VIRTIO_CCW_DOING_READ_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_STATUS:
		case VIRTIO_CCW_DOING_SET_VQ:
		case VIRTIO_CCW_DOING_SET_IND:
		case VIRTIO_CCW_DOING_SET_CONF_IND:
		case VIRTIO_CCW_DOING_RESET:
		case VIRTIO_CCW_DOING_READ_VQ_CONF:
			vcdev->curr_io &= ~activity;
			wake_up(&vcdev->wait_q);
			break;
		default:
			/* don't know what to do... */
			dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
				 activity);
			WARN_ON(1);
			break;
		}
	}
	for_each_set_bit(i, &vcdev->indicators,
			 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
		/* The bit clear must happen before the vring kick. */
		clear_bit(i, &vcdev->indicators);
		barrier();
		vq = virtio_ccw_vq_by_ind(vcdev, i);
		vring_interrupt(0, vq);
	}
	if (test_bit(0, &vcdev->indicators2)) {
		drv = container_of(vcdev->vdev.dev.driver,
				   struct virtio_driver, driver);

		if (drv && drv->config_changed)
			drv->config_changed(&vcdev->vdev);
		clear_bit(0, &vcdev->indicators2);
	}
}

/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
		     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");

static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
	struct ccw_dev_id id;

	ccw_device_get_id(cdev, &id);
	if (test_bit(id.devno, devs_no_auto[id.ssid]))
		return 0;
	return 1;
}

static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}

static int virtio_ccw_probe(struct ccw_device *cdev)
{
	cdev->handler = virtio_ccw_int_handler;
	if (virtio_ccw_check_autoonline(cdev))
		async_schedule(virtio_ccw_auto_online, cdev);
	return 0;
}

static void virtio_ccw_remove(struct ccw_device *cdev)
{
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

	if (cdev->online) {
		unregister_virtio_device(&vcdev->vdev);
		dev_set_drvdata(&cdev->dev, NULL);
	}
	cdev->handler = NULL;
}

static int virtio_ccw_offline(struct ccw_device *cdev)
{
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

	unregister_virtio_device(&vcdev->vdev);
	dev_set_drvdata(&cdev->dev, NULL);
	return 0;
}

static int virtio_ccw_online(struct ccw_device *cdev)
{
	int ret;
	struct virtio_ccw_device *vcdev;

	vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
	if (!vcdev) {
		dev_warn(&cdev->dev, "Could not get memory for virtio\n");
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
				      GFP_DMA | GFP_KERNEL);
	if (!vcdev->config_block) {
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
	if (!vcdev->status) {
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->vdev.dev.parent = &cdev->dev;
	vcdev->vdev.dev.release = virtio_ccw_release_dev;
	vcdev->vdev.config = &virtio_ccw_config_ops;
	vcdev->cdev = cdev;
	init_waitqueue_head(&vcdev->wait_q);
	INIT_LIST_HEAD(&vcdev->virtqueues);
	spin_lock_init(&vcdev->lock);

	dev_set_drvdata(&cdev->dev, vcdev);
	vcdev->vdev.id.vendor = cdev->id.cu_type;
	vcdev->vdev.id.device = cdev->id.cu_model;
	ret = register_virtio_device(&vcdev->vdev);
	if (ret) {
		dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
			 ret);
		goto out_put;
	}
	return 0;
out_put:
	dev_set_drvdata(&cdev->dev, NULL);
	put_device(&vcdev->vdev.dev);
	return ret;
out_free:
	if (vcdev) {
		kfree(vcdev->status);
		kfree(vcdev->config_block);
	}
	kfree(vcdev);
	return ret;
}

static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
	/* TODO: Check whether we need special handling here. */
	return 0;
}

static struct ccw_device_id virtio_ids[] = {
	{ CCW_DEVICE(0x3832, 0) },
	{},
};
MODULE_DEVICE_TABLE(ccw, virtio_ids);

static struct ccw_driver virtio_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "virtio_ccw",
	},
	.ids = virtio_ids,
	.probe = virtio_ccw_probe,
	.remove = virtio_ccw_remove,
	.set_offline = virtio_ccw_offline,
	.set_online = virtio_ccw_online,
	.notify = virtio_ccw_cio_notify,
	.int_class = IRQIO_VIR,
};

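/*
 * Helpers for parsing the no_auto= module parameter: a comma-separated list
 * of bus ids (cssid.ssid.devno), single or as '-'-separated ranges, is
 * turned into bits in the devs_no_auto bitmaps consulted by
 * virtio_ccw_check_autoonline().
 */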
static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
			   int max_digit, int max_val)
{
	int diff;

	diff = 0;
	*val = 0;

	while (diff <= max_digit) {
		int value = hex_to_bin(**cp);

		if (value < 0)
			break;
		*val = *val * 16 + value;
		(*cp)++;
		diff++;
	}

	if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
		return 1;

	return 0;
}

static int __init parse_busid(char *str, unsigned int *cssid,
			      unsigned int *ssid, unsigned int *devno)
{
	char *str_work;
	int rc, ret;

	rc = 1;

	if (*str == '\0')
		goto out;

	str_work = str;
	ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
	if (ret || (str_work[0] != '\0'))
		goto out;

	rc = 0;
out:
	return rc;
}

static void __init no_auto_parse(void)
{
	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
	char *parm, *str;
	int rc;

	str = no_auto;
	while ((parm = strsep(&str, ","))) {
		rc = parse_busid(strsep(&parm, "-"), &from_cssid,
				 &from_ssid, &from);
		if (rc)
			continue;
		if (parm != NULL) {
			rc = parse_busid(parm, &to_cssid,
					 &to_ssid, &to);
			if ((from_ssid > to_ssid) ||
			    ((from_ssid == to_ssid) && (from > to)))
				rc = -EINVAL;
		} else {
			to_cssid = from_cssid;
			to_ssid = from_ssid;
			to = from;
		}
		if (rc)
			continue;
		while ((from_ssid < to_ssid) ||
		       ((from_ssid == to_ssid) && (from <= to))) {
			set_bit(from, devs_no_auto[from_ssid]);
			from++;
			if (from > __MAX_SUBCHANNEL) {
				from_ssid++;
				from = 0;
			}
		}
	}
}

static int __init virtio_ccw_init(void)
{
	/* parse no_auto string before we do anything further */
	no_auto_parse();
	return ccw_driver_register(&virtio_ccw_driver);
}
module_init(virtio_ccw_init);

static void __exit virtio_ccw_exit(void)
{
	ccw_driver_unregister(&virtio_ccw_driver);
}
module_exit(virtio_ccw_exit);