/*
 * drivers/uio/uio.c
 *
 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de>
 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
 *
 * Userspace IO
 *
 * Base Functions
 *
 * Licensed under the GPLv2 only.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/uio_driver.h>

#define UIO_MAX_DEVICES 255

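/*
 * Per-device state: one uio_device ties the driver-supplied uio_info
 * to its character device minor, the interrupt event counter, and the
 * wait queue that blocked readers and pollers sleep on.
 */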
struct uio_device {
        struct module           *owner;
        struct device           *dev;
        int                     minor;
        atomic_t                event;
        struct fasync_struct    *async_queue;
        wait_queue_head_t       wait;
        int                     vma_count;
        struct uio_info         *info;
        struct kset             map_attr_kset;
};

static int uio_major;
static DEFINE_IDR(uio_idr);
static struct file_operations uio_fops;

/* UIO class infrastructure */
static struct uio_class {
        struct kref kref;
        struct class *class;
} *uio_class;

/*
 * attributes
 */

static struct attribute attr_addr = {
        .name = "addr",
        .mode = S_IRUGO,
};

static struct attribute attr_size = {
        .name = "size",
        .mode = S_IRUGO,
};

static struct attribute *map_attrs[] = {
        &attr_addr, &attr_size, NULL
};

static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
                             char *buf)
{
        struct uio_mem *mem = container_of(kobj, struct uio_mem, kobj);

        if (strncmp(attr->name, "addr", 4) == 0)
                return sprintf(buf, "0x%lx\n", mem->addr);

        if (strncmp(attr->name, "size", 4) == 0)
                return sprintf(buf, "0x%lx\n", mem->size);

        return -ENODEV;
}

static void map_attr_release(struct kobject *kobj)
{
        /* TODO ??? */
}

static struct sysfs_ops map_attr_ops = {
        .show = map_attr_show,
};

static struct kobj_type map_attr_type = {
        .release        = map_attr_release,
        .sysfs_ops      = &map_attr_ops,
        .default_attrs  = map_attrs,
};

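/*
 * Each memory mapping of a registered device shows up in sysfs as
 * /sys/class/uio/uioX/maps/mapY/ with read-only "addr" and "size"
 * files, both answered by map_attr_show() above.
 */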
static ssize_t show_name(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct uio_device *idev = dev_get_drvdata(dev);
        if (idev)
                return sprintf(buf, "%s\n", idev->info->name);
        else
                return -ENODEV;
}
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);

static ssize_t show_version(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct uio_device *idev = dev_get_drvdata(dev);
        if (idev)
                return sprintf(buf, "%s\n", idev->info->version);
        else
                return -ENODEV;
}
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);

static ssize_t show_event(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct uio_device *idev = dev_get_drvdata(dev);
        if (idev)
                return sprintf(buf, "%u\n",
                               (unsigned int)atomic_read(&idev->event));
        else
                return -ENODEV;
}
static DEVICE_ATTR(event, S_IRUGO, show_event, NULL);

static struct attribute *uio_attrs[] = {
        &dev_attr_name.attr,
        &dev_attr_version.attr,
        &dev_attr_event.attr,
        NULL,
};

static struct attribute_group uio_attr_grp = {
        .attrs = uio_attrs,
};

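/*
 * The group above becomes /sys/class/uio/uioX/name, .../version and
 * .../event once uio_dev_add_attributes() has registered it.
 */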
/*
 * device functions
 */
static int uio_dev_add_attributes(struct uio_device *idev)
{
        int ret;
        int mi;
        int map_found = 0;
        struct uio_mem *mem;

        ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp);
        if (ret)
                goto err_group;

        for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
                mem = &idev->info->mem[mi];
                if (mem->size == 0)
                        break;
                if (!map_found) {
                        /* Create the "maps" kset lazily, on the first
                         * mapping that actually has a size. */
                        map_found = 1;
                        kobject_set_name(&idev->map_attr_kset.kobj, "maps");
                        idev->map_attr_kset.ktype = &map_attr_type;
                        idev->map_attr_kset.kobj.parent = &idev->dev->kobj;
                        ret = kset_register(&idev->map_attr_kset);
                        if (ret)
                                goto err_remove_group;
                }
                kobject_init(&mem->kobj);
                kobject_set_name(&mem->kobj, "map%d", mi);
                mem->kobj.parent = &idev->map_attr_kset.kobj;
                mem->kobj.kset = &idev->map_attr_kset;
                ret = kobject_add(&mem->kobj);
                if (ret)
                        goto err_remove_maps;
        }

        return 0;

err_remove_maps:
        for (mi--; mi >= 0; mi--) {
                mem = &idev->info->mem[mi];
                kobject_unregister(&mem->kobj);
        }
        kset_unregister(&idev->map_attr_kset); /* Needed ? */
err_remove_group:
        sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
err_group:
        dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
        return ret;
}

static void uio_dev_del_attributes(struct uio_device *idev)
{
        int mi;
        struct uio_mem *mem;

        for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
                mem = &idev->info->mem[mi];
                if (mem->size == 0)
                        break;
                kobject_unregister(&mem->kobj);
        }
        kset_unregister(&idev->map_attr_kset);
        sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
}

static int uio_get_minor(struct uio_device *idev)
{
        static DEFINE_MUTEX(minor_lock);
        int retval = -ENOMEM;
        int id;

        mutex_lock(&minor_lock);
        if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
                goto exit;

        retval = idr_get_new(&uio_idr, idev, &id);
        if (retval < 0) {
                if (retval == -EAGAIN)
                        retval = -ENOMEM;
                goto exit;
        }
        idev->minor = id & MAX_ID_MASK;
exit:
        mutex_unlock(&minor_lock);
        return retval;
}

static void uio_free_minor(struct uio_device *idev)
{
        idr_remove(&uio_idr, idev->minor);
}

/**
 * uio_event_notify - trigger an interrupt event
 * @info: UIO device capabilities
 */
void uio_event_notify(struct uio_info *info)
{
        struct uio_device *idev = info->uio_dev;

        atomic_inc(&idev->event);
        wake_up_interruptible(&idev->wait);
        kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(uio_event_notify);

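/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * device without a real interrupt line can call uio_event_notify()
 * itself, e.g. from a kernel timer, to wake readers of /dev/uioX:
 *
 *	static void my_timer_fn(unsigned long data)
 *	{
 *		uio_event_notify((struct uio_info *)data);
 *	}
 */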
/**
 * uio_interrupt - hardware interrupt handler
 * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
 * @dev_id: Pointer to the device's uio_device structure
 */
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
        struct uio_device *idev = (struct uio_device *)dev_id;
        irqreturn_t ret = idev->info->handler(irq, idev->info);

        if (ret == IRQ_HANDLED)
                uio_event_notify(idev->info);

        return ret;
}

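/*
 * Every open file descriptor gets its own listener, so several
 * processes can wait for events independently; event_count holds the
 * event counter value this listener has already consumed.
 */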
struct uio_listener {
        struct uio_device *dev;
        s32 event_count;
};

static int uio_open(struct inode *inode, struct file *filep)
{
        struct uio_device *idev;
        struct uio_listener *listener;
        int ret = 0;

        idev = idr_find(&uio_idr, iminor(inode));
        if (!idev)
                return -ENODEV;

        listener = kmalloc(sizeof(*listener), GFP_KERNEL);
        if (!listener)
                return -ENOMEM;

        listener->dev = idev;
        listener->event_count = atomic_read(&idev->event);
        filep->private_data = listener;

        if (idev->info->open) {
                if (!try_module_get(idev->owner)) {
                        /* don't leak the listener on this error path */
                        kfree(listener);
                        return -ENODEV;
                }
                ret = idev->info->open(idev->info, inode);
                module_put(idev->owner);
        }

        if (ret)
                kfree(listener);

        return ret;
}

static int uio_fasync(int fd, struct file *filep, int on)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;

        return fasync_helper(fd, filep, on, &idev->async_queue);
}

static int uio_release(struct inode *inode, struct file *filep)
{
        int ret = 0;
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;

        if (idev->info->release) {
                if (!try_module_get(idev->owner))
                        return -ENODEV;
                ret = idev->info->release(idev->info, inode);
                module_put(idev->owner);
        }
        if (filep->f_flags & FASYNC)
                ret = uio_fasync(-1, filep, 0);
        kfree(listener);
        return ret;
}

static unsigned int uio_poll(struct file *filep, poll_table *wait)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;

        if (idev->info->irq == UIO_IRQ_NONE)
                return -EIO;

        poll_wait(filep, &idev->wait, wait);
        if (listener->event_count != atomic_read(&idev->event))
                return POLLIN | POLLRDNORM;
        return 0;
}

static ssize_t uio_read(struct file *filep, char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        DECLARE_WAITQUEUE(wait, current);
        ssize_t retval;
        s32 event_count;

        if (idev->info->irq == UIO_IRQ_NONE)
                return -EIO;

        if (count != sizeof(s32))
                return -EINVAL;

        add_wait_queue(&idev->wait, &wait);

        do {
                set_current_state(TASK_INTERRUPTIBLE);

                event_count = atomic_read(&idev->event);
                if (event_count != listener->event_count) {
                        if (copy_to_user(buf, &event_count, count))
                                retval = -EFAULT;
                        else {
                                listener->event_count = event_count;
                                retval = count;
                        }
                        break;
                }

                if (filep->f_flags & O_NONBLOCK) {
                        retval = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                schedule();
        } while (1);

        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&idev->wait, &wait);

        return retval;
}

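/*
 * Userspace side (a sketch, assuming the device node is /dev/uio0):
 * a blocking read of exactly sizeof(s32) bytes returns the current
 * event count once it differs from the last one seen on this fd:
 *
 *	int32_t count;
 *	int fd = open("/dev/uio0", O_RDONLY);
 *
 *	if (fd >= 0 && read(fd, &count, 4) == 4)
 *		printf("events so far: %d\n", count);
 */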
static int uio_find_mem_index(struct vm_area_struct *vma)
{
        int mi;
        struct uio_device *idev = vma->vm_private_data;

        for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
                if (idev->info->mem[mi].size == 0)
                        return -1;
                if (vma->vm_pgoff == mi)
                        return mi;
        }
        return -1;
}

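/*
 * The mapping is selected via the mmap() offset: userspace passes
 * N * PAGE_SIZE as offset to map mem[N], so vm_pgoff is simply the
 * index into the mem[] array.
 */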
static void uio_vma_open(struct vm_area_struct *vma)
{
        struct uio_device *idev = vma->vm_private_data;
        idev->vma_count++;
}

static void uio_vma_close(struct vm_area_struct *vma)
{
        struct uio_device *idev = vma->vm_private_data;
        idev->vma_count--;
}

static struct page *uio_vma_nopage(struct vm_area_struct *vma,
                                   unsigned long address, int *type)
{
        struct uio_device *idev = vma->vm_private_data;
        struct page *page = NOPAGE_SIGBUS;

        int mi = uio_find_mem_index(vma);
        if (mi < 0)
                return page;

        if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
                page = virt_to_page(idev->info->mem[mi].addr);
        else
                page = vmalloc_to_page((void *)idev->info->mem[mi].addr);
        get_page(page);
        if (type)
                *type = VM_FAULT_MINOR;
        return page;
}

static struct vm_operations_struct uio_vm_ops = {
        .open = uio_vma_open,
        .close = uio_vma_close,
        .nopage = uio_vma_nopage,
};

static int uio_mmap_physical(struct vm_area_struct *vma)
{
        struct uio_device *idev = vma->vm_private_data;
        int mi = uio_find_mem_index(vma);
        if (mi < 0)
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_RESERVED;

        return remap_pfn_range(vma,
                               vma->vm_start,
                               idev->info->mem[mi].addr >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

static int uio_mmap_logical(struct vm_area_struct *vma)
{
        vma->vm_flags |= VM_RESERVED;
        vma->vm_ops = &uio_vm_ops;
        uio_vma_open(vma);
        return 0;
}

static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        int mi;
        unsigned long requested_pages, actual_pages;
        int ret = 0;

        if (vma->vm_end < vma->vm_start)
                return -EINVAL;

        vma->vm_private_data = idev;

        mi = uio_find_mem_index(vma);
        if (mi < 0)
                return -EINVAL;

        requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        actual_pages = (idev->info->mem[mi].size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (requested_pages > actual_pages)
                return -EINVAL;

        if (idev->info->mmap) {
                if (!try_module_get(idev->owner))
                        return -ENODEV;
                ret = idev->info->mmap(idev->info, vma);
                module_put(idev->owner);
                return ret;
        }

        switch (idev->info->mem[mi].memtype) {
        case UIO_MEM_PHYS:
                return uio_mmap_physical(vma);
        case UIO_MEM_LOGICAL:
        case UIO_MEM_VIRTUAL:
                return uio_mmap_logical(vma);
        default:
                return -EINVAL;
        }
}

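/*
 * Userspace side (a sketch): mapping mem[1] of uio0 means passing one
 * page times the mapping index as the mmap() offset:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 1 * getpagesize());
 */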
static struct file_operations uio_fops = {
        .owner          = THIS_MODULE,
        .open           = uio_open,
        .release        = uio_release,
        .read           = uio_read,
        .mmap           = uio_mmap,
        .poll           = uio_poll,
        .fasync         = uio_fasync,
};

static int uio_major_init(void)
{
        uio_major = register_chrdev(0, "uio", &uio_fops);
        if (uio_major < 0)
                return uio_major;
        return 0;
}

static void uio_major_cleanup(void)
{
        unregister_chrdev(uio_major, "uio");
}

static int init_uio_class(void)
{
        int ret = 0;

        if (uio_class != NULL) {
                kref_get(&uio_class->kref);
                goto exit;
        }

        /* This is the first time in here, set everything up properly */
        ret = uio_major_init();
        if (ret)
                goto exit;

        uio_class = kzalloc(sizeof(*uio_class), GFP_KERNEL);
        if (!uio_class) {
                ret = -ENOMEM;
                goto err_kzalloc;
        }

        kref_init(&uio_class->kref);
        uio_class->class = class_create(THIS_MODULE, "uio");
        if (IS_ERR(uio_class->class)) {
                /* use PTR_ERR, not IS_ERR, to return the real error code */
                ret = PTR_ERR(uio_class->class);
                printk(KERN_ERR "class_create failed for uio\n");
                goto err_class_create;
        }
        return 0;

err_class_create:
        kfree(uio_class);
        uio_class = NULL;
err_kzalloc:
        uio_major_cleanup();
exit:
        return ret;
}

static void release_uio_class(struct kref *kref)
{
        /* Ok, we cheat as we know we only have one uio_class */
        class_destroy(uio_class->class);
        kfree(uio_class);
        uio_major_cleanup();
        uio_class = NULL;
}

static void uio_class_destroy(void)
{
        if (uio_class)
                kref_put(&uio_class->kref, release_uio_class);
}

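/*
 * The class, the character major and the uio_class wrapper are
 * refcounted: they come into being with the first registered device,
 * and release_uio_class() tears them down when the last one goes.
 */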
/**
 * __uio_register_device - register a new userspace IO device
 * @owner: module that creates the new device
 * @parent: parent device
 * @info: UIO device capabilities
 *
 * returns zero on success or a negative error code.
 */
int __uio_register_device(struct module *owner,
                          struct device *parent,
                          struct uio_info *info)
{
        struct uio_device *idev;
        int ret = 0;

        if (!parent || !info || !info->name || !info->version)
                return -EINVAL;

        info->uio_dev = NULL;

        ret = init_uio_class();
        if (ret)
                return ret;

        idev = kzalloc(sizeof(*idev), GFP_KERNEL);
        if (!idev) {
                ret = -ENOMEM;
                goto err_kzalloc;
        }

        idev->owner = owner;
        idev->info = info;
        init_waitqueue_head(&idev->wait);
        atomic_set(&idev->event, 0);

        ret = uio_get_minor(idev);
        if (ret)
                goto err_get_minor;

        idev->dev = device_create(uio_class->class, parent,
                                  MKDEV(uio_major, idev->minor),
                                  "uio%d", idev->minor);
        if (IS_ERR(idev->dev)) {
                printk(KERN_ERR "UIO: device register failed\n");
                ret = PTR_ERR(idev->dev);
                goto err_device_create;
        }
        dev_set_drvdata(idev->dev, idev);

        ret = uio_dev_add_attributes(idev);
        if (ret)
                goto err_uio_dev_add_attributes;

        info->uio_dev = idev;

        if (idev->info->irq >= 0) {
                ret = request_irq(idev->info->irq, uio_interrupt,
                                  idev->info->irq_flags, idev->info->name, idev);
                if (ret)
                        goto err_request_irq;
        }

        return 0;

err_request_irq:
        uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
        device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
err_device_create:
        uio_free_minor(idev);
err_get_minor:
        kfree(idev);
err_kzalloc:
        uio_class_destroy();
        return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);

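/*
 * Registration sketch (hypothetical driver; names and the address are
 * invented for illustration). A driver fills a struct uio_info and
 * calls uio_register_device(), the uio_driver.h wrapper that passes
 * THIS_MODULE as @owner:
 *
 *	static struct uio_info my_info = {
 *		.name    = "my_uio",
 *		.version = "0.1",
 *		.irq     = UIO_IRQ_NONE,
 *	};
 *
 *	static int my_probe(struct device *dev)
 *	{
 *		my_info.mem[0].addr    = 0xd0000000;	(invented)
 *		my_info.mem[0].size    = 0x1000;
 *		my_info.mem[0].memtype = UIO_MEM_PHYS;
 *		return uio_register_device(dev, &my_info);
 *	}
 */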
/**
 * uio_unregister_device - unregister a userspace IO device
 * @info: UIO device capabilities
 *
 */
void uio_unregister_device(struct uio_info *info)
{
        struct uio_device *idev;

        if (!info || !info->uio_dev)
                return;

        idev = info->uio_dev;

        uio_free_minor(idev);

        if (info->irq >= 0)
                free_irq(info->irq, idev);

        uio_dev_del_attributes(idev);

        dev_set_drvdata(idev->dev, NULL);
        device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
        kfree(idev);
        uio_class_destroy();

        return;
}
EXPORT_SYMBOL_GPL(uio_unregister_device);

static int __init uio_init(void)
{
        return 0;
}

static void __exit uio_exit(void)
{
}

module_init(uio_init)
module_exit(uio_exit)
MODULE_LICENSE("GPL v2");