/*
 * linux/drivers/char/raw.c
 *
 * Front-end raw character devices.  These can be bound to any block
 * devices to provide genuine Unix raw character device semantics.
 *
 * We reserve minor number 0 for a control interface.  ioctl()s on this
 * device are used to bind the other minor numbers to block devices.
 */
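/*
 * Minimal userspace sketch of the binding ioctl described above (not part
 * of this driver).  The control node name /dev/rawctl and the example
 * block device numbers 8:17 (e.g. /dev/sdb1) are assumptions for
 * illustration only:
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/raw.h>
 *
 *	struct raw_config_request rq = {
 *		.raw_minor   = 1,	// bind /dev/raw/raw1 ...
 *		.block_major = 8,	// ... to block device 8:17
 *		.block_minor = 17,	// (assumed example device)
 *	};
 *	int fd = open("/dev/rawctl", O_RDWR);
 *	ioctl(fd, RAW_SETBIND, &rq);	// needs CAP_SYS_ADMIN
 */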
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raw.h>
#include <linux/capability.h>
#include <linux/uio.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>

struct raw_device_data {
	struct block_device *binding;
	int inuse;
};

static struct class *raw_class;
static struct raw_device_data raw_devices[MAX_RAW_MINORS];
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops; /* forward declaration */

/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible.  This gives the
 * finest possible alignment and has no adverse impact on performance.
 */
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;

	if (minor == 0) {	/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	lock_kernel();
	mutex_lock(&raw_mutex);

	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;
	igrab(bdev->bd_inode);
	err = blkdev_get(bdev, filp->f_mode, 0);
	if (err)
		goto out;
	err = bd_claim(bdev, raw_open);
	if (err)
		goto out1;
	err = set_blocksize(bdev, bdev_hardsect_size(bdev));
	if (err)
		goto out2;
	filp->f_flags |= O_DIRECT;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	if (++raw_devices[minor].inuse == 1)
		filp->f_path.dentry->d_inode->i_mapping =
			bdev->bd_inode->i_mapping;
	filp->private_data = bdev;
	mutex_unlock(&raw_mutex);
	unlock_kernel();
	return 0;

out2:
	bd_release(bdev);
out1:
	blkdev_put(bdev);
out:
	mutex_unlock(&raw_mutex);
	unlock_kernel();	/* don't leak the BKL on the error paths */
	return err;
}
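/*
 * Because raw_open() forces O_DIRECT and drops the soft blocksize to the
 * hardware sector size, userspace I/O on a bound /dev/raw/rawN node must use
 * buffers, file offsets and transfer lengths aligned to that sector size.
 * A hedged sketch; the 512-byte sector size and the device name are
 * assumptions, not guarantees of this driver:
 *
 *	void *buf;
 *	posix_memalign(&buf, 512, 4096);	// sector-aligned buffer
 *	int fd = open("/dev/raw/raw1", O_RDONLY);
 *	read(fd, buf, 4096);			// length is a multiple of 512
 */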
/*
 * When the final fd which refers to this character-special node is closed, we
 * make its ->mapping point back at its own i_data.
 */
static int raw_release(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;

	mutex_lock(&raw_mutex);
	bdev = raw_devices[minor].binding;
	if (--raw_devices[minor].inuse == 0) {
		/* Here inode->i_mapping == bdev->bd_inode->i_mapping */
		inode->i_mapping = &inode->i_data;
		inode->i_mapping->backing_dev_info = &default_backing_dev_info;
	}
	mutex_unlock(&raw_mutex);

	bd_release(bdev);
	blkdev_put(bdev);
	return 0;
}
/*
 * Forward ioctls to the underlying block device.
 */
static int
raw_ioctl(struct inode *inode, struct file *filp,
		unsigned int command, unsigned long arg)
{
	struct block_device *bdev = filp->private_data;

	return blkdev_ioctl(bdev->bd_inode, NULL, command, arg);
}
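/*
 * Example of the forwarding above: a block-layer ioctl issued on the raw fd
 * is handled by the bound block device.  Sketch only; BLKGETSIZE64 is a
 * standard block ioctl, the device name is an assumption:
 *
 *	unsigned long long size;
 *	int fd = open("/dev/raw/raw1", O_RDONLY);
 *	ioctl(fd, BLKGETSIZE64, &size);		// size of the bound blockdev
 */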
static void bind_device(struct raw_config_request *rq)
{
	device_destroy(raw_class, MKDEV(RAW_MAJOR, rq->raw_minor));
	device_create_drvdata(raw_class, NULL, MKDEV(RAW_MAJOR, rq->raw_minor),
			      NULL, "raw%d", rq->raw_minor);
}
/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
			 unsigned int command, unsigned long arg)
{
	struct raw_config_request rq;
	struct raw_device_data *rawdev;
	int err = 0;

	switch (command) {
	case RAW_SETBIND:
	case RAW_GETBIND:

		/* First, find out which raw minor we want */

		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) {
			err = -EFAULT;
			goto out;
		}

		if (rq.raw_minor <= 0 || rq.raw_minor >= MAX_RAW_MINORS) {
			err = -EINVAL;
			goto out;
		}
		rawdev = &raw_devices[rq.raw_minor];

		if (command == RAW_SETBIND) {
			dev_t dev;

			/*
			 * This is like making block devices, so demand the
			 * same capability
			 */
			if (!capable(CAP_SYS_ADMIN)) {
				err = -EPERM;
				goto out;
			}

			/*
			 * For now, we don't need to check that the underlying
			 * block device is present or not: we can do that when
			 * the raw device is opened.  Just check that the
			 * major/minor numbers make sense.
			 */
			dev = MKDEV(rq.block_major, rq.block_minor);
			if ((rq.block_major == 0 && rq.block_minor != 0) ||
			    MAJOR(dev) != rq.block_major ||
			    MINOR(dev) != rq.block_minor) {
				err = -EINVAL;
				goto out;
			}

			mutex_lock(&raw_mutex);
			if (rawdev->inuse) {
				mutex_unlock(&raw_mutex);
				err = -EBUSY;
				goto out;
			}
			if (rawdev->binding) {
				bdput(rawdev->binding);
				module_put(THIS_MODULE);
			}
			if (rq.block_major == 0 && rq.block_minor == 0) {
				/* unbind */
				rawdev->binding = NULL;
				device_destroy(raw_class,
					       MKDEV(RAW_MAJOR, rq.raw_minor));
			} else {
				rawdev->binding = bdget(dev);
				if (rawdev->binding == NULL)
					err = -ENOMEM;
				else {
					__module_get(THIS_MODULE);
					bind_device(&rq);
				}
			}
			mutex_unlock(&raw_mutex);
		} else {
			struct block_device *bdev;

			mutex_lock(&raw_mutex);
			bdev = rawdev->binding;
			if (bdev) {
				rq.block_major = MAJOR(bdev->bd_dev);
				rq.block_minor = MINOR(bdev->bd_dev);
			} else {
				rq.block_major = rq.block_minor = 0;
			}
			mutex_unlock(&raw_mutex);
			if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) {
				err = -EFAULT;
				goto out;
			}
		}
		break;
	default:
		err = -EINVAL;
		break;
	}
out:
	return err;
}
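/*
 * Querying and clearing a binding go through the same control device.  A
 * hedged userspace sketch, reusing the control fd from the RAW_SETBIND
 * example near the top of this file (names assumed for illustration):
 *
 *	struct raw_config_request rq = { .raw_minor = 1 };
 *	ioctl(fd, RAW_GETBIND, &rq);	// block_major/block_minor now hold
 *					// the bound device, or 0/0 if unbound
 *	rq.block_major = rq.block_minor = 0;
 *	ioctl(fd, RAW_SETBIND, &rq);	// major 0, minor 0 unbinds raw1
 */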
static const struct file_operations raw_fops = {
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write_nolock,
	.open		= raw_open,
	.release	= raw_release,
	.ioctl		= raw_ioctl,
	.owner		= THIS_MODULE,
};

static const struct file_operations raw_ctl_fops = {
	.ioctl		= raw_ctl_ioctl,
	.open		= raw_open,
	.owner		= THIS_MODULE,
};
static struct cdev raw_cdev;

static int __init raw_init(void)
{
	dev_t dev = MKDEV(RAW_MAJOR, 0);
	int ret;

	ret = register_chrdev_region(dev, MAX_RAW_MINORS, "raw");
	if (ret)
		goto error;

	cdev_init(&raw_cdev, &raw_fops);
	ret = cdev_add(&raw_cdev, dev, MAX_RAW_MINORS);
	if (ret) {
		kobject_put(&raw_cdev.kobj);
		goto error_region;
	}

	raw_class = class_create(THIS_MODULE, "raw");
	if (IS_ERR(raw_class)) {
		printk(KERN_ERR "Error creating raw class.\n");
		cdev_del(&raw_cdev);
		ret = PTR_ERR(raw_class);
		goto error_region;
	}

	device_create_drvdata(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL,
			      "rawctl");

	return 0;

error_region:
	unregister_chrdev_region(dev, MAX_RAW_MINORS);
error:
	return ret;
}
static void __exit raw_exit(void)
{
	device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
	class_destroy(raw_class);
	cdev_del(&raw_cdev);
	unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), MAX_RAW_MINORS);
}

module_init(raw_init);
module_exit(raw_exit);

MODULE_LICENSE("GPL");