raw.c

/*
 * linux/drivers/char/raw.c
 *
 * Front-end raw character devices. These can be bound to any block
 * devices to provide genuine Unix raw character device semantics.
 *
 * We reserve minor number 0 for a control interface. ioctl()s on this
 * device are used to bind the other minor numbers to block devices.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raw.h>
#include <linux/capability.h>
#include <linux/uio.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>
#include <linux/gfp.h>

#include <asm/uaccess.h>
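
/*
 * Per-minor state: "binding" is the block device this raw minor is bound to
 * (NULL while unbound), and "inuse" counts how many file descriptors
 * currently have this raw device open.
 */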
struct raw_device_data {
        struct block_device *binding;
        int inuse;
};

static struct class *raw_class;
static struct raw_device_data raw_devices[MAX_RAW_MINORS];
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops; /* forward declaration */

/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible. This gives the
 * finest possible alignment and has no adverse impact on performance.
 */
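
/*
 * Illustrative userspace sketch (not part of this driver): because the file
 * is forced to O_DIRECT, reads and writes on a bound raw device should use
 * buffers, offsets and lengths aligned to the device's logical block size
 * (512 bytes is assumed below):
 *
 *      void *buf;
 *      int fd = open("/dev/raw/raw1", O_RDONLY);
 *
 *      posix_memalign(&buf, 512, 4096);
 *      pread(fd, buf, 4096, 0);
 */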
static int raw_open(struct inode *inode, struct file *filp)
{
        const int minor = iminor(inode);
        struct block_device *bdev;
        int err;

        if (minor == 0) {       /* It is the control device */
                filp->f_op = &raw_ctl_fops;
                return 0;
        }

        lock_kernel();
        mutex_lock(&raw_mutex);

        /*
         * All we need to do on open is check that the device is bound.
         */
        bdev = raw_devices[minor].binding;
        err = -ENODEV;
        if (!bdev)
                goto out;
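        /*
         * Pin the blockdev inode, open the block device with this file's
         * mode, and claim it (raw_open is used as the holder cookie).
         */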
        igrab(bdev->bd_inode);
        err = blkdev_get(bdev, filp->f_mode);
        if (err)
                goto out;
        err = bd_claim(bdev, raw_open);
        if (err)
                goto out1;
        err = set_blocksize(bdev, bdev_logical_block_size(bdev));
        if (err)
                goto out2;
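
        /*
         * Route all I/O straight at the block device: force O_DIRECT and,
         * on the first open of this minor, point the character device
         * inode's i_mapping at the blockdev's address_space as well.
         */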
        filp->f_flags |= O_DIRECT;
        filp->f_mapping = bdev->bd_inode->i_mapping;
        if (++raw_devices[minor].inuse == 1)
                filp->f_path.dentry->d_inode->i_mapping =
                        bdev->bd_inode->i_mapping;
        filp->private_data = bdev;
        mutex_unlock(&raw_mutex);
        unlock_kernel();
        return 0;

out2:
        bd_release(bdev);
out1:
        blkdev_put(bdev, filp->f_mode);
out:
        mutex_unlock(&raw_mutex);
        unlock_kernel();
        return err;
}

/*
 * When the final fd which refers to this character-special node is closed, we
 * make its ->mapping point back at its own i_data.
 */
static int raw_release(struct inode *inode, struct file *filp)
{
        const int minor = iminor(inode);
        struct block_device *bdev;

        mutex_lock(&raw_mutex);
        bdev = raw_devices[minor].binding;
        if (--raw_devices[minor].inuse == 0) {
                /* Here inode->i_mapping == bdev->bd_inode->i_mapping */
                inode->i_mapping = &inode->i_data;
                inode->i_mapping->backing_dev_info = &default_backing_dev_info;
        }
        mutex_unlock(&raw_mutex);
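
        /* Undo the bd_claim() and blkdev_get() done in raw_open(). */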
        bd_release(bdev);
        blkdev_put(bdev, filp->f_mode);
        return 0;
}

/*
 * Forward ioctls to the underlying block device.
 */
static int
raw_ioctl(struct inode *inode, struct file *filp,
          unsigned int command, unsigned long arg)
{
        struct block_device *bdev = filp->private_data;

        return blkdev_ioctl(bdev, 0, command, arg);
}
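
/*
 * (Re)create the /dev/raw/rawN node for the minor being bound; any stale
 * node for that minor is destroyed first.
 */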
static void bind_device(struct raw_config_request *rq)
{
        device_destroy(raw_class, MKDEV(RAW_MAJOR, rq->raw_minor));
        device_create(raw_class, NULL, MKDEV(RAW_MAJOR, rq->raw_minor), NULL,
                      "raw%d", rq->raw_minor);
}

/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
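
/*
 * Illustrative userspace sketch (not part of this driver): binding raw minor
 * 1 to block device 8:0 through the control node, much as the raw(8)
 * utility does. Depending on the setup, the control node may appear as
 * /dev/rawctl or /dev/raw/rawctl.
 *
 *      struct raw_config_request rq = {
 *              .raw_minor   = 1,
 *              .block_major = 8,
 *              .block_minor = 0,
 *      };
 *      int fd = open("/dev/rawctl", O_RDWR);
 *
 *      ioctl(fd, RAW_SETBIND, &rq);
 */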
static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
                         unsigned int command, unsigned long arg)
{
        struct raw_config_request rq;
        struct raw_device_data *rawdev;
        int err = 0;

        switch (command) {
        case RAW_SETBIND:
        case RAW_GETBIND:

                /* First, find out which raw minor we want */

                if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) {
                        err = -EFAULT;
                        goto out;
                }

                if (rq.raw_minor <= 0 || rq.raw_minor >= MAX_RAW_MINORS) {
                        err = -EINVAL;
                        goto out;
                }
                rawdev = &raw_devices[rq.raw_minor];

                if (command == RAW_SETBIND) {
                        dev_t dev;

                        /*
                         * This is like making block devices, so demand the
                         * same capability
                         */
                        if (!capable(CAP_SYS_ADMIN)) {
                                err = -EPERM;
                                goto out;
                        }

                        /*
                         * For now, we don't need to check that the underlying
                         * block device is present or not: we can do that when
                         * the raw device is opened. Just check that the
                         * major/minor numbers make sense.
                         */
                        dev = MKDEV(rq.block_major, rq.block_minor);
                        if ((rq.block_major == 0 && rq.block_minor != 0) ||
                            MAJOR(dev) != rq.block_major ||
                            MINOR(dev) != rq.block_minor) {
                                err = -EINVAL;
                                goto out;
                        }

                        mutex_lock(&raw_mutex);
                        if (rawdev->inuse) {
                                mutex_unlock(&raw_mutex);
                                err = -EBUSY;
                                goto out;
                        }
                        if (rawdev->binding) {
                                bdput(rawdev->binding);
                                module_put(THIS_MODULE);
                        }
                        if (rq.block_major == 0 && rq.block_minor == 0) {
                                /* unbind */
                                rawdev->binding = NULL;
                                device_destroy(raw_class,
                                               MKDEV(RAW_MAJOR, rq.raw_minor));
                        } else {
                                rawdev->binding = bdget(dev);
                                if (rawdev->binding == NULL)
                                        err = -ENOMEM;
                                else {
                                        __module_get(THIS_MODULE);
                                        bind_device(&rq);
                                }
                        }
                        mutex_unlock(&raw_mutex);
                } else {
                        struct block_device *bdev;

                        mutex_lock(&raw_mutex);
                        bdev = rawdev->binding;
                        if (bdev) {
                                rq.block_major = MAJOR(bdev->bd_dev);
                                rq.block_minor = MINOR(bdev->bd_dev);
                        } else {
                                rq.block_major = rq.block_minor = 0;
                        }
                        mutex_unlock(&raw_mutex);

                        if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) {
                                err = -EFAULT;
                                goto out;
                        }
                }
                break;
        default:
                err = -EINVAL;
                break;
        }
out:
        return err;
}
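
/*
 * File operations for the raw devices. Because raw_open() forced O_DIRECT
 * and shared the blockdev's i_mapping, these read/write paths perform
 * direct I/O against the underlying block device.
 */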
static const struct file_operations raw_fops = {
        .read      = do_sync_read,
        .aio_read  = generic_file_aio_read,
        .write     = do_sync_write,
        .aio_write = blkdev_aio_write,
        .fsync     = blkdev_fsync,
        .open      = raw_open,
        .release   = raw_release,
        .ioctl     = raw_ioctl,
        .owner     = THIS_MODULE,
};

static const struct file_operations raw_ctl_fops = {
        .ioctl = raw_ctl_ioctl,
        .open  = raw_open,
        .owner = THIS_MODULE,
};

static struct cdev raw_cdev;
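
/* Name nodes of the "raw" class "raw/<name>" so they appear under /dev/raw/. */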
static char *raw_devnode(struct device *dev, mode_t *mode)
{
        return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
}
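
/*
 * Module init: reserve MAX_RAW_MINORS minors on RAW_MAJOR, register the
 * character device, create the "raw" class and the control device at
 * minor 0.
 */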
static int __init raw_init(void)
{
        dev_t dev = MKDEV(RAW_MAJOR, 0);
        int ret;

        ret = register_chrdev_region(dev, MAX_RAW_MINORS, "raw");
        if (ret)
                goto error;

        cdev_init(&raw_cdev, &raw_fops);
        ret = cdev_add(&raw_cdev, dev, MAX_RAW_MINORS);
        if (ret) {
                kobject_put(&raw_cdev.kobj);
                goto error_region;
        }

        raw_class = class_create(THIS_MODULE, "raw");
        if (IS_ERR(raw_class)) {
                printk(KERN_ERR "Error creating raw class.\n");
                cdev_del(&raw_cdev);
                ret = PTR_ERR(raw_class);
                goto error_region;
        }
        raw_class->devnode = raw_devnode;
        device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");

        return 0;

error_region:
        unregister_chrdev_region(dev, MAX_RAW_MINORS);
error:
        return ret;
}

static void __exit raw_exit(void)
{
        device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
        class_destroy(raw_class);
        cdev_del(&raw_cdev);
        unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), MAX_RAW_MINORS);
}

module_init(raw_init);
module_exit(raw_exit);
MODULE_LICENSE("GPL");