raw.c

/*
 * linux/drivers/char/raw.c
 *
 * Front-end raw character devices.  These can be bound to any block
 * devices to provide genuine Unix raw character device semantics.
 *
 * We reserve minor number 0 for a control interface.  ioctl()s on this
 * device are used to bind the other minor numbers to block devices.
 */
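
/*
 * Illustrative sketch (userspace, not part of this driver): binding raw
 * minor 1 to block device 8:1 through the control node.  The device
 * numbers are arbitrary examples; struct raw_config_request and
 * RAW_SETBIND come from <linux/raw.h>.
 *
 *	struct raw_config_request rq = {
 *		.raw_minor   = 1,
 *		.block_major = 8,	// e.g. 8:1 is commonly /dev/sda1
 *		.block_minor = 1,
 *	};
 *	int fd = open("/dev/rawctl", O_RDWR);	// control node path may vary
 *	ioctl(fd, RAW_SETBIND, &rq);		// /dev/raw/raw1 now maps to 8:1
 */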

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/raw.h>
#include <linux/capability.h>
#include <linux/uio.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>

struct raw_device_data {
	struct block_device *binding;
	int inuse;
};

static struct class *raw_class;
static struct raw_device_data raw_devices[MAX_RAW_MINORS];
static DEFINE_MUTEX(raw_mutex);
static const struct file_operations raw_ctl_fops; /* forward declaration */

/*
 * Open/close code for raw IO.
 *
 * We just rewrite the i_mapping for the /dev/raw/rawN file descriptor to
 * point at the blockdev's address_space and set the file handle to use
 * O_DIRECT.
 *
 * Set the device's soft blocksize to the minimum possible.  This gives the
 * finest possible alignment and has no adverse impact on performance.
 */
static int raw_open(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;
	int err;

	if (minor == 0) {	/* It is the control device */
		filp->f_op = &raw_ctl_fops;
		return 0;
	}

	lock_kernel();
	mutex_lock(&raw_mutex);

	/*
	 * All we need to do on open is check that the device is bound.
	 */
	bdev = raw_devices[minor].binding;
	err = -ENODEV;
	if (!bdev)
		goto out;
	igrab(bdev->bd_inode);
	err = blkdev_get(bdev, filp->f_mode);
	if (err)
		goto out;
	err = bd_claim(bdev, raw_open);
	if (err)
		goto out1;
	err = set_blocksize(bdev, bdev_logical_block_size(bdev));
	if (err)
		goto out2;
	filp->f_flags |= O_DIRECT;
	filp->f_mapping = bdev->bd_inode->i_mapping;
	if (++raw_devices[minor].inuse == 1)
		filp->f_path.dentry->d_inode->i_mapping =
			bdev->bd_inode->i_mapping;
	filp->private_data = bdev;
	mutex_unlock(&raw_mutex);
	unlock_kernel();
	return 0;

out2:
	bd_release(bdev);
out1:
	blkdev_put(bdev, filp->f_mode);
out:
	mutex_unlock(&raw_mutex);
	unlock_kernel();
	return err;
}

/*
 * When the final fd which refers to this character-special node is closed, we
 * make its ->mapping point back at its own i_data.
 */
static int raw_release(struct inode *inode, struct file *filp)
{
	const int minor = iminor(inode);
	struct block_device *bdev;

	mutex_lock(&raw_mutex);
	bdev = raw_devices[minor].binding;
	if (--raw_devices[minor].inuse == 0) {
		/* Here inode->i_mapping == bdev->bd_inode->i_mapping */
		inode->i_mapping = &inode->i_data;
		inode->i_mapping->backing_dev_info = &default_backing_dev_info;
	}
	mutex_unlock(&raw_mutex);

	bd_release(bdev);
	blkdev_put(bdev, filp->f_mode);
	return 0;
}

/*
 * Forward ioctls to the underlying block device.
 */
static int
raw_ioctl(struct inode *inode, struct file *filp,
		unsigned int command, unsigned long arg)
{
	struct block_device *bdev = filp->private_data;

	return blkdev_ioctl(bdev, 0, command, arg);
}
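
/*
 * Create (or refresh) the raw%d device node for a newly bound minor;
 * any stale node for that minor is destroyed first.
 */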
static void bind_device(struct raw_config_request *rq)
{
	device_destroy(raw_class, MKDEV(RAW_MAJOR, rq->raw_minor));
	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, rq->raw_minor), NULL,
		      "raw%d", rq->raw_minor);
}

/*
 * Deal with ioctls against the raw-device control interface, to bind
 * and unbind other raw devices.
 */
static int raw_ctl_ioctl(struct inode *inode, struct file *filp,
			 unsigned int command, unsigned long arg)
{
	struct raw_config_request rq;
	struct raw_device_data *rawdev;
	int err = 0;

	switch (command) {
	case RAW_SETBIND:
	case RAW_GETBIND:

		/* First, find out which raw minor we want */

		if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) {
			err = -EFAULT;
			goto out;
		}

		if (rq.raw_minor <= 0 || rq.raw_minor >= MAX_RAW_MINORS) {
			err = -EINVAL;
			goto out;
		}
		rawdev = &raw_devices[rq.raw_minor];

		if (command == RAW_SETBIND) {
			dev_t dev;

			/*
			 * This is like making block devices, so demand the
			 * same capability
			 */
			if (!capable(CAP_SYS_ADMIN)) {
				err = -EPERM;
				goto out;
			}

			/*
			 * For now, we don't need to check that the underlying
			 * block device is present or not: we can do that when
			 * the raw device is opened.  Just check that the
			 * major/minor numbers make sense.
			 */
			dev = MKDEV(rq.block_major, rq.block_minor);
			if ((rq.block_major == 0 && rq.block_minor != 0) ||
					MAJOR(dev) != rq.block_major ||
					MINOR(dev) != rq.block_minor) {
				err = -EINVAL;
				goto out;
			}

			mutex_lock(&raw_mutex);
			if (rawdev->inuse) {
				mutex_unlock(&raw_mutex);
				err = -EBUSY;
				goto out;
			}
			if (rawdev->binding) {
				bdput(rawdev->binding);
				module_put(THIS_MODULE);
			}
			if (rq.block_major == 0 && rq.block_minor == 0) {
				/* unbind */
				rawdev->binding = NULL;
				device_destroy(raw_class,
						MKDEV(RAW_MAJOR, rq.raw_minor));
			} else {
				rawdev->binding = bdget(dev);
				if (rawdev->binding == NULL)
					err = -ENOMEM;
				else {
					__module_get(THIS_MODULE);
					bind_device(&rq);
				}
			}
			mutex_unlock(&raw_mutex);
		} else {
			struct block_device *bdev;

			mutex_lock(&raw_mutex);
			bdev = rawdev->binding;
			if (bdev) {
				rq.block_major = MAJOR(bdev->bd_dev);
				rq.block_minor = MINOR(bdev->bd_dev);
			} else {
				rq.block_major = rq.block_minor = 0;
			}
			mutex_unlock(&raw_mutex);
			if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) {
				err = -EFAULT;
				goto out;
			}
		}
		break;
	default:
		err = -EINVAL;
		break;
	}
out:
	return err;
}
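
/*
 * Reads and writes on a bound raw minor go through the generic file
 * paths below; because raw_open() pointed f_mapping at the block
 * device's address_space and forced O_DIRECT, the I/O bypasses the
 * page cache and goes straight to the underlying block device.
 */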
static const struct file_operations raw_fops = {
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= generic_file_aio_write_nolock,
	.open		= raw_open,
	.release	= raw_release,
	.ioctl		= raw_ioctl,
	.owner		= THIS_MODULE,
};

static const struct file_operations raw_ctl_fops = {
	.ioctl		= raw_ctl_ioctl,
	.open		= raw_open,
	.owner		= THIS_MODULE,
};

static struct cdev raw_cdev;
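
/*
 * Have devtmpfs/udev place this class's device nodes under /dev/raw/
 * (e.g. /dev/raw/raw1) rather than directly in /dev.
 */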
static char *raw_nodename(struct device *dev)
{
	return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
}
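
/*
 * Module init: reserve the RAW_MAJOR character device region, register
 * the cdev covering all raw minors, then create the "raw" class and the
 * control device on minor 0 ("rawctl").
 */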
static int __init raw_init(void)
{
	dev_t dev = MKDEV(RAW_MAJOR, 0);
	int ret;

	ret = register_chrdev_region(dev, MAX_RAW_MINORS, "raw");
	if (ret)
		goto error;

	cdev_init(&raw_cdev, &raw_fops);
	ret = cdev_add(&raw_cdev, dev, MAX_RAW_MINORS);
	if (ret) {
		kobject_put(&raw_cdev.kobj);
		goto error_region;
	}

	raw_class = class_create(THIS_MODULE, "raw");
	if (IS_ERR(raw_class)) {
		printk(KERN_ERR "Error creating raw class.\n");
		cdev_del(&raw_cdev);
		ret = PTR_ERR(raw_class);
		goto error_region;
	}
	raw_class->nodename = raw_nodename;
	device_create(raw_class, NULL, MKDEV(RAW_MAJOR, 0), NULL, "rawctl");

	return 0;

error_region:
	unregister_chrdev_region(dev, MAX_RAW_MINORS);
error:
	return ret;
}
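
/*
 * Module exit: tear everything down in the reverse order of raw_init().
 */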
static void __exit raw_exit(void)
{
	device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
	class_destroy(raw_class);
	cdev_del(&raw_cdev);
	unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), MAX_RAW_MINORS);
}

module_init(raw_init);
module_exit(raw_exit);

MODULE_LICENSE("GPL");