brd.c

/*
 * Ram backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <linux/radix-tree.h>
#include <linux/buffer_head.h> /* invalidate_bh_lrus() */
#include <linux/slab.h>

#include <asm/uaccess.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
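
/*
 * Worked example of the arithmetic above, assuming PAGE_SIZE == 4096:
 * sectors are 512 bytes (1 << SECTOR_SHIFT), PAGE_SECTORS_SHIFT == 3 and
 * PAGE_SECTORS == 8. Sector 13 then lives in the page at radix-tree
 * index 13 >> 3 == 1, at byte offset (13 & 7) << 9 == 2560 within it.
 */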

/*
 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 * the pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int		brd_number;
	int		brd_refcnt;
	loff_t		brd_offset;
	loff_t		brd_sizelimit;
	unsigned	brd_blocksize;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

/*
 * Look up and return a brd's page for a given sector.
 */
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (i.e. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * return it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support XIP and highmem, because our ->direct_access
	 * routine for XIP must return memory that is always addressable.
	 * If XIP was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_XIP
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	} else
		page->index = idx;
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}
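
/*
 * Note on brd_insert_page() above: radix_tree_insert() fails with
 * -EEXIST when another task raced us and inserted a page at the same
 * index between our brd_lookup_page() and taking brd_lock. In that
 * case the freshly allocated page is dropped and the winner's page is
 * looked up and returned, so a given index only ever has one page.
 */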

static void brd_free_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;
	pgoff_t idx;

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page = radix_tree_delete(&brd->brd_pages, idx);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);
}

static void brd_zero_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = brd_lookup_page(brd, sector);
	if (page)
		clear_highpage(page);
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}
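
/*
 * Note on brd_free_pages() above: the tree is drained in batches of
 * FREE_BATCH. Each radix_tree_gang_lookup() returns pages at ascending
 * indices starting from 'pos'; after a batch is deleted, 'pos' is
 * bumped past the last freed index so the next lookup resumes there.
 * A short batch means the tree has been emptied and the loop stops.
 */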

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOMEM;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOMEM;
	}
	return 0;
}
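
/*
 * Worked example for copy_to_brd_setup(), with 4K pages: a 1024-byte
 * write starting at sector 15 begins at byte offset 3584 into the page
 * at index 1, so copy == min(1024, 4096 - 3584) == 512 and a second
 * page (index 2) must be inserted for the remaining 512 bytes. Since a
 * bio_vec never spans more than one page of memory (len <= PAGE_SIZE),
 * at most two brd pages are ever needed per copy.
 */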

static void discard_from_brd(struct brd_device *brd,
			sector_t sector, size_t n)
{
	while (n >= PAGE_SIZE) {
		/*
		 * Don't want to actually discard pages here because
		 * re-allocating the pages can result in writeback
		 * deadlocks under heavy load.
		 */
		if (0)
			brd_free_page(brd, sector);
		else
			brd_zero_page(brd, sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page, KM_USER1);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst, KM_USER1);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page, KM_USER1);
		memcpy(dst, src, copy);
		kunmap_atomic(dst, KM_USER1);
	}
}
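
/*
 * copy_to_brd() above and copy_from_brd() below map the brd page with
 * the KM_USER1 atomic kmap slot because their caller, brd_do_bvec(),
 * already holds a KM_USER0 mapping of the bio's page; nested atomic
 * kmaps must use distinct slots.
 */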

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page, KM_USER1);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src, KM_USER1);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page, KM_USER1);
			memcpy(dst, src, copy);
			kunmap_atomic(src, KM_USER1);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (rw != READ) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page, KM_USER0);
	if (rw == READ) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem, KM_USER0);

out:
	return err;
}
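
/*
 * The flush_dcache_page() calls in brd_do_bvec() above keep the bio
 * page's user mappings coherent on architectures with aliasing data
 * caches: after filling the page on a READ, and before reading it on
 * a WRITE.
 */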

static int brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	int rw;
	struct bio_vec *bvec;
	sector_t sector;
	int i;
	int err = -EIO;

	sector = bio->bi_sector;
	if (sector + (bio->bi_size >> SECTOR_SHIFT) >
						get_capacity(bdev->bd_disk))
		goto out;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		err = 0;
		discard_from_brd(brd, sector, bio->bi_size);
		goto out;
	}

	rw = bio_rw(bio);
	if (rw == READA)
		rw = READ;

	bio_for_each_segment(bvec, bio, i) {
		unsigned int len = bvec->bv_len;
		err = brd_do_bvec(brd, bvec->bv_page, len,
					bvec->bv_offset, rw, sector);
		if (err)
			break;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio, err);

	return 0;
}
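
/*
 * Note that brd_make_request() completes the bio itself (there is no
 * request queue or lower device to pass it on to), so it always
 * returns 0 and reports success or failure through bio_endio():
 * out-of-range requests complete with -EIO, failed page insertions
 * with -ENOMEM.
 */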

#ifdef CONFIG_BLK_DEV_XIP
static int brd_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, unsigned long *pfn)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	if (sector & (PAGE_SECTORS-1))
		return -EINVAL;
	if (sector + PAGE_SECTORS > get_capacity(bdev->bd_disk))
		return -ERANGE;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOMEM;
	*kaddr = page_address(page);
	*pfn = page_to_pfn(page);

	return 0;
}
#endif
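
/*
 * brd_direct_access() hands out the kernel virtual address and pfn of
 * the backing page so XIP users can touch the memory with no bio at
 * all. This is why brd_insert_page() avoids __GFP_HIGHMEM when
 * CONFIG_BLK_DEV_XIP is set: page_address() must remain valid for
 * pages handed out here.
 */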

static int brd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int error;
	struct brd_device *brd = bdev->bd_disk->private_data;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/*
	 * ram device BLKFLSBUF has special semantics: we want to actually
	 * release and destroy the ramdisk data.
	 */
	lock_kernel();
	mutex_lock(&bdev->bd_mutex);
	error = -EBUSY;
	if (bdev->bd_openers <= 1) {
		/*
		 * Invalidate the cache first, so it isn't written
		 * back to the device.
		 *
		 * Another thread might instantiate more buffercache here,
		 * but there is not much we can do to close that race.
		 */
		invalidate_bh_lrus();
		truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
		brd_free_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);
	unlock_kernel();

	return error;
}
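
/*
 * From userspace the flush is reachable through the BLKFLSBUF ioctl,
 * e.g. (illustrative sketch):
 *
 *	int fd = open("/dev/ram0", O_RDWR);
 *	ioctl(fd, BLKFLSBUF, 0);
 *
 * which, unlike on an ordinary disk, frees all backing pages and thus
 * destroys the ramdisk's contents.
 */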

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.ioctl =		brd_ioctl,
#ifdef CONFIG_BLK_DEV_XIP
	.direct_access =	brd_direct_access,
#endif
};

/*
 * And now the module code and kernel interface.
 */
static int rd_nr;
int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
static int max_part;
static int part_shift;
module_param(rd_nr, int, 0);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
module_param(rd_size, int, 0);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
module_param(max_part, int, 0);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif
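
/*
 * Typical modular usage (illustrative): loading with
 *
 *	modprobe brd rd_nr=4 rd_size=16384 max_part=4
 *
 * creates /dev/ram0..3 of 16 MB each (rd_size is in kilobytes), with
 * minor numbers reserved for partitions on each disk. Built into the
 * kernel, the size can instead be set with the ramdisk_size= boot
 * parameter handled above.
 */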

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;
	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
	brd->brd_queue->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);

	disk = brd->brd_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i << part_shift;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
	sprintf(disk->disk_name, "ram%d", i);
	set_capacity(disk, rd_size * 2);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}
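
/*
 * Note on brd_alloc() above: set_capacity() takes 512-byte sectors
 * while rd_size is in kilobytes, hence the factor of two: rd_size=4096
 * yields 8192 sectors, i.e. a 4 MB ramdisk.
 */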

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i)
{
	struct brd_device *brd;

	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(dev & MINORMASK);
	kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
	mutex_unlock(&brd_devices_mutex);

	*part = 0;
	return kobj;
}
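
/*
 * brd_probe() is the on-demand path described in brd_init() below:
 * opening a device node whose minor has no disk yet (say a hand-made
 * /dev/ram5 node) routes here via blk_register_region(), and the
 * device is allocated and added on first use.
 */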

static int __init brd_init(void)
{
	int i, nr;
	unsigned long range;
	struct brd_device *brd, *next;

	/*
	 * The brd module can instantiate the underlying device structure
	 * on-demand, provided that a device node for it exists. However,
	 * this does not work well with user space tools that don't know
	 * about such a "feature". In order not to break any existing
	 * tools, we do the following:
	 *
	 * (1) if rd_nr is specified, create that many upfront, and this
	 *     also becomes a hard limit.
	 * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT
	 *     devices on module load; users can extend this further by
	 *     creating device nodes themselves and having the kernel
	 *     automatically instantiate the actual device on-demand.
	 */
	part_shift = 0;
	if (max_part > 0)
		part_shift = fls(max_part);

	if (rd_nr > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (rd_nr) {
		nr = rd_nr;
		range = rd_nr;
	} else {
		nr = CONFIG_BLK_DEV_RAM_COUNT;
		range = 1UL << (MINORBITS - part_shift);
	}

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	for (i = 0; i < nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), range,
				THIS_MODULE, brd_probe, NULL, NULL);

	printk(KERN_INFO "brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	unsigned long range;
	struct brd_device *brd, *next;

	range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift);

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), range);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
}

module_init(brd_init);
module_exit(brd_exit);