aoeblk.c

/* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoeblk.c
 * block device routines
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include "aoe.h"

static kmem_cache_t *buf_pool_cache;
/* add attributes for our block devices in sysfs */
static ssize_t aoedisk_show_state(struct gendisk *disk, char *page)
{
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE,
			"%s%s\n",
			(d->flags & DEVFL_UP) ? "up" : "down",
			(d->flags & DEVFL_PAUSE) ? ",paused" :
			(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
	/* I'd rather see nopen exported so we can ditch closewait */
}
static ssize_t aoedisk_show_mac(struct gendisk *disk, char *page)
{
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE, "%012llx\n",
			(unsigned long long)mac_addr(d->addr));
}
static ssize_t aoedisk_show_netif(struct gendisk *disk, char *page)
{
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE, "%s\n", d->ifp->name);
}
/* firmware version */
static ssize_t aoedisk_show_fwver(struct gendisk *disk, char *page)
{
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
}
static struct disk_attribute disk_attr_state = {
	.attr = {.name = "state", .mode = S_IRUGO },
	.show = aoedisk_show_state
};
static struct disk_attribute disk_attr_mac = {
	.attr = {.name = "mac", .mode = S_IRUGO },
	.show = aoedisk_show_mac
};
static struct disk_attribute disk_attr_netif = {
	.attr = {.name = "netif", .mode = S_IRUGO },
	.show = aoedisk_show_netif
};
static struct disk_attribute disk_attr_fwver = {
	.attr = {.name = "firmware-version", .mode = S_IRUGO },
	.show = aoedisk_show_fwver
};
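
/*
 * These attributes hang off the gendisk kobject, so once add_disk() has
 * run they should show up under /sys/block/, with the '/' in the disk
 * name replaced by '!' by the block layer.  A likely example (the exact
 * path is an assumption based on the "etherd/e%ld.%ld" name set in
 * aoeblk_gdalloc() below):
 *
 *	cat /sys/block/etherd!e0.0/state
 */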
static void
aoedisk_add_sysfs(struct aoedev *d)
{
	sysfs_create_file(&d->gd->kobj, &disk_attr_state.attr);
	sysfs_create_file(&d->gd->kobj, &disk_attr_mac.attr);
	sysfs_create_file(&d->gd->kobj, &disk_attr_netif.attr);
	sysfs_create_file(&d->gd->kobj, &disk_attr_fwver.attr);
}
void
aoedisk_rm_sysfs(struct aoedev *d)
{
	/* remove the same attribute files created in aoedisk_add_sysfs */
	sysfs_remove_file(&d->gd->kobj, &disk_attr_state.attr);
	sysfs_remove_file(&d->gd->kobj, &disk_attr_mac.attr);
	sysfs_remove_file(&d->gd->kobj, &disk_attr_netif.attr);
	sysfs_remove_file(&d->gd->kobj, &disk_attr_fwver.attr);
}
static int
aoeblk_open(struct inode *inode, struct file *filp)
{
	struct aoedev *d;
	ulong flags;

	d = inode->i_bdev->bd_disk->private_data;

	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_UP) {
		d->nopen++;
		spin_unlock_irqrestore(&d->lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	return -ENODEV;
}

static int
aoeblk_release(struct inode *inode, struct file *filp)
{
	struct aoedev *d;
	ulong flags;

	d = inode->i_bdev->bd_disk->private_data;

	spin_lock_irqsave(&d->lock, flags);
	if (--d->nopen == 0) {
		spin_unlock_irqrestore(&d->lock, flags);
		aoecmd_cfg(d->aoemajor, d->aoeminor);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);

	return 0;
}
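
/*
 * Request path as implemented below: each incoming bio is wrapped in a
 * struct buf drawn from d->bufpool, queued on d->bufq under d->lock,
 * and aoecmd_work() (defined elsewhere in the driver) turns queued
 * bufs into AoE frames on the d->sendq_hd list; those frames are then
 * transmitted by aoenet_xmit() after the lock is dropped.
 */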
static int
aoeblk_make_request(request_queue_t *q, struct bio *bio)
{
	struct aoedev *d;
	struct buf *buf;
	struct sk_buff *sl;
	ulong flags;

	blk_queue_bounce(q, &bio);

	d = bio->bi_bdev->bd_disk->private_data;
	buf = mempool_alloc(d->bufpool, GFP_NOIO);
	if (buf == NULL) {
		printk(KERN_INFO "aoe: aoeblk_make_request: buf allocation "
			"failure\n");
		bio_endio(bio, bio->bi_size, -ENOMEM);
		return 0;
	}
	memset(buf, 0, sizeof(*buf));
	INIT_LIST_HEAD(&buf->bufs);
	buf->start_time = jiffies;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	buf->bv = buf->bio->bi_io_vec;
	buf->bv_resid = buf->bv->bv_len;
	buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;

	spin_lock_irqsave(&d->lock, flags);

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_INFO "aoe: aoeblk_make_request: device %ld.%ld is not up\n",
			d->aoemajor, d->aoeminor);
		spin_unlock_irqrestore(&d->lock, flags);
		mempool_free(buf, d->bufpool);
		bio_endio(bio, bio->bi_size, -ENXIO);
		return 0;
	}

	list_add_tail(&buf->bufs, &d->bufq);
	aoecmd_work(d);

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
	return 0;
}
static int
aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct aoedev *d = bdev->bd_disk->private_data;

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_ERR "aoe: aoeblk_getgeo: disk not up\n");
		return -ENODEV;
	}

	geo->cylinders = d->geo.cylinders;
	geo->heads = d->geo.heads;
	geo->sectors = d->geo.sectors;
	return 0;
}
static struct block_device_operations aoe_bdops = {
	.open = aoeblk_open,
	.release = aoeblk_release,
	.getgeo = aoeblk_getgeo,
	.owner = THIS_MODULE,
};
/* alloc_disk and add_disk can sleep */
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	ulong flags;

	gd = alloc_disk(AOE_PARTITIONS);
	if (gd == NULL) {
		printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate disk "
			"structure for %ld.%ld\n", d->aoemajor, d->aoeminor);
		spin_lock_irqsave(&d->lock, flags);
		d->flags &= ~DEVFL_GDALLOC;
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache);
	if (d->bufpool == NULL) {
		printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool "
			"for %ld.%ld\n", d->aoemajor, d->aoeminor);
		put_disk(gd);
		spin_lock_irqsave(&d->lock, flags);
		d->flags &= ~DEVFL_GDALLOC;
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);
	blk_queue_make_request(&d->blkq, aoeblk_make_request);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor * AOE_PARTITIONS;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	gd->capacity = d->ssize;
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%ld",
		d->aoemajor, d->aoeminor);
	gd->queue = &d->blkq;
	d->gd = gd;
	d->flags &= ~DEVFL_GDALLOC;
	d->flags |= DEVFL_UP;
	spin_unlock_irqrestore(&d->lock, flags);

	add_disk(gd);
	aoedisk_add_sysfs(d);
}
void
aoeblk_exit(void)
{
	kmem_cache_destroy(buf_pool_cache);
}

int __init
aoeblk_init(void)
{
	buf_pool_cache = kmem_cache_create("aoe_bufs",
					   sizeof(struct buf),
					   0, 0, NULL, NULL);
	if (buf_pool_cache == NULL)
		return -ENOMEM;

	return 0;
}
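
/*
 * Not in this file: aoeblk_init() and aoeblk_exit() are meant to be
 * called from the driver's module init/exit code (aoemain.c in the
 * mainline aoe driver).  A minimal sketch, with illustrative names and
 * the other subsystem setup elided:
 *
 *	static int __init aoe_init(void)
 *	{
 *		int ret = aoeblk_init();
 *		if (ret)
 *			return ret;
 *		return 0;
 *	}
 *	module_init(aoe_init);
 */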