  1. /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
  2. /*
  3. * aoedev.c
  4. * AoE device utility functions; maintains device list.
  5. */
  6. #include <linux/hdreg.h>
  7. #include <linux/blkdev.h>
  8. #include <linux/netdevice.h>
  9. #include <linux/delay.h>
  10. #include "aoe.h"
/* Forward declarations for helpers defined later in this file. */
static void dummy_timer(ulong);
static void aoedev_freedev(struct aoedev *);
static void freetgt(struct aoedev *d, struct aoetgt *t);
static void skbpoolfree(struct aoedev *d);

/* Head of the singly linked list of all known AoE devices;
 * traversal and modification are guarded by devlist_lock. */
static struct aoedev *devlist;
static DEFINE_SPINLOCK(devlist_lock);
  17. struct aoedev *
  18. aoedev_by_aoeaddr(int maj, int min)
  19. {
  20. struct aoedev *d;
  21. ulong flags;
  22. spin_lock_irqsave(&devlist_lock, flags);
  23. for (d=devlist; d; d=d->next)
  24. if (d->aoemajor == maj && d->aoeminor == min)
  25. break;
  26. spin_unlock_irqrestore(&devlist_lock, flags);
  27. return d;
  28. }
  29. static void
  30. dummy_timer(ulong vp)
  31. {
  32. struct aoedev *d;
  33. d = (struct aoedev *)vp;
  34. if (d->flags & DEVFL_TKILL)
  35. return;
  36. d->timer.expires = jiffies + HZ;
  37. add_timer(&d->timer);
  38. }
/*
 * Fail all outstanding I/O on the device and mark it down.
 * Every in-flight frame's buf is completed with -EIO, the pending
 * buf queue is drained with -EIO, and the gendisk capacity is zeroed.
 * NOTE(review): callers in this file (aoedev_flush, aoedev_exit)
 * hold d->lock around this call -- confirm that is the contract.
 */
void
aoedev_downdev(struct aoedev *d)
{
	struct aoetgt **t, **te;
	struct frame *f, *e;
	struct buf *buf;
	struct bio *bio;

	t = d->targets;
	te = t + NTARGETS;
	for (; t < te && *t; t++) {
		f = (*t)->frames;
		e = f + (*t)->nframes;
		/* the increment clause resets each frame (tag/buf cleared)
		 * after the body has had a chance to complete its buf */
		for (; f < e; f->tag = FREETAG, f->buf = NULL, f++) {
			if (f->tag == FREETAG || f->buf == NULL)
				continue;
			buf = f->buf;
			bio = buf->bio;
			/* complete the buf only when its last frame is
			 * accounted for, and never the in-process buf
			 * (that one is freed below) */
			if (--buf->nframesout == 0
			&& buf != d->inprocess) {
				mempool_free(buf, d->bufpool);
				bio_endio(bio, -EIO);
			}
		}
		/* reset the target's window: everything is "not out" now */
		(*t)->maxout = (*t)->nframes;
		(*t)->nout = 0;
	}
	/* fail the partially processed buf, if any */
	buf = d->inprocess;
	if (buf) {
		bio = buf->bio;
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -EIO);
	}
	d->inprocess = NULL;
	d->htgt = NULL;
	/* fail every buf still waiting on the device queue */
	while (!list_empty(&d->bufq)) {
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
		bio = buf->bio;
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -EIO);
	}
	if (d->gd)
		set_capacity(d->gd, 0);
	d->flags &= ~DEVFL_UP;
}
  84. static void
  85. aoedev_freedev(struct aoedev *d)
  86. {
  87. struct aoetgt **t, **e;
  88. if (d->gd) {
  89. aoedisk_rm_sysfs(d);
  90. del_gendisk(d->gd);
  91. put_disk(d->gd);
  92. }
  93. t = d->targets;
  94. e = t + NTARGETS;
  95. for (; t < e && *t; t++)
  96. freetgt(d, *t);
  97. if (d->bufpool)
  98. mempool_destroy(d->bufpool);
  99. skbpoolfree(d);
  100. blk_cleanup_queue(d->blkq);
  101. kfree(d);
  102. }
/*
 * Remove unused devices from devlist and free them.
 * By default only devices that are down, not mid-allocation, and not
 * open are removed; if the user wrote "all", devices that are merely
 * unopened are removed as well. Returns 0, or -EFAULT if the user
 * buffer could not be read.
 */
int
aoedev_flush(const char __user *str, size_t cnt)
{
	ulong flags;
	struct aoedev *d, **dd;
	struct aoedev *rmd = NULL;	/* devices unlinked, to free below */
	char buf[16];
	int all = 0;

	if (cnt >= 3) {
		if (cnt > sizeof buf)
			cnt = sizeof buf;
		if (copy_from_user(buf, str, cnt))
			return -EFAULT;
		/* buf may not be NUL-terminated, but cnt >= 3 guarantees
		 * the 3-byte comparison stays within what was copied */
		all = !strncmp(buf, "all", 3);
	}
	flush_scheduled_work();
	spin_lock_irqsave(&devlist_lock, flags);
	dd = &devlist;
	while ((d = *dd)) {
		spin_lock(&d->lock);
		/* skip devices that are up (unless "all"), have gendisk
		 * allocation or resize pending, or are open */
		if ((!all && (d->flags & DEVFL_UP))
		|| (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		|| d->nopen) {
			spin_unlock(&d->lock);
			dd = &d->next;
			continue;
		}
		/* unlink from devlist and park on the private rmd list */
		*dd = d->next;
		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;
		spin_unlock(&d->lock);
		d->next = rmd;
		rmd = d;
	}
	spin_unlock_irqrestore(&devlist_lock, flags);
	/* free with no spinlocks held: freeing can sleep */
	while ((d = rmd)) {
		rmd = d->next;
		del_timer_sync(&d->timer);
		aoedev_freedev(d); /* must be able to sleep */
	}
	return 0;
}
/* I'm not really sure that this is a realistic problem, but if the
 * network driver goes gonzo let's just leak memory after complaining. */
  147. static void
  148. skbfree(struct sk_buff *skb)
  149. {
  150. enum { Sms = 100, Tms = 3*1000};
  151. int i = Tms / Sms;
  152. if (skb == NULL)
  153. return;
  154. while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
  155. msleep(Sms);
  156. if (i < 0) {
  157. printk(KERN_ERR
  158. "aoe: %s holds ref: %s\n",
  159. skb->dev ? skb->dev->name : "netif",
  160. "cannot free skb -- memory leaked.");
  161. return;
  162. }
  163. skb_shinfo(skb)->nr_frags = skb->data_len = 0;
  164. skb_trim(skb, 0);
  165. dev_kfree_skb(skb);
  166. }
  167. static void
  168. skbpoolfree(struct aoedev *d)
  169. {
  170. struct sk_buff *skb, *tmp;
  171. skb_queue_walk_safe(&d->skbpool, skb, tmp)
  172. skbfree(skb);
  173. __skb_queue_head_init(&d->skbpool);
  174. }
  175. /* find it or malloc it */
  176. struct aoedev *
  177. aoedev_by_sysminor_m(ulong sysminor)
  178. {
  179. struct aoedev *d;
  180. ulong flags;
  181. spin_lock_irqsave(&devlist_lock, flags);
  182. for (d=devlist; d; d=d->next)
  183. if (d->sysminor == sysminor)
  184. break;
  185. if (d)
  186. goto out;
  187. d = kcalloc(1, sizeof *d, GFP_ATOMIC);
  188. if (!d)
  189. goto out;
  190. INIT_WORK(&d->work, aoecmd_sleepwork);
  191. spin_lock_init(&d->lock);
  192. skb_queue_head_init(&d->sendq);
  193. skb_queue_head_init(&d->skbpool);
  194. init_timer(&d->timer);
  195. d->timer.data = (ulong) d;
  196. d->timer.function = dummy_timer;
  197. d->timer.expires = jiffies + HZ;
  198. add_timer(&d->timer);
  199. d->bufpool = NULL; /* defer to aoeblk_gdalloc */
  200. d->tgt = d->targets;
  201. INIT_LIST_HEAD(&d->bufq);
  202. d->sysminor = sysminor;
  203. d->aoemajor = AOEMAJOR(sysminor);
  204. d->aoeminor = AOEMINOR(sysminor);
  205. d->mintimer = MINTIMER;
  206. d->next = devlist;
  207. devlist = d;
  208. out:
  209. spin_unlock_irqrestore(&devlist_lock, flags);
  210. return d;
  211. }
  212. static void
  213. freetgt(struct aoedev *d, struct aoetgt *t)
  214. {
  215. struct frame *f, *e;
  216. f = t->frames;
  217. e = f + t->nframes;
  218. for (; f < e; f++)
  219. skbfree(f->skb);
  220. kfree(t->frames);
  221. kfree(t);
  222. }
  223. void
  224. aoedev_exit(void)
  225. {
  226. struct aoedev *d;
  227. ulong flags;
  228. flush_scheduled_work();
  229. while ((d = devlist)) {
  230. devlist = d->next;
  231. spin_lock_irqsave(&d->lock, flags);
  232. aoedev_downdev(d);
  233. d->flags |= DEVFL_TKILL;
  234. spin_unlock_irqrestore(&d->lock, flags);
  235. del_timer_sync(&d->timer);
  236. aoedev_freedev(d);
  237. }
  238. }
  239. int __init
  240. aoedev_init(void)
  241. {
  242. return 0;
  243. }