  1. /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
  2. /*
  3. * aoedev.c
  4. * AoE device utility functions; maintains device list.
  5. */
  6. #include <linux/hdreg.h>
  7. #include <linux/blkdev.h>
  8. #include <linux/netdevice.h>
  9. #include <linux/delay.h>
  10. #include "aoe.h"
  11. static void dummy_timer(ulong);
  12. static void aoedev_freedev(struct aoedev *);
  13. static void freetgt(struct aoedev *d, struct aoetgt *t);
  14. static void skbpoolfree(struct aoedev *d);
  15. static struct aoedev *devlist;
  16. static spinlock_t devlist_lock;
  17. int
  18. aoedev_isbusy(struct aoedev *d)
  19. {
  20. struct aoetgt **t, **te;
  21. struct frame *f, *e;
  22. t = d->targets;
  23. te = t + NTARGETS;
  24. for (; t < te && *t; t++) {
  25. f = (*t)->frames;
  26. e = f + (*t)->nframes;
  27. for (; f < e; f++)
  28. if (f->tag != FREETAG)
  29. return 1;
  30. }
  31. return 0;
  32. }
  33. struct aoedev *
  34. aoedev_by_aoeaddr(int maj, int min)
  35. {
  36. struct aoedev *d;
  37. ulong flags;
  38. spin_lock_irqsave(&devlist_lock, flags);
  39. for (d=devlist; d; d=d->next)
  40. if (d->aoemajor == maj && d->aoeminor == min)
  41. break;
  42. spin_unlock_irqrestore(&devlist_lock, flags);
  43. return d;
  44. }
  45. static void
  46. dummy_timer(ulong vp)
  47. {
  48. struct aoedev *d;
  49. d = (struct aoedev *)vp;
  50. if (d->flags & DEVFL_TKILL)
  51. return;
  52. d->timer.expires = jiffies + HZ;
  53. add_timer(&d->timer);
  54. }
/*
 * Take a device down: fail all outstanding and queued I/O with -EIO,
 * reset per-target frame accounting, zero the disk capacity, and clear
 * DEVFL_UP.  Caller is expected to hold the locking this driver uses
 * around device state (callers here take d->lock).
 */
void
aoedev_downdev(struct aoedev *d)
{
	struct aoetgt **t, **te;
	struct frame *f, *e;
	struct buf *buf;
	struct bio *bio;

	t = d->targets;
	te = t + NTARGETS;
	for (; t < te && *t; t++) {
		f = (*t)->frames;
		e = f + (*t)->nframes;
		/* NOTE: the for-increment clause frees the slot (tag = FREETAG,
		 * buf = NULL) for EVERY frame visited, even ones skipped by
		 * the continue below. */
		for (; f < e; f->tag = FREETAG, f->buf = NULL, f++) {
			if (f->tag == FREETAG || f->buf == NULL)
				continue;
			buf = f->buf;
			bio = buf->bio;
			/* End the bio only when this was the last frame of the
			 * buf and the buf is not still being filled
			 * (d->inprocess is ended separately below). */
			if (--buf->nframesout == 0
			&& buf != d->inprocess) {
				mempool_free(buf, d->bufpool);
				bio_endio(bio, -EIO);
			}
		}
		/* All frames are free again: restore the full window. */
		(*t)->maxout = (*t)->nframes;
		(*t)->nout = 0;
	}
	/* Fail the partially-built buf, if any. */
	buf = d->inprocess;
	if (buf) {
		bio = buf->bio;
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -EIO);
	}
	d->inprocess = NULL;
	d->htgt = NULL;

	/* Fail everything still waiting on the buf queue. */
	while (!list_empty(&d->bufq)) {
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
		bio = buf->bio;
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -EIO);
	}

	if (d->gd)
		d->gd->capacity = 0;

	d->flags &= ~DEVFL_UP;
}
  100. static void
  101. aoedev_freedev(struct aoedev *d)
  102. {
  103. struct aoetgt **t, **e;
  104. if (d->gd) {
  105. aoedisk_rm_sysfs(d);
  106. del_gendisk(d->gd);
  107. put_disk(d->gd);
  108. }
  109. t = d->targets;
  110. e = t + NTARGETS;
  111. for (; t < e && *t; t++)
  112. freetgt(d, *t);
  113. if (d->bufpool)
  114. mempool_destroy(d->bufpool);
  115. skbpoolfree(d);
  116. kfree(d);
  117. }
/*
 * Remove idle devices from the device list and free them.
 *
 * str/cnt is the user buffer written to the flush control file; if it
 * begins with "all", even devices that are still DEVFL_UP are flushed.
 * Devices that are open (nopen), mid gendisk allocation/resize, or
 * (without "all") still up are left alone.
 *
 * Returns 0 on success, -EFAULT if the user buffer cannot be read.
 */
int
aoedev_flush(const char __user *str, size_t cnt)
{
	ulong flags;
	struct aoedev *d, **dd;
	struct aoedev *rmd = NULL;	/* private list of devices to free */
	char buf[16];
	int all = 0;

	if (cnt >= 3) {
		if (cnt > sizeof buf)
			cnt = sizeof buf;
		if (copy_from_user(buf, str, cnt))
			return -EFAULT;
		all = !strncmp(buf, "all", 3);
	}

	flush_scheduled_work();
	spin_lock_irqsave(&devlist_lock, flags);
	dd = &devlist;
	/* Walk with a pointer-to-pointer so busy devices can be skipped
	 * and idle ones unlinked in place.  d->lock nests inside
	 * devlist_lock here. */
	while ((d = *dd)) {
		spin_lock(&d->lock);
		if ((!all && (d->flags & DEVFL_UP))
		|| (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		|| d->nopen) {
			spin_unlock(&d->lock);
			dd = &d->next;
			continue;
		}
		*dd = d->next;		/* unlink from devlist */
		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;	/* stop dummy_timer rearming */
		spin_unlock(&d->lock);
		d->next = rmd;		/* move onto the removal list */
		rmd = d;
	}
	spin_unlock_irqrestore(&devlist_lock, flags);

	/* Free outside devlist_lock: aoedev_freedev may sleep. */
	while ((d = rmd)) {
		rmd = d->next;
		del_timer_sync(&d->timer);
		aoedev_freedev(d);	/* must be able to sleep */
	}
	return 0;
}
  160. /* I'm not really sure that this is a realistic problem, but if the
  161. network driver goes gonzo let's just leak memory after complaining. */
  162. static void
  163. skbfree(struct sk_buff *skb)
  164. {
  165. enum { Sms = 100, Tms = 3*1000};
  166. int i = Tms / Sms;
  167. if (skb == NULL)
  168. return;
  169. while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
  170. msleep(Sms);
  171. if (i <= 0) {
  172. printk(KERN_ERR
  173. "aoe: %s holds ref: %s\n",
  174. skb->dev ? skb->dev->name : "netif",
  175. "cannot free skb -- memory leaked.");
  176. return;
  177. }
  178. skb_shinfo(skb)->nr_frags = skb->data_len = 0;
  179. skb_trim(skb, 0);
  180. dev_kfree_skb(skb);
  181. }
  182. static void
  183. skbpoolfree(struct aoedev *d)
  184. {
  185. struct sk_buff *skb;
  186. while ((skb = d->skbpool_hd)) {
  187. d->skbpool_hd = skb->next;
  188. skb->next = NULL;
  189. skbfree(skb);
  190. }
  191. d->skbpool_tl = NULL;
  192. }
  193. /* find it or malloc it */
  194. struct aoedev *
  195. aoedev_by_sysminor_m(ulong sysminor)
  196. {
  197. struct aoedev *d;
  198. ulong flags;
  199. spin_lock_irqsave(&devlist_lock, flags);
  200. for (d=devlist; d; d=d->next)
  201. if (d->sysminor == sysminor)
  202. break;
  203. if (d)
  204. goto out;
  205. d = kcalloc(1, sizeof *d, GFP_ATOMIC);
  206. if (!d)
  207. goto out;
  208. INIT_WORK(&d->work, aoecmd_sleepwork);
  209. spin_lock_init(&d->lock);
  210. init_timer(&d->timer);
  211. d->timer.data = (ulong) d;
  212. d->timer.function = dummy_timer;
  213. d->timer.expires = jiffies + HZ;
  214. add_timer(&d->timer);
  215. d->bufpool = NULL; /* defer to aoeblk_gdalloc */
  216. d->tgt = d->targets;
  217. INIT_LIST_HEAD(&d->bufq);
  218. d->sysminor = sysminor;
  219. d->aoemajor = AOEMAJOR(sysminor);
  220. d->aoeminor = AOEMINOR(sysminor);
  221. d->mintimer = MINTIMER;
  222. d->next = devlist;
  223. devlist = d;
  224. out:
  225. spin_unlock_irqrestore(&devlist_lock, flags);
  226. return d;
  227. }
  228. static void
  229. freetgt(struct aoedev *d, struct aoetgt *t)
  230. {
  231. struct frame *f, *e;
  232. f = t->frames;
  233. e = f + t->nframes;
  234. for (; f < e; f++)
  235. skbfree(f->skb);
  236. kfree(t->frames);
  237. kfree(t);
  238. }
  239. void
  240. aoedev_exit(void)
  241. {
  242. struct aoedev *d;
  243. ulong flags;
  244. flush_scheduled_work();
  245. while ((d = devlist)) {
  246. devlist = d->next;
  247. spin_lock_irqsave(&d->lock, flags);
  248. aoedev_downdev(d);
  249. d->flags |= DEVFL_TKILL;
  250. spin_unlock_irqrestore(&d->lock, flags);
  251. del_timer_sync(&d->timer);
  252. aoedev_freedev(d);
  253. }
  254. }
  255. int __init
  256. aoedev_init(void)
  257. {
  258. spin_lock_init(&devlist_lock);
  259. return 0;
  260. }