/* Copyright (c) 2012 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */

#include <linux/ata.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/genhd.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <net/net_namespace.h>
#include <asm/unaligned.h>
#include <linux/uio.h>
#include "aoe.h"

#define MAXIOC (8192)	/* default meant to avoid most soft lockups */

static void ktcomplete(struct frame *, struct sk_buff *);
static int count_targets(struct aoedev *d, int *untainted);

static struct buf *nextbuf(struct aoedev *);

static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");

static int aoe_maxout = 64;
module_param(aoe_maxout, int, 0644);
MODULE_PARM_DESC(aoe_maxout,
	"Only aoe_maxout outstanding packets for every MAC on eX.Y.");

static wait_queue_head_t ktiowq;
static struct ktstate kts;

/* io completion queue */
static struct {
	struct list_head head;
	spinlock_t lock;
} iocq;

static struct page *empty_page;

static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb_checksum_none_assert(skb);
	}
	return skb;
}
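
/*
 * Unlink and return the frame with the given tag from the
 * deferred-retransmit queue, or NULL if no frame matches.
 */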
static struct frame *
getframe_deferred(struct aoedev *d, u32 tag)
{
	struct list_head *head, *pos, *nx;
	struct frame *f;

	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (f->tag == tag) {
			list_del(pos);
			return f;
		}
	}
	return NULL;
}

static struct frame *
getframe(struct aoedev *d, u32 tag)
{
	struct frame *f;
	struct list_head *head, *pos, *nx;
	u32 n;

	n = tag % NFACTIVE;
	head = &d->factive[n];
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		if (f->tag == tag) {
			list_del(pos);
			return f;
		}
	}
	return NULL;
}

/*
 * Leave the top bit clear so we have tagspace for userland.
 * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
 * This driver reserves tag -1 to mean "unused frame."
 */
static int
newtag(struct aoedev *d)
{
	register ulong n;

	n = jiffies & 0xffff;
	return n |= (++d->lasttag & 0x7fff) << 16;
}

static u32
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}
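
/* Store a 48-bit LBA in the six single-byte lba fields of the ATA header. */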
static inline void
put_lba(struct aoe_atahdr *ah, sector_t lba)
{
	ah->lba0 = lba;
	ah->lba1 = lba >>= 8;
	ah->lba2 = lba >>= 8;
	ah->lba3 = lba >>= 8;
	ah->lba4 = lba >>= 8;
	ah->lba5 = lba >>= 8;
}

static struct aoeif *
ifrotate(struct aoetgt *t)
{
	struct aoeif *ifp;

	ifp = t->ifp;
	ifp++;
	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
		ifp = t->ifs;
	if (ifp->nd == NULL)
		return NULL;
	return t->ifp = ifp;
}
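
/*
 * The skb pool keeps transmit buffers the network layer may still hold a
 * reference to; skb_pool_get hands one back only once its dataref count
 * shows this driver is again the sole user.
 */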
static void
skb_pool_put(struct aoedev *d, struct sk_buff *skb)
{
	__skb_queue_tail(&d->skbpool, skb);
}

static struct sk_buff *
skb_pool_get(struct aoedev *d)
{
	struct sk_buff *skb = skb_peek(&d->skbpool);

	if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) {
		__skb_unlink(skb, &d->skbpool);
		return skb;
	}
	if (skb_queue_len(&d->skbpool) < NSKBPOOLMAX &&
	    (skb = new_skb(ETH_ZLEN)))
		return skb;
	return NULL;
}

void
aoe_freetframe(struct frame *f)
{
	struct aoetgt *t;

	t = f->t;
	f->buf = NULL;
	f->lba = 0;
	f->bv = NULL;
	f->r_skb = NULL;
	f->flags = 0;
	list_add(&f->head, &t->ffree);
}
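
/*
 * Take a frame from the target's free list, or allocate a fresh one (up
 * to NSKBPOOLMAX*2 per target), making sure its skb is free for reuse.
 */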
static struct frame *
newtframe(struct aoedev *d, struct aoetgt *t)
{
	struct frame *f;
	struct sk_buff *skb;
	struct list_head *pos;

	if (list_empty(&t->ffree)) {
		if (t->falloc >= NSKBPOOLMAX*2)
			return NULL;
		f = kcalloc(1, sizeof(*f), GFP_ATOMIC);
		if (f == NULL)
			return NULL;
		t->falloc++;
		f->t = t;
	} else {
		pos = t->ffree.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
	}

	skb = f->skb;
	if (skb == NULL) {
		f->skb = skb = new_skb(ETH_ZLEN);
		if (!skb) {
bail:			aoe_freetframe(f);
			return NULL;
		}
	}

	if (atomic_read(&skb_shinfo(skb)->dataref) != 1) {
		skb = skb_pool_get(d);
		if (skb == NULL)
			goto bail;
		skb_pool_put(d, f->skb);
		f->skb = skb;
	}

	skb->truesize -= skb->data_len;
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
	skb_trim(skb, 0);
	return f;
}
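
/*
 * Walk the targets round robin, starting just past the last one used,
 * and return a frame from the first target that can take more I/O;
 * tainted targets are tried only when no untainted ones remain.
 */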
static struct frame *
newframe(struct aoedev *d)
{
	struct frame *f;
	struct aoetgt *t, **tt;
	int totout = 0;
	int use_tainted;
	int has_untainted;

	if (!d->targets || !d->targets[0]) {
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
		return NULL;
	}
	tt = d->tgt;	/* last used target */
	for (use_tainted = 0, has_untainted = 0;;) {
		tt++;
		if (tt >= &d->targets[d->ntargets] || !*tt)
			tt = d->targets;
		t = *tt;
		if (!t->taint) {
			has_untainted = 1;
			totout += t->nout;
		}
		if (t->nout < t->maxout
		&& (use_tainted || !t->taint)
		&& t->ifp->nd) {
			f = newtframe(d, t);
			if (f) {
				ifrotate(t);
				d->tgt = tt;
				return f;
			}
		}
		if (tt == d->tgt) {	/* we've looped and found nada */
			if (!use_tainted && !has_untainted)
				use_tainted = 1;
			else
				break;
		}
	}
	if (totout == 0) {
		d->kicked++;
		d->flags |= DEVFL_KICKME;
	}
	return NULL;
}
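
/* Map cnt bytes of the bio_vec array onto the skb as page fragments. */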
static void
skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
{
	int frag = 0;
	ulong fcnt;
loop:
	fcnt = bv->bv_len - (off - bv->bv_offset);
	if (fcnt > cnt)
		fcnt = cnt;
	skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
	cnt -= fcnt;
	if (cnt <= 0)
		return;
	bv++;
	off = bv->bv_offset;
	goto loop;
}
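
/* Hash the frame onto one of the device's active-frame lists by tag. */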
static void
fhash(struct frame *f)
{
	struct aoedev *d = f->t->d;
	u32 n;

	n = f->tag % NFACTIVE;
	list_add_tail(&f->head, &d->factive[n]);
}
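
/* Fill in the AoE and ATA headers of a frame for an ATA read or write. */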
static void
ata_rw_frameinit(struct frame *f)
{
	struct aoetgt *t;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct sk_buff *skb;
	char writebit, extbit;

	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h + 1);
	skb_put(skb, sizeof(*h) + sizeof(*ah));
	memset(h, 0, skb->len);

	writebit = 0x10;
	extbit = 0x4;

	t = f->t;
	f->tag = aoehdr_atainit(t->d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->waited_total = 0;
	if (f->buf)
		f->lba = f->buf->sector;

	/* set up ata header */
	ah->scnt = f->bcnt >> 9;
	put_lba(ah, f->lba);
	if (t->d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}
	if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
		skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += f->bcnt;
		skb->data_len = f->bcnt;
		skb->truesize += f->bcnt;
		t->wpkts++;
	} else {
		t->rpkts++;
		writebit = 0;
	}

	ah->cmdstat = ATA_CMD_PIO_READ | writebit | extbit;
	skb->dev = t->ifp->nd;
}

static int
aoecmd_ata_rw(struct aoedev *d)
{
	struct frame *f;
	struct buf *buf;
	struct aoetgt *t;
	struct sk_buff *skb;
	struct sk_buff_head queue;
	ulong bcnt, fbcnt;

	buf = nextbuf(d);
	if (buf == NULL)
		return 0;
	f = newframe(d);
	if (f == NULL)
		return 0;
	t = *d->tgt;
	bcnt = d->maxbcnt;
	if (bcnt == 0)
		bcnt = DEFAULTBCNT;
	if (bcnt > buf->resid)
		bcnt = buf->resid;
	fbcnt = bcnt;
	f->bv = buf->bv;
	f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
	do {
		if (fbcnt < buf->bv_resid) {
			buf->bv_resid -= fbcnt;
			buf->resid -= fbcnt;
			break;
		}
		fbcnt -= buf->bv_resid;
		buf->resid -= buf->bv_resid;
		if (buf->resid == 0) {
			d->ip.buf = NULL;
			break;
		}
		buf->bv++;
		buf->bv_resid = buf->bv->bv_len;
		WARN_ON(buf->bv_resid == 0);
	} while (fbcnt);

	/* initialize the headers & frame */
	f->buf = buf;
	f->bcnt = bcnt;
	ata_rw_frameinit(f);

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->sector += bcnt >> 9;

	skb = skb_clone(f->skb, GFP_ATOMIC);
	if (skb) {
		do_gettimeofday(&f->sent);
		f->sent_jiffs = (u32) jiffies;
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
	return 1;
}

/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static void
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff_head *queue)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb;
	struct net_device *ifp;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		__skb_queue_tail(queue, skb);
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

cont:
		dev_put(ifp);
	}
	rcu_read_unlock();
}

static void
resend(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct sk_buff_head queue;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct aoetgt *t;
	char buf[128];
	u32 n;

	t = f->t;
	n = newtag(d);
	skb = f->skb;
	if (ifrotate(t) == NULL) {
		/* probably can't happen, but set it up to fail anyway */
		pr_info("aoe: resend: no interfaces to rotate to.\n");
		ktcomplete(f, NULL);
		return;
	}
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);

	if (!(f->flags & FFL_PROBE)) {
		snprintf(buf, sizeof(buf),
			"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x s=%pm d=%pm nout=%d\n",
			"retransmit", d->aoemajor, d->aoeminor,
			f->tag, jiffies, n,
			h->src, h->dst, t->nout);
		aoechr_error(buf);
	}

	f->tag = n;
	fhash(f);
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	do_gettimeofday(&f->sent);
	f->sent_jiffs = (u32) jiffies;
	__skb_queue_head_init(&queue);
	__skb_queue_tail(&queue, skb);
	aoenet_xmit(&queue);
}
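
/*
 * Microseconds since the frame was sent.  For intervals longer than a
 * quarter second the jiffies-based figure is used instead, for the
 * reasons given in the comment inside.
 */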
static int
tsince_hr(struct frame *f)
{
	struct timeval now;
	int n;

	do_gettimeofday(&now);
	n = now.tv_usec - f->sent.tv_usec;
	n += (now.tv_sec - f->sent.tv_sec) * USEC_PER_SEC;

	if (n < 0)
		n = -n;

	/* For relatively long periods, use jiffies to avoid
	 * discrepancies caused by updates to the system time.
	 *
	 * On system with HZ of 1000, 32-bits is over 49 days
	 * worth of jiffies, or over 71 minutes worth of usecs.
	 *
	 * Jiffies overflow is handled by subtraction of unsigned ints:
	 * (gdb) print (unsigned) 2 - (unsigned) 0xfffffffe
	 * $3 = 4
	 * (gdb)
	 */
	if (n > USEC_PER_SEC / 4) {
		n = ((u32) jiffies) - f->sent_jiffs;
		n *= USEC_PER_SEC / HZ;
	}

	return n;
}

static int
tsince(u32 tag)
{
	int n;

	n = jiffies & 0xffff;
	n -= tag & 0xffff;
	if (n < 0)
		n += 1<<16;
	return jiffies_to_usecs(n + 1);
}

static struct aoeif *
getif(struct aoetgt *t, struct net_device *nd)
{
	struct aoeif *p, *e;

	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++)
		if (p->nd == nd)
			return p;
	return NULL;
}

static void
ejectif(struct aoetgt *t, struct aoeif *ifp)
{
	struct aoeif *e;
	struct net_device *nd;
	ulong n;

	nd = ifp->nd;
	e = t->ifs + NAOEIFS - 1;
	n = (e - ifp) * sizeof *ifp;
	memmove(ifp, ifp+1, n);
	e->nd = NULL;
	dev_put(nd);
}

static struct frame *
reassign_frame(struct frame *f)
{
	struct frame *nf;
	struct sk_buff *skb;

	nf = newframe(f->t->d);
	if (!nf)
		return NULL;
	if (nf->t == f->t) {
		aoe_freetframe(nf);
		return NULL;
	}

	skb = nf->skb;
	nf->skb = f->skb;
	nf->buf = f->buf;
	nf->bcnt = f->bcnt;
	nf->lba = f->lba;
	nf->bv = f->bv;
	nf->bv_off = f->bv_off;
	nf->waited = 0;
	nf->waited_total = f->waited_total;
	nf->sent = f->sent;
	nf->sent_jiffs = f->sent_jiffs;
	f->skb = skb;

	return nf;
}
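
/*
 * Send a probe to a tainted target, padding the frame's data area with
 * the shared empty page, to test whether it is responsive again.
 */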
static void
probe(struct aoetgt *t)
{
	struct aoedev *d;
	struct frame *f;
	struct sk_buff *skb;
	struct sk_buff_head queue;
	size_t n, m;
	int frag;

	d = t->d;
	f = newtframe(d, t);
	if (!f) {
		pr_err("%s %pm for e%ld.%d: %s\n",
			"aoe: cannot probe remote address",
			t->addr,
			(long) d->aoemajor, d->aoeminor,
			"no frame available");
		return;
	}
	f->flags |= FFL_PROBE;
	ifrotate(t);
	f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
	ata_rw_frameinit(f);
	skb = f->skb;
	for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
		if (n < PAGE_SIZE)
			m = n;
		else
			m = PAGE_SIZE;
		skb_fill_page_desc(skb, frag, empty_page, 0, m);
	}
	skb->len += f->bcnt;
	skb->data_len = f->bcnt;
	skb->truesize += f->bcnt;

	skb = skb_clone(f->skb, GFP_ATOMIC);
	if (skb) {
		do_gettimeofday(&f->sent);
		f->sent_jiffs = (u32) jiffies;
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, skb);
		aoenet_xmit(&queue);
	}
}
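
/*
 * Retransmission timeout: the smoothed RTT estimate plus a multiple of
 * the mean deviation, in the style of TCP's RTO (cf. the Jacobson &
 * Karels reference in calc_rttavg below).
 */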
static long
rto(struct aoedev *d)
{
	long t;

	t = 2 * d->rttavg >> RTTSCALE;
	t += 8 * d->rttdev >> RTTDSCALE;
	if (t == 0)
		t = 1;

	return t;
}

static void
rexmit_deferred(struct aoedev *d)
{
	struct aoetgt *t;
	struct frame *f;
	struct frame *nf;
	struct list_head *pos, *nx, *head;
	int since;
	int untainted;

	count_targets(d, &untainted);

	head = &d->rexmitq;
	list_for_each_safe(pos, nx, head) {
		f = list_entry(pos, struct frame, head);
		t = f->t;
		if (t->taint) {
			if (!(f->flags & FFL_PROBE)) {
				nf = reassign_frame(f);
				if (nf) {
					if (t->nout_probes == 0
					&& untainted > 0) {
						probe(t);
						t->nout_probes++;
					}
					list_replace(&f->head, &nf->head);
					pos = &nf->head;
					aoe_freetframe(f);
					f = nf;
					t = f->t;
				}
			} else if (untainted < 1) {
				/* don't probe w/o other untainted aoetgts */
				goto stop_probe;
			} else if (tsince_hr(f) < t->taint * rto(d)) {
				/* reprobe slowly when taint is high */
				continue;
			}
		} else if (f->flags & FFL_PROBE) {
stop_probe:		/* don't probe untainted aoetgts */
			list_del(pos);
			aoe_freetframe(f);
			/* leaving d->kicked, because this is routine */
			f->t->d->flags |= DEVFL_KICKME;
			continue;
		}
		if (t->nout >= t->maxout)
			continue;
		list_del(pos);
		t->nout++;
		if (f->flags & FFL_PROBE)
			t->nout_probes++;
		since = tsince_hr(f);
		f->waited += since;
		f->waited_total += since;
		resend(d, f);
	}
}

/* An aoetgt accumulates demerits quickly, and successful
 * probing redeems the aoetgt slowly.
 */
static void
scorn(struct aoetgt *t)
{
	int n;

	n = t->taint++;
	t->taint += t->taint * 2;
	if (n > t->taint)
		t->taint = n;
	if (t->taint > MAX_TAINT)
		t->taint = MAX_TAINT;
}

static int
count_targets(struct aoedev *d, int *untainted)
{
	int i, good;

	for (i = good = 0; i < d->ntargets && d->targets[i]; ++i)
		if (d->targets[i]->taint == 0)
			good++;

	if (untainted)
		*untainted = good;
	return i;
}
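
/*
 * Timer callback: move frames that have outlived the current timeout
 * onto the retransmit queue, penalizing their targets and shrinking
 * their windows, then retransmit whatever the windows allow.
 */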
static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct aoetgt *t;
	struct aoeif *ifp;
	struct frame *f;
	struct list_head *head, *pos, *nx;
	LIST_HEAD(flist);
	register long timeout;
	ulong flags, n;
	int i;
	int utgts;	/* number of aoetgt descriptors (not slots) */
	int since;

	d = (struct aoedev *) vp;

	spin_lock_irqsave(&d->lock, flags);

	/* timeout based on observed timings and variations */
	timeout = rto(d);

	utgts = count_targets(d, NULL);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}

	/* collect all frames to rexmit into flist */
	for (i = 0; i < NFACTIVE; i++) {
		head = &d->factive[i];
		list_for_each_safe(pos, nx, head) {
			f = list_entry(pos, struct frame, head);
			if (tsince_hr(f) < timeout)
				break;	/* end of expired frames */
			/* move to flist for later processing */
			list_move_tail(pos, &flist);
		}
	}

	/* process expired frames */
	while (!list_empty(&flist)) {
		pos = flist.next;
		f = list_entry(pos, struct frame, head);
		since = tsince_hr(f);
		n = f->waited_total + since;
		n /= USEC_PER_SEC;
		if (aoe_deadsecs
		&& n > aoe_deadsecs
		&& !(f->flags & FFL_PROBE)) {
			/* Waited too long.  Device failure.
			 * Hang all frames on first hash bucket for downdev
			 * to clean up.
			 */
			list_splice(&flist, &d->factive[0]);
			aoedev_downdev(d);
			goto out;
		}

		t = f->t;
		n = f->waited + since;
		n /= USEC_PER_SEC;
		if (aoe_deadsecs && utgts > 0
		&& (n > aoe_deadsecs / utgts || n > HARD_SCORN_SECS))
			scorn(t);	/* avoid this target */

		if (t->maxout != 1) {
			t->ssthresh = t->maxout / 2;
			t->maxout = 1;
		}

		if (f->flags & FFL_PROBE) {
			t->nout_probes--;
		} else {
			ifp = getif(t, f->skb->dev);
			if (ifp && ++ifp->lost > (t->nframes << 1)
			&& (ifp != t->ifs || t->ifs[1].nd)) {
				ejectif(t, ifp);
				ifp = NULL;
			}
		}
		list_move_tail(pos, &d->rexmitq);
		t->nout--;
	}
	rexmit_deferred(d);

out:
	if ((d->flags & DEVFL_KICKME) && d->blkq) {
		d->flags &= ~DEVFL_KICKME;
		d->blkq->request_fn(d->blkq);
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);
}

static unsigned long
rqbiocnt(struct request *r)
{
	struct bio *bio;
	unsigned long n = 0;

	__rq_for_each_bio(bio, r)
		n++;
	return n;
}

/* This can be removed if we are certain that no users of the block
 * layer will ever use zero-count pages in bios.  Otherwise we have to
 * protect against the put_page sometimes done by the network layer.
 *
 * See http://oss.sgi.com/archives/xfs/2007-01/msg00594.html for
 * discussion.
 *
 * We cannot use get_page in the workaround, because it insists on a
 * positive page count as a precondition.  So we use _count directly.
 */
static void
bio_pageinc(struct bio *bio)
{
	struct bio_vec *bv;
	struct page *page;
	int i;

	bio_for_each_segment(bv, bio, i) {
		page = bv->bv_page;
		/* Non-zero page count for non-head members of
		 * compound pages is no longer allowed by the kernel,
		 * but this has never been seen here.
		 */
		if (unlikely(PageCompound(page)))
			if (compound_trans_head(page) != page) {
				pr_crit("page tail used for block I/O\n");
				BUG();
			}
		atomic_inc(&page->_count);
	}
}

static void
bio_pagedec(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment(bv, bio, i)
		atomic_dec(&bv->bv_page->_count);
}

static void
bufinit(struct buf *buf, struct request *rq, struct bio *bio)
{
	struct bio_vec *bv;

	memset(buf, 0, sizeof(*buf));
	buf->rq = rq;
	buf->bio = bio;
	buf->resid = bio->bi_size;
	buf->sector = bio->bi_sector;
	bio_pageinc(bio);
	buf->bv = bv = &bio->bi_io_vec[bio->bi_idx];
	buf->bv_resid = bv->bv_len;
	WARN_ON(buf->bv_resid == 0);
}

static struct buf *
nextbuf(struct aoedev *d)
{
	struct request *rq;
	struct request_queue *q;
	struct buf *buf;
	struct bio *bio;

	q = d->blkq;
	if (q == NULL)
		return NULL;	/* initializing */
	if (d->ip.buf)
		return d->ip.buf;
	rq = d->ip.rq;
	if (rq == NULL) {
		rq = blk_peek_request(q);
		if (rq == NULL)
			return NULL;
		blk_start_request(rq);
		d->ip.rq = rq;
		d->ip.nxbio = rq->bio;
		rq->special = (void *) rqbiocnt(rq);
	}
	buf = mempool_alloc(d->bufpool, GFP_ATOMIC);
	if (buf == NULL) {
		pr_err("aoe: nextbuf: unable to mempool_alloc!\n");
		return NULL;
	}
	bio = d->ip.nxbio;
	bufinit(buf, rq, bio);
	bio = bio->bi_next;
	d->ip.nxbio = bio;
	if (bio == NULL)
		d->ip.rq = NULL;
	return d->ip.buf = buf;
}

/* enters with d->lock held */
void
aoecmd_work(struct aoedev *d)
{
	rexmit_deferred(d);
	while (aoecmd_ata_rw(d))
		;
}

/* this function performs work that has been deferred until sleeping is OK
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);
	struct block_device *bd;
	u64 ssize;

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		ssize = get_capacity(d->gd);
		bd = bdget_disk(d->gd, 0);
		if (bd) {
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irq(&d->lock);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irq(&d->lock);
	}
}

static void
ata_ident_fixstring(u16 *id, int ns)
{
	u16 s;

	while (ns-- > 0) {
		s = *id;
		*id++ = s >> 8 | s << 8;
	}
}

static void
ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
{
	u64 ssize;
	u16 n;

	/* word 83: command set supported */
	n = get_unaligned_le16(&id[83 << 1]);

	/* word 86: command set/feature enabled */
	n |= get_unaligned_le16(&id[86 << 1]);

	if (n & (1<<10)) {	/* bit 10: LBA 48 */
		d->flags |= DEVFL_EXT;

		/* word 100: number lba48 sectors */
		ssize = get_unaligned_le64(&id[100 << 1]);

		/* set as in ide-disk.c:init_idedisk_capacity */
		d->geo.cylinders = ssize;
		d->geo.cylinders /= (255 * 63);
		d->geo.heads = 255;
		d->geo.sectors = 63;
	} else {
		d->flags &= ~DEVFL_EXT;

		/* number lba28 sectors */
		ssize = get_unaligned_le32(&id[60 << 1]);

		/* NOTE: obsolete in ATA 6 */
		d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
		d->geo.heads = get_unaligned_le16(&id[55 << 1]);
		d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
	}

	ata_ident_fixstring((u16 *) &id[10<<1], 10);	/* serial */
	ata_ident_fixstring((u16 *) &id[23<<1], 4);	/* firmware */
	ata_ident_fixstring((u16 *) &id[27<<1], 20);	/* model */
	memcpy(d->ident, id, sizeof(d->ident));

	if (d->ssize != ssize)
		printk(KERN_INFO
			"aoe: %pm e%ld.%d v%04x has %llu sectors\n",
			t->addr,
			d->aoemajor, d->aoeminor,
			d->fw_ver, (long long)ssize);
	d->ssize = ssize;
	d->geo.start = 0;
	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL) {
		set_capacity(d->gd, ssize);
		d->flags |= DEVFL_NEWSIZE;
	} else
		d->flags |= DEVFL_GDALLOC;
	schedule_work(&d->work);
}
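
/*
 * Fold a new RTT sample into the smoothed average and mean deviation,
 * then grow the target's window: by one frame per sample below ssthresh,
 * and by one frame per window's worth of responses above it.
 */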
static void
calc_rttavg(struct aoedev *d, struct aoetgt *t, int rtt)
{
	register long n;

	n = rtt;

	/* cf. Congestion Avoidance and Control, Jacobson & Karels, 1988 */
	n -= d->rttavg >> RTTSCALE;
	d->rttavg += n;
	if (n < 0)
		n = -n;
	n -= d->rttdev >> RTTDSCALE;
	d->rttdev += n;

	if (!t || t->maxout >= t->nframes)
		return;
	if (t->maxout < t->ssthresh)
		t->maxout += 1;
	else if (t->nout == t->maxout && t->next_cwnd-- == 0) {
		t->maxout += 1;
		t->next_cwnd = t->maxout;
	}
}

static struct aoetgt *
gettgt(struct aoedev *d, char *addr)
{
	struct aoetgt **t, **e;

	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
			return *t;
	return NULL;
}
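
/* Copy cnt bytes of response payload from the skb into the bio_vec pages. */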
static void
bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
{
	ulong fcnt;
	char *p;
	int soff = 0;
loop:
	fcnt = bv->bv_len - (off - bv->bv_offset);
	if (fcnt > cnt)
		fcnt = cnt;
	p = page_address(bv->bv_page) + off;
	skb_copy_bits(skb, soff, p, fcnt);
	soff += fcnt;
	cnt -= fcnt;
	if (cnt <= 0)
		return;
	bv++;
	off = bv->bv_offset;
	goto loop;
}

void
aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
{
	struct bio *bio;
	int bok;
	struct request_queue *q;

	q = d->blkq;
	if (rq == d->ip.rq)
		d->ip.rq = NULL;
	do {
		bio = rq->bio;
		bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));

	/* cf. http://lkml.org/lkml/2006/10/31/28 */
	if (!fastfail)
		__blk_run_queue(q);
}

static void
aoe_end_buf(struct aoedev *d, struct buf *buf)
{
	struct request *rq;
	unsigned long n;

	if (buf == d->ip.buf)
		d->ip.buf = NULL;
	rq = buf->rq;
	bio_pagedec(buf->bio);
	mempool_free(buf, d->bufpool);
	n = (unsigned long) rq->special;
	rq->special = (void *) --n;
	if (n == 0)
		aoe_end_request(d, rq, 0);
}

static void
ktiocomplete(struct frame *f)
{
	struct aoe_hdr *hin, *hout;
	struct aoe_atahdr *ahin, *ahout;
	struct buf *buf;
	struct sk_buff *skb;
	struct aoetgt *t;
	struct aoeif *ifp;
	struct aoedev *d;
	long n;
	int untainted;

	if (f == NULL)
		return;

	t = f->t;
	d = t->d;
	skb = f->r_skb;
	buf = f->buf;
	if (f->flags & FFL_PROBE)
		goto out;
	if (!skb)		/* just fail the buf. */
		goto noskb;

	hout = (struct aoe_hdr *) skb_mac_header(f->skb);
	ahout = (struct aoe_atahdr *) (hout+1);

	hin = (struct aoe_hdr *) skb->data;
	skb_pull(skb, sizeof(*hin));
	ahin = (struct aoe_atahdr *) skb->data;
	skb_pull(skb, sizeof(*ahin));
	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		pr_err("aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
noskb:		if (buf)
			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
		goto out;
	}

	n = ahout->scnt << 9;
	switch (ahout->cmdstat) {
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
		if (skb->len < n) {
			pr_err("%s e%ld.%d.  skb->len=%d need=%ld\n",
				"aoe: runt data size in read from",
				(long) d->aoemajor, d->aoeminor,
				skb->len, n);
			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
			break;
		}
		bvcpy(f->bv, f->bv_off, skb, n);
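		/* fall thru: a good read, like a write, clears ifp->lost below */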
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		spin_lock_irq(&d->lock);
		ifp = getif(t, skb->dev);
		if (ifp)
			ifp->lost = 0;
		spin_unlock_irq(&d->lock);
		break;
	case ATA_CMD_ID_ATA:
		if (skb->len < 512) {
			pr_info("%s e%ld.%d.  skb->len=%d need=512\n",
				"aoe: runt data size in ataid from",
				(long) d->aoemajor, d->aoeminor,
				skb->len);
			break;
		}
		if (skb_linearize(skb))
			break;
		spin_lock_irq(&d->lock);
		ataid_complete(d, t, skb->data);
		spin_unlock_irq(&d->lock);
		break;
	default:
		pr_info("aoe: unrecognized ata command %2.2Xh for %d.%d\n",
			ahout->cmdstat,
			be16_to_cpu(get_unaligned(&hin->major)),
			hin->minor);
	}
out:
	spin_lock_irq(&d->lock);
	if (t->taint > 0
	&& --t->taint > 0
	&& t->nout_probes == 0) {
		count_targets(d, &untainted);
		if (untainted > 0) {
			probe(t);
			t->nout_probes++;
		}
	}

	aoe_freetframe(f);

	if (buf && --buf->nframesout == 0 && buf->resid == 0)
		aoe_end_buf(d, buf);

	spin_unlock_irq(&d->lock);
	aoedev_put(d);
	dev_kfree_skb(skb);
}

/* Enters with iocq.lock held.
 * Returns true iff responses needing processing remain.
 */
static int
ktio(void)
{
	struct frame *f;
	struct list_head *pos;
	int i;

	for (i = 0; ; ++i) {
		if (i == MAXIOC)
			return 1;
		if (list_empty(&iocq.head))
			return 0;
		pos = iocq.head.next;
		list_del(pos);
		spin_unlock_irq(&iocq.lock);
		f = list_entry(pos, struct frame, head);
		ktiocomplete(f);
		spin_lock_irq(&iocq.lock);
	}
}

static int
kthread(void *vp)
{
	struct ktstate *k;
	DECLARE_WAITQUEUE(wait, current);
	int more;

	k = vp;
	current->flags |= PF_NOFREEZE;
	set_user_nice(current, -10);
	complete(&k->rendez);	/* tell spawner we're running */
	do {
		spin_lock_irq(k->lock);
		more = k->fn();
		if (!more) {
			add_wait_queue(k->waitq, &wait);
			__set_current_state(TASK_INTERRUPTIBLE);
		}
		spin_unlock_irq(k->lock);
		if (!more) {
			schedule();
			remove_wait_queue(k->waitq, &wait);
		} else
			cond_resched();
	} while (!kthread_should_stop());
	complete(&k->rendez);	/* tell spawner we're stopping */
	return 0;
}

void
aoe_ktstop(struct ktstate *k)
{
	kthread_stop(k->task);
	wait_for_completion(&k->rendez);
}

int
aoe_ktstart(struct ktstate *k)
{
	struct task_struct *task;

	init_completion(&k->rendez);
	task = kthread_run(kthread, k, k->name);
	if (task == NULL || IS_ERR(task))
		return -ENOMEM;
	k->task = task;
	wait_for_completion(&k->rendez);	/* allow kthread to start */
	init_completion(&k->rendez);	/* for waiting for exit later */
	return 0;
}

/* pass it off to kthreads for processing */
static void
ktcomplete(struct frame *f, struct sk_buff *skb)
{
	ulong flags;

	f->r_skb = skb;
	spin_lock_irqsave(&iocq.lock, flags);
	list_add_tail(&f->head, &iocq.head);
	spin_unlock_irqrestore(&iocq.lock, flags);
	wake_up(&ktiowq);
}
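
/*
 * Match an incoming ATA response to its outstanding frame by tag and
 * hand both to the ktio thread.  Returns the skb when no frame matches,
 * so the caller can free it, and NULL when ownership has been passed on.
 */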
struct sk_buff *
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct frame *f;
	u32 n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	h = (struct aoe_hdr *) skb->data;
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	d = aoedev_by_aoeaddr(aoemajor, h->minor, 0);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, h->minor);
		aoechr_error(ebuf);
		return skb;
	}

	spin_lock_irqsave(&d->lock, flags);

	n = be32_to_cpu(get_unaligned(&h->tag));
	f = getframe(d, n);
	if (f) {
		calc_rttavg(d, f->t, tsince_hr(f));
		f->t->nout--;
		if (f->flags & FFL_PROBE)
			f->t->nout_probes--;
	} else {
		f = getframe_deferred(d, n);
		if (f) {
			calc_rttavg(d, NULL, tsince_hr(f));
		} else {
			calc_rttavg(d, NULL, tsince(n));
			spin_unlock_irqrestore(&d->lock, flags);
			aoedev_put(d);
			snprintf(ebuf, sizeof(ebuf),
				"%15s e%d.%d tag=%08x@%08lx s=%pm d=%pm\n",
				"unexpected rsp",
				get_unaligned_be16(&h->major),
				h->minor,
				get_unaligned_be32(&h->tag),
				jiffies,
				h->src,
				h->dst);
			aoechr_error(ebuf);
			return skb;
		}
	}
	aoecmd_work(d);

	spin_unlock_irqrestore(&d->lock, flags);

	ktcomplete(f, skb);

	/*
	 * Note here that we do not perform an aoedev_put, as we are
	 * leaving this reference for the ktio to release.
	 */
	return NULL;
}

void
aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
{
	struct sk_buff_head queue;

	__skb_queue_head_init(&queue);
	aoecmd_cfg_pkts(aoemajor, aoeminor, &queue);
	aoenet_xmit(&queue);
}

struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;
	struct aoetgt *t;

	f = newframe(d);
	if (f == NULL)
		return NULL;

	t = *d->tgt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	fhash(f);
	t->nout++;
	f->waited = 0;
	f->waited_total = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = ATA_CMD_ID_ATA;
	ah->lba3 = 0xa0;

	skb->dev = t->ifp->nd;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->timer.function = rexmit_timer;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb) {
		do_gettimeofday(&f->sent);
		f->sent_jiffs = (u32) jiffies;
	}

	return skb;
}
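
/* Double the size of the target table, preserving d->tgt's position. */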
static struct aoetgt **
grow_targets(struct aoedev *d)
{
	ulong oldn, newn;
	struct aoetgt **tt;

	oldn = d->ntargets;
	newn = oldn * 2;
	tt = kcalloc(newn, sizeof(*d->targets), GFP_ATOMIC);
	if (!tt)
		return NULL;
	memmove(tt, d->targets, sizeof(*d->targets) * oldn);
	d->tgt = tt + (d->tgt - d->targets);
	kfree(d->targets);
	d->targets = tt;
	d->ntargets = newn;

	return &d->targets[oldn];
}

static struct aoetgt *
addtgt(struct aoedev *d, char *addr, ulong nframes)
{
	struct aoetgt *t, **tt, **te;

	tt = d->targets;
	te = tt + d->ntargets;
	for (; tt < te && *tt; tt++)
		;

	if (tt == te) {
		tt = grow_targets(d);
		if (!tt)
			goto nomem;
	}

	t = kzalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		goto nomem;
	t->nframes = nframes;
	t->d = d;
	memcpy(t->addr, addr, sizeof t->addr);
	t->ifp = t->ifs;
	aoecmd_wreset(t);
	t->maxout = t->nframes / 2;
	INIT_LIST_HEAD(&t->ffree);
	return *tt = t;

nomem:
	pr_info("aoe: cannot allocate memory to add target\n");
	return NULL;
}
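
/* The device's data-frame size is the smallest minbcnt over its targets. */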
static void
setdbcnt(struct aoedev *d)
{
	struct aoetgt **t, **e;
	int bcnt = 0;

	t = d->targets;
	e = t + d->ntargets;
	for (; t < e && *t; t++)
		if (bcnt == 0 || bcnt > (*t)->minbcnt)
			bcnt = (*t)->minbcnt;
	if (bcnt != d->maxbcnt) {
		d->maxbcnt = bcnt;
		pr_info("aoe: e%ld.%d: setting %d byte data frames\n",
			d->aoemajor, d->aoeminor, bcnt);
	}
}

static void
setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
{
	struct aoedev *d;
	struct aoeif *p, *e;
	int minbcnt;

	d = t->d;
	minbcnt = bcnt;
	p = t->ifs;
	e = p + NAOEIFS;
	for (; p < e; p++) {
		if (p->nd == NULL)
			break;	/* end of the valid interfaces */
		if (p->nd == nd) {
			p->bcnt = bcnt;	/* we're updating */
			nd = NULL;
		} else if (minbcnt > p->bcnt)
			minbcnt = p->bcnt;	/* find the min interface */
	}

	if (nd) {
		if (p == e) {
			pr_err("aoe: device setifbcnt failure; too many interfaces.\n");
			return;
		}
		dev_hold(nd);
		p->nd = nd;
		p->bcnt = bcnt;
	}

	t->minbcnt = minbcnt;
	setdbcnt(d);
}
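
/*
 * Handle an AoE config response: validate the shelf and slot address,
 * create or update the target for the sender, recompute the usable
 * data-frame size for the receiving interface, and send an ATA identify
 * if no one has the device open yet.
 */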
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct aoetgt *t;
	ulong flags, aoemajor;
	struct sk_buff *sl;
	struct sk_buff_head queue;
	u16 n;

	sl = NULL;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = get_unaligned_be16(&h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones.  "
			"Check shelf dip switches.\n");
		return;
	}
	if (aoemajor == 0xffff) {
		pr_info("aoe: e%ld.%d: broadcast shelf number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}
	if (h->minor == 0xff) {
		pr_info("aoe: e%ld.%d: broadcast slot number invalid\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > aoe_maxout)	/* keep it reasonable */
		n = aoe_maxout;

	d = aoedev_by_aoeaddr(aoemajor, h->minor, 1);
	if (d == NULL) {
		pr_info("aoe: device allocation failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
	if (t) {
		t->nframes = n;
		if (n < t->maxout)
			aoecmd_wreset(t);
	} else {
		t = addtgt(d, h->src, n);
		if (!t)
			goto bail;
	}
	n = skb->dev->mtu;
	n -= sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
	n /= 512;
	if (n > ch->scnt)
		n = ch->scnt;
	n = n ? n * 512 : DEFAULTBCNT;
	setifbcnt(t, skb->dev, n);

	/* don't change users' perspective */
	if (d->nopen == 0) {
		d->fw_ver = be16_to_cpu(ch->fwver);
		sl = aoecmd_ata_id(d);
	}
bail:
	spin_unlock_irqrestore(&d->lock, flags);
	aoedev_put(d);
	if (sl) {
		__skb_queue_head_init(&queue);
		__skb_queue_tail(&queue, sl);
		aoenet_xmit(&queue);
	}
}

void
aoecmd_wreset(struct aoetgt *t)
{
	t->maxout = 1;
	t->ssthresh = t->nframes / 2;
	t->next_cwnd = t->nframes;
}

void
aoecmd_cleanslate(struct aoedev *d)
{
	struct aoetgt **t, **te;

	d->rttavg = RTTAVG_INIT;
	d->rttdev = RTTDEV_INIT;
	d->maxbcnt = 0;

	t = d->targets;
	te = t + d->ntargets;
	for (; t < te && *t; t++)
		aoecmd_wreset(*t);
}

void
aoe_failbuf(struct aoedev *d, struct buf *buf)
{
	if (buf == NULL)
		return;
	buf->resid = 0;
	clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
	if (buf->nframesout == 0)
		aoe_end_buf(d, buf);
}

void
aoe_flush_iocq(void)
{
	struct frame *f;
	struct aoedev *d;
	LIST_HEAD(flist);
	struct list_head *pos;
	struct sk_buff *skb;
	ulong flags;

	spin_lock_irqsave(&iocq.lock, flags);
	list_splice_init(&iocq.head, &flist);
	spin_unlock_irqrestore(&iocq.lock, flags);
	while (!list_empty(&flist)) {
		pos = flist.next;
		list_del(pos);
		f = list_entry(pos, struct frame, head);
		d = f->t->d;
		skb = f->r_skb;
		spin_lock_irqsave(&d->lock, flags);
		if (f->buf) {
			f->buf->nframesout--;
			aoe_failbuf(d, f->buf);
		}
		aoe_freetframe(f);
		spin_unlock_irqrestore(&d->lock, flags);
		dev_kfree_skb(skb);
		aoedev_put(d);
	}
}

int __init
aoecmd_init(void)
{
	void *p;

	/* get_zeroed_page returns page with ref count 1 */
	p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
	if (!p)
		return -ENOMEM;
	empty_page = virt_to_page(p);

	INIT_LIST_HEAD(&iocq.head);
	spin_lock_init(&iocq.lock);
	init_waitqueue_head(&ktiowq);
	kts.name = "aoe_ktio";
	kts.fn = ktio;
	kts.waitq = &ktiowq;
	kts.lock = &iocq.lock;

	return aoe_ktstart(&kts);
}

void
aoecmd_exit(void)
{
	aoe_ktstop(&kts);
	aoe_flush_iocq();
	free_page((unsigned long) page_address(empty_page));
	empty_page = NULL;
}