  1. /* Copyright (c) 2006 Coraid, Inc. See COPYING for GPL terms. */
  2. /*
  3. * aoecmd.c
  4. * Filesystem request handling methods
  5. */
  6. #include <linux/hdreg.h>
  7. #include <linux/blkdev.h>
  8. #include <linux/skbuff.h>
  9. #include <linux/netdevice.h>
  10. #include <linux/genhd.h>
  11. #include <linux/moduleparam.h>
  12. #include <net/net_namespace.h>
  13. #include <asm/unaligned.h>
  14. #include "aoe.h"
/* Seconds to keep retransmitting a frame before declaring the whole
 * device failed (see rexmit_timer).  Runtime-tunable module parameter.
 */
static int aoe_deadsecs = 60 * 3;
module_param(aoe_deadsecs, int, 0644);
MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev.");
/* Allocate a fresh sk_buff of @len bytes for AoE use and stamp it with
 * the AoE ethertype.  Uses GFP_ATOMIC so it is safe in interrupt
 * context; returns NULL on allocation failure.
 */
static struct sk_buff *
new_skb(ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_reset_mac_header(skb);
		skb_reset_network_header(skb);
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->priority = 0;
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * or to get the NIC to do it
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}
  36. static struct frame *
  37. getframe(struct aoetgt *t, int tag)
  38. {
  39. struct frame *f, *e;
  40. f = t->frames;
  41. e = f + t->nframes;
  42. for (; f<e; f++)
  43. if (f->tag == tag)
  44. return f;
  45. return NULL;
  46. }
  47. /*
  48. * Leave the top bit clear so we have tagspace for userland.
  49. * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
  50. * This driver reserves tag -1 to mean "unused frame."
  51. */
  52. static int
  53. newtag(struct aoetgt *t)
  54. {
  55. register ulong n;
  56. n = jiffies & 0xffff;
  57. return n |= (++t->lasttag & 0x7fff) << 16;
  58. }
/* Fill in AoE header @h for an ATA command from device @d to target @t,
 * allocating a fresh tag.  Returns the host-side tag (also stored
 * big-endian in the header).
 */
static int
aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h)
{
	u32 host_tag = newtag(t);

	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
	memcpy(h->dst, t->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}
  73. static inline void
  74. put_lba(struct aoe_atahdr *ah, sector_t lba)
  75. {
  76. ah->lba0 = lba;
  77. ah->lba1 = lba >>= 8;
  78. ah->lba2 = lba >>= 8;
  79. ah->lba3 = lba >>= 8;
  80. ah->lba4 = lba >>= 8;
  81. ah->lba5 = lba >>= 8;
  82. }
  83. static void
  84. ifrotate(struct aoetgt *t)
  85. {
  86. t->ifp++;
  87. if (t->ifp >= &t->ifs[NAOEIFS] || t->ifp->nd == NULL)
  88. t->ifp = t->ifs;
  89. if (t->ifp->nd == NULL) {
  90. printk(KERN_INFO "aoe: no interface to rotate to\n");
  91. BUG();
  92. }
  93. }
/* Pick a target with send capacity and return one of its free frames,
 * reset and ready for reuse.  On success d->tgt points at the chosen
 * target and that target's interface has been rotated.  The target
 * being helped (d->htgt) is skipped.  Returns NULL when nothing is
 * available; sets DEVFL_KICKME when every free frame's skb is still
 * held by the NIC driver so the caller retries from the timer.
 */
static struct frame *
freeframe(struct aoedev *d)
{
	struct frame *f, *e;
	struct aoetgt **t;
	ulong n;

	if (d->targets[0] == NULL) {	/* shouldn't happen, but I'm paranoid */
		printk(KERN_ERR "aoe: NULL TARGETS!\n");
		return NULL;
	}
	t = d->targets;
	do {
		if (t != d->htgt
		&& (*t)->ifp->nd
		&& (*t)->nout < (*t)->maxout) {
			n = (*t)->nframes;
			f = (*t)->frames;
			e = f + n;
			for (; f < e; f++) {
				if (f->tag != FREETAG)
					continue;
				/* skb data still referenced by the
				 * network driver: can't reuse it yet
				 */
				if (atomic_read(&skb_shinfo(f->skb)->dataref)
					!= 1) {
					n--;
					continue;
				}
				/* strip the skb back to an empty header */
				skb_shinfo(f->skb)->nr_frags = 0;
				f->skb->data_len = 0;
				skb_trim(f->skb, 0);
				d->tgt = t;
				ifrotate(*t);
				return f;
			}
			if (n == 0)	/* slow polling network card */
				d->flags |= DEVFL_KICKME;
		}
		t++;
	} while (t < &d->targets[NTARGETS] && *t);
	return NULL;
}
/* Build and queue one ATA read/write frame for d->inprocess, advancing
 * the buf's progress fields (offset, residuals, sector).  Returns 1
 * when a frame was queued (caller loops for more), 0 when no free
 * frame is available.  Called with d->lock held.
 */
static int
aoecmd_ata_rw(struct aoedev *d)
{
	struct frame *f;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct bio_vec *bv;
	struct aoetgt *t;
	struct sk_buff *skb;
	ulong bcnt;
	char writebit, extbit;

	writebit = 0x10;	/* WIN_READ | 0x10 == WIN_WRITE */
	extbit = 0x4;		/* WIN_x | 0x4 == WIN_x_EXT (lba48) */

	f = freeframe(d);
	if (f == NULL)
		return 0;
	t = *d->tgt;
	buf = d->inprocess;
	bv = buf->bv;
	/* payload size: interface limit, capped by what's left in the bvec */
	bcnt = t->ifp->maxbcnt;
	if (bcnt == 0)
		bcnt = DEFAULTBCNT;
	if (bcnt > buf->bv_resid)
		bcnt = buf->bv_resid;
	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	t->nout++;
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = page_address(bv->bv_page) + buf->bv_off;
	f->bcnt = bcnt;
	f->lba = buf->sector;

	/* set up ata header */
	ah->scnt = bcnt >> 9;
	put_lba(ah, buf->sector);
	if (d->flags & DEVFL_EXT) {
		ah->aflags |= AOEAFL_EXT;
	} else {
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

	if (bio_data_dir(buf->bio) == WRITE) {
		/* attach the page as frag data -- zero-copy write */
		skb_fill_page_desc(skb, 0, bv->bv_page, buf->bv_off, bcnt);
		ah->aflags |= AOEAFL_WRITE;
		skb->len += bcnt;
		skb->data_len = bcnt;
		t->wpkts++;
	} else {
		t->rpkts++;
		writebit = 0;
	}

	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bv_off += bcnt;
	buf->bv_resid -= bcnt;
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;
	} else if (buf->bv_resid == 0) {
		/* this bvec exhausted: step to the bio's next segment */
		buf->bv = ++bv;
		buf->bv_resid = bv->bv_len;
		WARN_ON(buf->bv_resid == 0);
		buf->bv_off = bv->bv_offset;
	}

	skb->dev = t->ifp->nd;
	/* queue a clone; the frame keeps the original for retransmit */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb) {
		if (d->sendq_hd)
			d->sendq_tl->next = skb;
		else
			d->sendq_hd = skb;
		d->sendq_tl = skb;
	}
	return 1;
}
/* some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 *
 * Build a broadcast AoE config query for each usable network interface
 * and return them as a list linked through skb->next.  If @tail is
 * non-NULL it receives the last skb so the caller can splice the list
 * onto a send queue.
 */
static struct sk_buff *
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb, *sl, *sl_tail;
	struct net_device *ifp;

	sl = sl_tail = NULL;

	read_lock(&dev_base_lock);
	for_each_netdev(&init_net, ifp) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			goto cont;

		skb = new_skb(sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: skb alloc failure\n");
			goto cont;
		}
		skb_put(skb, sizeof *h + sizeof *ch);
		skb->dev = ifp;
		if (sl_tail == NULL)
			sl_tail = skb;
		h = (struct aoe_hdr *) skb_mac_header(skb);
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);	/* ethernet broadcast */
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

		skb->next = sl;
		sl = skb;
cont:
		dev_put(ifp);
	}
	read_unlock(&dev_base_lock);

	if (tail != NULL)
		*tail = sl_tail;
	return sl;
}
/* Retransmit frame @f on target @t under a fresh tag, after rotating
 * to the target's next interface.  For data commands the LBA and
 * sector count are rebuilt from the frame's current progress, capped
 * at DEFAULTBCNT in case jumbo frames are being lost.  A clone is
 * appended to d->sendq for later transmission.  Called with d->lock
 * held.
 */
static void
resend(struct aoedev *d, struct aoetgt *t, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	char buf[128];
	u32 n;

	ifrotate(t);
	n = newtag(t);
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);

	/* log the retransmit to the aoe character device */
	snprintf(buf, sizeof buf,
		"%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x "
		"s=%012llx d=%012llx nout=%d\n",
		"retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n,
		mac_addr(h->src),
		mac_addr(h->dst), t->nout);
	aoechr_error(buf);

	f->tag = n;
	h->tag = cpu_to_be32(n);
	memcpy(h->dst, t->addr, sizeof h->dst);
	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);

	switch (ah->cmdstat) {
	default:
		break;
	case WIN_READ:
	case WIN_READ_EXT:
	case WIN_WRITE:
	case WIN_WRITE_EXT:
		put_lba(ah, f->lba);

		n = f->bcnt;
		if (n > DEFAULTBCNT)	/* fall back from jumbo payloads */
			n = DEFAULTBCNT;
		ah->scnt = n >> 9;
		if (ah->aflags & AOEAFL_WRITE) {
			/* reattach the payload page at the frame's
			 * current buffer position
			 */
			skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr),
				offset_in_page(f->bufaddr), n);
			skb->len = sizeof *h + sizeof *ah + n;
			skb->data_len = n;
		}
	}
	skb->dev = t->ifp->nd;
	skb = skb_clone(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;
	if (d->sendq_hd)
		d->sendq_tl->next = skb;
	else
		d->sendq_hd = skb;
	d->sendq_tl = skb;
}
  315. static int
  316. tsince(int tag)
  317. {
  318. int n;
  319. n = jiffies & 0xffff;
  320. n -= tag & 0xffff;
  321. if (n < 0)
  322. n += 1<<16;
  323. return n;
  324. }
  325. static struct aoeif *
  326. getif(struct aoetgt *t, struct net_device *nd)
  327. {
  328. struct aoeif *p, *e;
  329. p = t->ifs;
  330. e = p + NAOEIFS;
  331. for (; p < e; p++)
  332. if (p->nd == nd)
  333. return p;
  334. return NULL;
  335. }
  336. static struct aoeif *
  337. addif(struct aoetgt *t, struct net_device *nd)
  338. {
  339. struct aoeif *p;
  340. p = getif(t, NULL);
  341. if (!p)
  342. return NULL;
  343. p->nd = nd;
  344. p->maxbcnt = DEFAULTBCNT;
  345. p->lost = 0;
  346. p->lostjumbo = 0;
  347. return p;
  348. }
  349. static void
  350. ejectif(struct aoetgt *t, struct aoeif *ifp)
  351. {
  352. struct aoeif *e;
  353. ulong n;
  354. e = t->ifs + NAOEIFS - 1;
  355. n = (e - ifp) * sizeof *ifp;
  356. memmove(ifp, ifp+1, n);
  357. e->nd = NULL;
  358. }
/* Migrate every outstanding frame off the target being helped
 * (d->htgt), retransmitting each on a healthy target.  Returns 0 when
 * no replacement frame is available (d->htgt stays set; caller retries
 * later); returns 1 once all frames are moved, clearing d->htgt and
 * wiping the helped target's interfaces.  Called with d->lock held.
 */
static int
sthtith(struct aoedev *d)
{
	struct frame *f, *e, *nf;
	struct sk_buff *skb;
	struct aoetgt *ht = *d->htgt;

	f = ht->frames;
	e = f + ht->nframes;
	for (; f < e; f++) {
		if (f->tag == FREETAG)
			continue;
		nf = freeframe(d);
		if (!nf)
			return 0;
		skb = nf->skb;
		*nf = *f;	/* copy frame state to the new frame... */
		f->skb = skb;	/* ...but swap skbs so each keeps one */
		f->tag = FREETAG;
		nf->waited = 0;
		ht->nout--;
		(*d->tgt)->nout++;
		resend(d, *d->tgt, nf);
	}
	/* he's clean, he's useless. take away his interfaces */
	memset(ht->ifs, 0, sizeof ht->ifs);
	d->htgt = NULL;
	return 1;
}
  387. static inline unsigned char
  388. ata_scnt(unsigned char *packet) {
  389. struct aoe_hdr *h;
  390. struct aoe_atahdr *ah;
  391. h = (struct aoe_hdr *) packet;
  392. ah = (struct aoe_atahdr *) (h+1);
  393. return ah->scnt;
  394. }
/* Per-device retransmit timer, re-armed every TIMERTICK.  Scans all
 * targets for frames older than ~150% of the rtt average and
 * retransmits them, with escalating loss handling: fail the device
 * after aoe_deadsecs, nominate a helper target (d->htgt) after
 * HELPWAIT, shrink the target's window, eject an interface that keeps
 * losing frames, and drop back from jumbo payloads.  Also grows the
 * window of targets that stayed saturated without losses.
 */
static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct aoetgt *t, **tt, **te;
	struct aoeif *ifp;
	struct frame *f, *e;
	struct sk_buff *sl;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	tt = d->targets;
	te = tt + NTARGETS;
	for (; tt < te && *tt; tt++) {
		t = *tt;
		f = t->frames;
		e = f + t->nframes;
		for (; f < e; f++) {
			if (f->tag == FREETAG
			|| tsince(f->tag) < timeout)
				continue;
			n = f->waited += timeout;
			n /= HZ;
			if (n > aoe_deadsecs) {
				/* waited too long. device failure. */
				aoedev_downdev(d);
				break;
			}

			if (n > HELPWAIT /* see if another target can help */
			&& (tt != d->targets || d->targets[1]))
				d->htgt = tt;

			/* saturated and losing: shrink the window */
			if (t->nout == t->maxout) {
				if (t->maxout > 1)
					t->maxout--;
				t->lastwadj = jiffies;
			}

			/* too many losses on this interface: eject it,
			 * unless it is the only one the target has
			 */
			ifp = getif(t, f->skb->dev);
			if (ifp && ++ifp->lost > (t->nframes << 1)
			&& (ifp != t->ifs || t->ifs[1].nd)) {
				ejectif(t, ifp);
				ifp = NULL;
			}

			/* jumbo frames getting lost: fall back */
			if (ata_scnt(skb_mac_header(f->skb)) > DEFAULTBCNT / 512
			&& ifp && ++ifp->lostjumbo > (t->nframes << 1)
			&& ifp->maxbcnt != DEFAULTBCNT) {
				printk(KERN_INFO
					"aoe: e%ld.%d: "
					"too many lost jumbo on "
					"%s:%012llx - "
					"falling back to %d frames.\n",
					d->aoemajor, d->aoeminor,
					ifp->nd->name, mac_addr(t->addr),
					DEFAULTBCNT);
				ifp->maxbcnt = 0;
			}
			resend(d, t, f);
		}

		/* window check */
		if (t->nout == t->maxout
		&& t->maxout < t->nframes
		&& (jiffies - t->lastwadj)/HZ > 10) {
			t->maxout++;
			t->lastwadj = jiffies;
		}
	}

	if (d->sendq_hd) {
		/* retransmits pending: back the rtt average off */
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	if (d->flags & DEVFL_KICKME || d->htgt) {
		d->flags &= ~DEVFL_KICKME;
		aoecmd_work(d);
	}

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
  485. /* enters with d->lock held */
  486. void
  487. aoecmd_work(struct aoedev *d)
  488. {
  489. struct buf *buf;
  490. loop:
  491. if (d->htgt && !sthtith(d))
  492. return;
  493. if (d->inprocess == NULL) {
  494. if (list_empty(&d->bufq))
  495. return;
  496. buf = container_of(d->bufq.next, struct buf, bufs);
  497. list_del(d->bufq.next);
  498. d->inprocess = buf;
  499. }
  500. if (aoecmd_ata_rw(d))
  501. goto loop;
  502. }
/* this function performs work that has been deferred until sleeping is OK
 * (workqueue context): gendisk allocation and capacity-change
 * propagation to the block device inode.
 */
void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = d->gd->capacity;
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			/* push the new size into the bdev inode */
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}
  529. static void
  530. ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
  531. {
  532. u64 ssize;
  533. u16 n;
  534. /* word 83: command set supported */
  535. n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));
  536. /* word 86: command set/feature enabled */
  537. n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));
  538. if (n & (1<<10)) { /* bit 10: LBA 48 */
  539. d->flags |= DEVFL_EXT;
  540. /* word 100: number lba48 sectors */
  541. ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));
  542. /* set as in ide-disk.c:init_idedisk_capacity */
  543. d->geo.cylinders = ssize;
  544. d->geo.cylinders /= (255 * 63);
  545. d->geo.heads = 255;
  546. d->geo.sectors = 63;
  547. } else {
  548. d->flags &= ~DEVFL_EXT;
  549. /* number lba28 sectors */
  550. ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));
  551. /* NOTE: obsolete in ATA 6 */
  552. d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
  553. d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
  554. d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
  555. }
  556. if (d->ssize != ssize)
  557. printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu sectors\n",
  558. mac_addr(t->addr),
  559. d->aoemajor, d->aoeminor,
  560. d->fw_ver, (long long)ssize);
  561. d->ssize = ssize;
  562. d->geo.start = 0;
  563. if (d->gd != NULL) {
  564. d->gd->capacity = ssize;
  565. d->flags |= DEVFL_NEWSIZE;
  566. } else
  567. d->flags |= DEVFL_GDALLOC;
  568. schedule_work(&d->work);
  569. }
/* Fold one round-trip sample into the moving average d->rttavg.
 * A negative @rtt marks a lost or unexpected response: its magnitude,
 * clamped to [MINTIMER, MAXTIMER], only adjusts d->mintimer, the
 * floor applied to genuine samples before smoothing.
 */
static void
calc_rttavg(struct aoedev *d, int rtt)
{
	register long n;

	n = rtt;
	if (n < 0) {
		n = -rtt;
		if (n < MINTIMER)
			n = MINTIMER;
		else if (n > MAXTIMER)
			n = MAXTIMER;
		d->mintimer += (n - d->mintimer) >> 1;
	} else if (n < d->mintimer)
		n = d->mintimer;
	else if (n > MAXTIMER)
		n = MAXTIMER;

	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
	n -= d->rttavg;
	d->rttavg += n >> 2;
}
  590. static struct aoetgt *
  591. gettgt(struct aoedev *d, char *addr)
  592. {
  593. struct aoetgt **t, **e;
  594. t = d->targets;
  595. e = t + NTARGETS;
  596. for (; t < e && *t; t++)
  597. if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0)
  598. return *t;
  599. return NULL;
  600. }
/* Account a completed bio in the gendisk statistics: one I/O of
 * @duration ticks and bi_size>>9 sectors, split by read/write
 * direction.
 */
static inline void
diskstats(struct gendisk *disk, struct bio *bio, ulong duration)
{
	unsigned long n_sect = bio->bi_size >> 9;
	const int rw = bio_data_dir(bio);

	disk_stat_inc(disk, ios[rw]);
	disk_stat_add(disk, ticks[rw], duration);
	disk_stat_add(disk, sectors[rw], n_sect);
	disk_stat_add(disk, io_ticks, duration);
}
  611. void
  612. aoecmd_ata_rsp(struct sk_buff *skb)
  613. {
  614. struct aoedev *d;
  615. struct aoe_hdr *hin, *hout;
  616. struct aoe_atahdr *ahin, *ahout;
  617. struct frame *f;
  618. struct buf *buf;
  619. struct sk_buff *sl;
  620. struct aoetgt *t;
  621. struct aoeif *ifp;
  622. register long n;
  623. ulong flags;
  624. char ebuf[128];
  625. u16 aoemajor;
  626. hin = (struct aoe_hdr *) skb_mac_header(skb);
  627. aoemajor = be16_to_cpu(get_unaligned(&hin->major));
  628. d = aoedev_by_aoeaddr(aoemajor, hin->minor);
  629. if (d == NULL) {
  630. snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
  631. "for unknown device %d.%d\n",
  632. aoemajor, hin->minor);
  633. aoechr_error(ebuf);
  634. return;
  635. }
  636. spin_lock_irqsave(&d->lock, flags);
  637. n = be32_to_cpu(get_unaligned(&hin->tag));
  638. t = gettgt(d, hin->src);
  639. if (t == NULL) {
  640. printk(KERN_INFO "aoe: can't find target e%ld.%d:%012llx\n",
  641. d->aoemajor, d->aoeminor, mac_addr(hin->src));
  642. spin_unlock_irqrestore(&d->lock, flags);
  643. return;
  644. }
  645. f = getframe(t, n);
  646. if (f == NULL) {
  647. calc_rttavg(d, -tsince(n));
  648. spin_unlock_irqrestore(&d->lock, flags);
  649. snprintf(ebuf, sizeof ebuf,
  650. "%15s e%d.%d tag=%08x@%08lx\n",
  651. "unexpected rsp",
  652. be16_to_cpu(get_unaligned(&hin->major)),
  653. hin->minor,
  654. be32_to_cpu(get_unaligned(&hin->tag)),
  655. jiffies);
  656. aoechr_error(ebuf);
  657. return;
  658. }
  659. calc_rttavg(d, tsince(f->tag));
  660. ahin = (struct aoe_atahdr *) (hin+1);
  661. hout = (struct aoe_hdr *) skb_mac_header(f->skb);
  662. ahout = (struct aoe_atahdr *) (hout+1);
  663. buf = f->buf;
  664. if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */
  665. printk(KERN_ERR
  666. "aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%ld\n",
  667. ahout->cmdstat, ahin->cmdstat,
  668. d->aoemajor, d->aoeminor);
  669. if (buf)
  670. buf->flags |= BUFFL_FAIL;
  671. } else {
  672. if (d->htgt && t == *d->htgt) /* I'll help myself, thank you. */
  673. d->htgt = NULL;
  674. n = ahout->scnt << 9;
  675. switch (ahout->cmdstat) {
  676. case WIN_READ:
  677. case WIN_READ_EXT:
  678. if (skb->len - sizeof *hin - sizeof *ahin < n) {
  679. printk(KERN_ERR
  680. "aoe: %s. skb->len=%d need=%ld\n",
  681. "runt data size in read", skb->len, n);
  682. /* fail frame f? just returning will rexmit. */
  683. spin_unlock_irqrestore(&d->lock, flags);
  684. return;
  685. }
  686. memcpy(f->bufaddr, ahin+1, n);
  687. case WIN_WRITE:
  688. case WIN_WRITE_EXT:
  689. ifp = getif(t, skb->dev);
  690. if (ifp) {
  691. ifp->lost = 0;
  692. if (n > DEFAULTBCNT)
  693. ifp->lostjumbo = 0;
  694. }
  695. if (f->bcnt -= n) {
  696. f->lba += n >> 9;
  697. f->bufaddr += n;
  698. resend(d, t, f);
  699. goto xmit;
  700. }
  701. break;
  702. case WIN_IDENTIFY:
  703. if (skb->len - sizeof *hin - sizeof *ahin < 512) {
  704. printk(KERN_INFO
  705. "aoe: runt data size in ataid. skb->len=%d\n",
  706. skb->len);
  707. spin_unlock_irqrestore(&d->lock, flags);
  708. return;
  709. }
  710. ataid_complete(d, t, (char *) (ahin+1));
  711. break;
  712. default:
  713. printk(KERN_INFO
  714. "aoe: unrecognized ata command %2.2Xh for %d.%d\n",
  715. ahout->cmdstat,
  716. be16_to_cpu(get_unaligned(&hin->major)),
  717. hin->minor);
  718. }
  719. }
  720. if (buf && --buf->nframesout == 0 && buf->resid == 0) {
  721. diskstats(d->gd, buf->bio, jiffies - buf->stime);
  722. n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
  723. bio_endio(buf->bio, n);
  724. mempool_free(buf, d->bufpool);
  725. }
  726. f->buf = NULL;
  727. f->tag = FREETAG;
  728. t->nout--;
  729. aoecmd_work(d);
  730. xmit:
  731. sl = d->sendq_hd;
  732. d->sendq_hd = d->sendq_tl = NULL;
  733. spin_unlock_irqrestore(&d->lock, flags);
  734. aoenet_xmit(sl);
  735. }
  736. void
  737. aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
  738. {
  739. struct sk_buff *sl;
  740. sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
  741. aoenet_xmit(sl);
  742. }
/* Build an ATA IDENTIFY DEVICE frame for @d and return a clone for
 * transmission (NULL when no free frame or the clone fails).  Resets
 * the rtt average and points the device timer at rexmit_timer.
 * Called with d->lock held.
 */
struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;
	struct aoetgt *t;

	f = freeframe(d);
	if (f == NULL)
		return NULL;

	t = *d->tgt;

	/* initialize the headers & frame */
	skb = f->skb;
	h = (struct aoe_hdr *) skb_mac_header(skb);
	ah = (struct aoe_atahdr *) (h+1);
	skb_put(skb, sizeof *h + sizeof *ah);
	memset(h, 0, skb->len);
	f->tag = aoehdr_atainit(d, t, h);
	t->nout++;
	f->waited = 0;

	/* set up ata header */
	ah->scnt = 1;
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;	/* obsolete device-select bits */

	skb->dev = t->ifp->nd;

	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb_clone(skb, GFP_ATOMIC);
}
  773. static struct aoetgt *
  774. addtgt(struct aoedev *d, char *addr, ulong nframes)
  775. {
  776. struct aoetgt *t, **tt, **te;
  777. struct frame *f, *e;
  778. tt = d->targets;
  779. te = tt + NTARGETS;
  780. for (; tt < te && *tt; tt++)
  781. ;
  782. if (tt == te)
  783. return NULL;
  784. t = kcalloc(1, sizeof *t, GFP_ATOMIC);
  785. f = kcalloc(nframes, sizeof *f, GFP_ATOMIC);
  786. if (!t || !f)
  787. goto bail;
  788. t->nframes = nframes;
  789. t->frames = f;
  790. e = f + nframes;
  791. for (; f < e; f++) {
  792. f->tag = FREETAG;
  793. f->skb = new_skb(ETH_ZLEN);
  794. if (!f->skb)
  795. break;
  796. }
  797. if (f != e) {
  798. while (f > t->frames) {
  799. f--;
  800. dev_kfree_skb(f->skb);
  801. }
  802. goto bail;
  803. }
  804. memcpy(t->addr, addr, sizeof t->addr);
  805. t->ifp = t->ifs;
  806. t->maxout = t->nframes;
  807. return *tt = t;
  808. bail:
  809. kfree(t);
  810. kfree(f);
  811. return NULL;
  812. }
/* Handle an AoE config response: validate the shelf/slot address,
 * look up or create the device and target, register the receiving
 * interface, and size data frames to the interface MTU.  When nobody
 * has the device open, finish by issuing an ATA identify so capacity
 * changes are picked up.
 */
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct aoetgt *t;
	struct aoeif *ifp;
	ulong flags, sysminor, aoemajor;
	struct sk_buff *sl;
	enum { MAXFRAMES = 16 };
	u16 n;

	h = (struct aoe_hdr *) skb_mac_header(skb);
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = be16_to_cpu(get_unaligned(&h->major));
	if (aoemajor == 0xfff) {
		printk(KERN_ERR "aoe: Warning: shelf address is all ones. "
			"Check shelf dip switches.\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO "aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	n = be16_to_cpu(ch->bufcnt);
	if (n > MAXFRAMES)	/* keep it reasonable */
		n = MAXFRAMES;

	d = aoedev_by_sysminor_m(sysminor);
	if (d == NULL) {
		printk(KERN_INFO "aoe: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	t = gettgt(d, h->src);
	if (!t) {
		t = addtgt(d, h->src, n);
		if (!t) {
			printk(KERN_INFO
				"aoe: device addtgt failure; "
				"too many targets?\n");
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}
	}
	ifp = getif(t, skb->dev);
	if (!ifp) {
		ifp = addif(t, skb->dev);
		if (!ifp) {
			printk(KERN_INFO
				"aoe: device addif failure; "
				"too many interfaces?\n");
			spin_unlock_irqrestore(&d->lock, flags);
			return;
		}
	}
	if (ifp->maxbcnt) {
		/* largest per-frame byte count that fits the MTU,
		 * capped by the shelf's advertised sector count
		 */
		n = ifp->nd->mtu;
		n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr);
		n /= 512;
		if (n > ch->scnt)
			n = ch->scnt;
		n = n ? n * 512 : DEFAULTBCNT;
		if (n != ifp->maxbcnt) {
			printk(KERN_INFO
				"aoe: e%ld.%d: setting %d%s%s:%012llx\n",
				d->aoemajor, d->aoeminor, n,
				" byte data frames on ", ifp->nd->name,
				mac_addr(t->addr));
			ifp->maxbcnt = n;
		}
	}

	/* don't change users' perspective */
	if (d->nopen) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->fw_ver = be16_to_cpu(ch->fwver);

	sl = aoecmd_ata_id(d);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}
  900. void
  901. aoecmd_cleanslate(struct aoedev *d)
  902. {
  903. struct aoetgt **t, **te;
  904. struct aoeif *p, *e;
  905. d->mintimer = MINTIMER;
  906. t = d->targets;
  907. te = t + NTARGETS;
  908. for (; t < te && *t; t++) {
  909. (*t)->maxout = (*t)->nframes;
  910. p = (*t)->ifs;
  911. e = p + NAOEIFS;
  912. for (; p < e; p++) {
  913. p->lostjumbo = 0;
  914. p->lost = 0;
  915. p->maxbcnt = DEFAULTBCNT;
  916. }
  917. }
  918. }