aoecmd.c 15 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708
/* Copyright (c) 2004 Coraid, Inc.  See COPYING for GPL terms. */
/*
 * aoecmd.c
 * Filesystem request handling methods
 */
  6. #include <linux/hdreg.h>
  7. #include <linux/blkdev.h>
  8. #include <linux/skbuff.h>
  9. #include <linux/netdevice.h>
  10. #include <linux/genhd.h>
  11. #include <asm/unaligned.h>
  12. #include "aoe.h"
  13. #define TIMERTICK (HZ / 10)
  14. #define MINTIMER (2 * TIMERTICK)
  15. #define MAXTIMER (HZ << 1)
  16. #define MAXWAIT (60 * 3) /* After MAXWAIT seconds, give up and fail dev */
  17. static struct sk_buff *
  18. new_skb(struct net_device *if_dev, ulong len)
  19. {
  20. struct sk_buff *skb;
  21. skb = alloc_skb(len, GFP_ATOMIC);
  22. if (skb) {
  23. skb->nh.raw = skb->mac.raw = skb->data;
  24. skb->dev = if_dev;
  25. skb->protocol = __constant_htons(ETH_P_AOE);
  26. skb->priority = 0;
  27. skb_put(skb, len);
  28. memset(skb->head, 0, len);
  29. skb->next = skb->prev = NULL;
  30. /* tell the network layer not to perform IP checksums
  31. * or to get the NIC to do it
  32. */
  33. skb->ip_summed = CHECKSUM_NONE;
  34. }
  35. return skb;
  36. }
  37. static struct sk_buff *
  38. skb_prepare(struct aoedev *d, struct frame *f)
  39. {
  40. struct sk_buff *skb;
  41. char *p;
  42. skb = new_skb(d->ifp, f->ndata + f->writedatalen);
  43. if (!skb) {
  44. printk(KERN_INFO "aoe: skb_prepare: failure to allocate skb\n");
  45. return NULL;
  46. }
  47. p = skb->mac.raw;
  48. memcpy(p, f->data, f->ndata);
  49. if (f->writedatalen) {
  50. p += sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
  51. memcpy(p, f->bufaddr, f->writedatalen);
  52. }
  53. return skb;
  54. }
  55. static struct frame *
  56. getframe(struct aoedev *d, int tag)
  57. {
  58. struct frame *f, *e;
  59. f = d->frames;
  60. e = f + d->nframes;
  61. for (; f<e; f++)
  62. if (f->tag == tag)
  63. return f;
  64. return NULL;
  65. }
  66. /*
  67. * Leave the top bit clear so we have tagspace for userland.
  68. * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
  69. * This driver reserves tag -1 to mean "unused frame."
  70. */
  71. static int
  72. newtag(struct aoedev *d)
  73. {
  74. register ulong n;
  75. n = jiffies & 0xffff;
  76. return n |= (++d->lasttag & 0x7fff) << 16;
  77. }
  78. static int
  79. aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
  80. {
  81. u32 host_tag = newtag(d);
  82. memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
  83. memcpy(h->dst, d->addr, sizeof h->dst);
  84. h->type = __constant_cpu_to_be16(ETH_P_AOE);
  85. h->verfl = AOE_HVER;
  86. h->major = cpu_to_be16(d->aoemajor);
  87. h->minor = d->aoeminor;
  88. h->cmd = AOECMD_ATA;
  89. h->tag = cpu_to_be32(host_tag);
  90. return host_tag;
  91. }
  92. static void
  93. aoecmd_ata_rw(struct aoedev *d, struct frame *f)
  94. {
  95. struct aoe_hdr *h;
  96. struct aoe_atahdr *ah;
  97. struct buf *buf;
  98. struct sk_buff *skb;
  99. ulong bcnt;
  100. register sector_t sector;
  101. char writebit, extbit;
  102. writebit = 0x10;
  103. extbit = 0x4;
  104. buf = d->inprocess;
  105. sector = buf->sector;
  106. bcnt = buf->bv_resid;
  107. if (bcnt > MAXATADATA)
  108. bcnt = MAXATADATA;
  109. /* initialize the headers & frame */
  110. h = (struct aoe_hdr *) f->data;
  111. ah = (struct aoe_atahdr *) (h+1);
  112. f->ndata = sizeof *h + sizeof *ah;
  113. memset(h, 0, f->ndata);
  114. f->tag = aoehdr_atainit(d, h);
  115. f->waited = 0;
  116. f->buf = buf;
  117. f->bufaddr = buf->bufaddr;
  118. /* set up ata header */
  119. ah->scnt = bcnt >> 9;
  120. ah->lba0 = sector;
  121. ah->lba1 = sector >>= 8;
  122. ah->lba2 = sector >>= 8;
  123. ah->lba3 = sector >>= 8;
  124. if (d->flags & DEVFL_EXT) {
  125. ah->aflags |= AOEAFL_EXT;
  126. ah->lba4 = sector >>= 8;
  127. ah->lba5 = sector >>= 8;
  128. } else {
  129. extbit = 0;
  130. ah->lba3 &= 0x0f;
  131. ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
  132. }
  133. if (bio_data_dir(buf->bio) == WRITE) {
  134. ah->aflags |= AOEAFL_WRITE;
  135. f->writedatalen = bcnt;
  136. } else {
  137. writebit = 0;
  138. f->writedatalen = 0;
  139. }
  140. ah->cmdstat = WIN_READ | writebit | extbit;
  141. /* mark all tracking fields and load out */
  142. buf->nframesout += 1;
  143. buf->bufaddr += bcnt;
  144. buf->bv_resid -= bcnt;
  145. /* printk(KERN_INFO "aoe: bv_resid=%ld\n", buf->bv_resid); */
  146. buf->resid -= bcnt;
  147. buf->sector += bcnt >> 9;
  148. if (buf->resid == 0) {
  149. d->inprocess = NULL;
  150. } else if (buf->bv_resid == 0) {
  151. buf->bv++;
  152. buf->bv_resid = buf->bv->bv_len;
  153. buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
  154. }
  155. skb = skb_prepare(d, f);
  156. if (skb) {
  157. skb->next = NULL;
  158. if (d->sendq_hd)
  159. d->sendq_tl->next = skb;
  160. else
  161. d->sendq_hd = skb;
  162. d->sendq_tl = skb;
  163. }
  164. }
  165. /* some callers cannot sleep, and they can call this function,
  166. * transmitting the packets later, when interrupts are on
  167. */
  168. static struct sk_buff *
  169. aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
  170. {
  171. struct aoe_hdr *h;
  172. struct aoe_cfghdr *ch;
  173. struct sk_buff *skb, *sl, *sl_tail;
  174. struct net_device *ifp;
  175. sl = sl_tail = NULL;
  176. read_lock(&dev_base_lock);
  177. for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
  178. dev_hold(ifp);
  179. if (!is_aoe_netif(ifp))
  180. continue;
  181. skb = new_skb(ifp, sizeof *h + sizeof *ch);
  182. if (skb == NULL) {
  183. printk(KERN_INFO "aoe: aoecmd_cfg: skb alloc failure\n");
  184. continue;
  185. }
  186. if (sl_tail == NULL)
  187. sl_tail = skb;
  188. h = (struct aoe_hdr *) skb->mac.raw;
  189. memset(h, 0, sizeof *h + sizeof *ch);
  190. memset(h->dst, 0xff, sizeof h->dst);
  191. memcpy(h->src, ifp->dev_addr, sizeof h->src);
  192. h->type = __constant_cpu_to_be16(ETH_P_AOE);
  193. h->verfl = AOE_HVER;
  194. h->major = cpu_to_be16(aoemajor);
  195. h->minor = aoeminor;
  196. h->cmd = AOECMD_CFG;
  197. skb->next = sl;
  198. sl = skb;
  199. }
  200. read_unlock(&dev_base_lock);
  201. if (tail != NULL)
  202. *tail = sl_tail;
  203. return sl;
  204. }
  205. /* enters with d->lock held */
  206. void
  207. aoecmd_work(struct aoedev *d)
  208. {
  209. struct frame *f;
  210. struct buf *buf;
  211. if (d->flags & DEVFL_PAUSE) {
  212. if (!aoedev_isbusy(d))
  213. d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
  214. d->aoeminor, &d->sendq_tl);
  215. return;
  216. }
  217. loop:
  218. f = getframe(d, FREETAG);
  219. if (f == NULL)
  220. return;
  221. if (d->inprocess == NULL) {
  222. if (list_empty(&d->bufq))
  223. return;
  224. buf = container_of(d->bufq.next, struct buf, bufs);
  225. list_del(d->bufq.next);
  226. /*printk(KERN_INFO "aoecmd_work: bi_size=%ld\n", buf->bio->bi_size); */
  227. d->inprocess = buf;
  228. }
  229. aoecmd_ata_rw(d, f);
  230. goto loop;
  231. }
  232. static void
  233. rexmit(struct aoedev *d, struct frame *f)
  234. {
  235. struct sk_buff *skb;
  236. struct aoe_hdr *h;
  237. char buf[128];
  238. u32 n;
  239. n = newtag(d);
  240. snprintf(buf, sizeof buf,
  241. "%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
  242. "retransmit",
  243. d->aoemajor, d->aoeminor, f->tag, jiffies, n);
  244. aoechr_error(buf);
  245. h = (struct aoe_hdr *) f->data;
  246. f->tag = n;
  247. h->tag = cpu_to_be32(n);
  248. memcpy(h->dst, d->addr, sizeof h->dst);
  249. memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
  250. skb = skb_prepare(d, f);
  251. if (skb) {
  252. skb->next = NULL;
  253. if (d->sendq_hd)
  254. d->sendq_tl->next = skb;
  255. else
  256. d->sendq_hd = skb;
  257. d->sendq_tl = skb;
  258. }
  259. }
  260. static int
  261. tsince(int tag)
  262. {
  263. int n;
  264. n = jiffies & 0xffff;
  265. n -= tag & 0xffff;
  266. if (n < 0)
  267. n += 1<<16;
  268. return n;
  269. }
  270. static void
  271. rexmit_timer(ulong vp)
  272. {
  273. struct aoedev *d;
  274. struct frame *f, *e;
  275. struct sk_buff *sl;
  276. register long timeout;
  277. ulong flags, n;
  278. d = (struct aoedev *) vp;
  279. sl = NULL;
  280. /* timeout is always ~150% of the moving average */
  281. timeout = d->rttavg;
  282. timeout += timeout >> 1;
  283. spin_lock_irqsave(&d->lock, flags);
  284. if (d->flags & DEVFL_TKILL) {
  285. spin_unlock_irqrestore(&d->lock, flags);
  286. return;
  287. }
  288. f = d->frames;
  289. e = f + d->nframes;
  290. for (; f<e; f++) {
  291. if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
  292. n = f->waited += timeout;
  293. n /= HZ;
  294. if (n > MAXWAIT) { /* waited too long. device failure. */
  295. aoedev_downdev(d);
  296. break;
  297. }
  298. rexmit(d, f);
  299. }
  300. }
  301. sl = d->sendq_hd;
  302. d->sendq_hd = d->sendq_tl = NULL;
  303. if (sl) {
  304. n = d->rttavg <<= 1;
  305. if (n > MAXTIMER)
  306. d->rttavg = MAXTIMER;
  307. }
  308. d->timer.expires = jiffies + TIMERTICK;
  309. add_timer(&d->timer);
  310. spin_unlock_irqrestore(&d->lock, flags);
  311. aoenet_xmit(sl);
  312. }
  313. /* this function performs work that has been deferred until sleeping is OK
  314. */
  315. void
  316. aoecmd_sleepwork(void *vp)
  317. {
  318. struct aoedev *d = (struct aoedev *) vp;
  319. if (d->flags & DEVFL_GDALLOC)
  320. aoeblk_gdalloc(d);
  321. if (d->flags & DEVFL_NEWSIZE) {
  322. struct block_device *bd;
  323. unsigned long flags;
  324. u64 ssize;
  325. ssize = d->gd->capacity;
  326. bd = bdget_disk(d->gd, 0);
  327. if (bd) {
  328. mutex_lock(&bd->bd_inode->i_mutex);
  329. i_size_write(bd->bd_inode, (loff_t)ssize<<9);
  330. mutex_unlock(&bd->bd_inode->i_mutex);
  331. bdput(bd);
  332. }
  333. spin_lock_irqsave(&d->lock, flags);
  334. d->flags |= DEVFL_UP;
  335. d->flags &= ~DEVFL_NEWSIZE;
  336. spin_unlock_irqrestore(&d->lock, flags);
  337. }
  338. }
  339. static void
  340. ataid_complete(struct aoedev *d, unsigned char *id)
  341. {
  342. u64 ssize;
  343. u16 n;
  344. /* word 83: command set supported */
  345. n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));
  346. /* word 86: command set/feature enabled */
  347. n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));
  348. if (n & (1<<10)) { /* bit 10: LBA 48 */
  349. d->flags |= DEVFL_EXT;
  350. /* word 100: number lba48 sectors */
  351. ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));
  352. /* set as in ide-disk.c:init_idedisk_capacity */
  353. d->geo.cylinders = ssize;
  354. d->geo.cylinders /= (255 * 63);
  355. d->geo.heads = 255;
  356. d->geo.sectors = 63;
  357. } else {
  358. d->flags &= ~DEVFL_EXT;
  359. /* number lba28 sectors */
  360. ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));
  361. /* NOTE: obsolete in ATA 6 */
  362. d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
  363. d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
  364. d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
  365. }
  366. if (d->ssize != ssize)
  367. printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu "
  368. "sectors\n", (unsigned long long)mac_addr(d->addr),
  369. d->aoemajor, d->aoeminor,
  370. d->fw_ver, (long long)ssize);
  371. d->ssize = ssize;
  372. d->geo.start = 0;
  373. if (d->gd != NULL) {
  374. d->gd->capacity = ssize;
  375. d->flags |= DEVFL_NEWSIZE;
  376. } else {
  377. if (d->flags & DEVFL_GDALLOC) {
  378. printk(KERN_INFO "aoe: %s: %s e%lu.%lu, %s\n",
  379. __FUNCTION__,
  380. "can't schedule work for",
  381. d->aoemajor, d->aoeminor,
  382. "it's already on! (This really shouldn't happen).\n");
  383. return;
  384. }
  385. d->flags |= DEVFL_GDALLOC;
  386. }
  387. schedule_work(&d->work);
  388. }
  389. static void
  390. calc_rttavg(struct aoedev *d, int rtt)
  391. {
  392. register long n;
  393. n = rtt;
  394. if (n < MINTIMER)
  395. n = MINTIMER;
  396. else if (n > MAXTIMER)
  397. n = MAXTIMER;
  398. /* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
  399. n -= d->rttavg;
  400. d->rttavg += n >> 2;
  401. }
  402. void
  403. aoecmd_ata_rsp(struct sk_buff *skb)
  404. {
  405. struct aoedev *d;
  406. struct aoe_hdr *hin;
  407. struct aoe_atahdr *ahin, *ahout;
  408. struct frame *f;
  409. struct buf *buf;
  410. struct sk_buff *sl;
  411. register long n;
  412. ulong flags;
  413. char ebuf[128];
  414. u16 aoemajor;
  415. hin = (struct aoe_hdr *) skb->mac.raw;
  416. aoemajor = be16_to_cpu(hin->major);
  417. d = aoedev_by_aoeaddr(aoemajor, hin->minor);
  418. if (d == NULL) {
  419. snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
  420. "for unknown device %d.%d\n",
  421. aoemajor, hin->minor);
  422. aoechr_error(ebuf);
  423. return;
  424. }
  425. spin_lock_irqsave(&d->lock, flags);
  426. f = getframe(d, be32_to_cpu(hin->tag));
  427. if (f == NULL) {
  428. spin_unlock_irqrestore(&d->lock, flags);
  429. snprintf(ebuf, sizeof ebuf,
  430. "%15s e%d.%d tag=%08x@%08lx\n",
  431. "unexpected rsp",
  432. be16_to_cpu(hin->major),
  433. hin->minor,
  434. be32_to_cpu(hin->tag),
  435. jiffies);
  436. aoechr_error(ebuf);
  437. return;
  438. }
  439. calc_rttavg(d, tsince(f->tag));
  440. ahin = (struct aoe_atahdr *) (hin+1);
  441. ahout = (struct aoe_atahdr *) (f->data + sizeof(struct aoe_hdr));
  442. buf = f->buf;
  443. if (ahout->cmdstat == WIN_IDENTIFY)
  444. d->flags &= ~DEVFL_PAUSE;
  445. if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */
  446. printk(KERN_CRIT "aoe: aoecmd_ata_rsp: ata error cmd=%2.2Xh "
  447. "stat=%2.2Xh from e%ld.%ld\n",
  448. ahout->cmdstat, ahin->cmdstat,
  449. d->aoemajor, d->aoeminor);
  450. if (buf)
  451. buf->flags |= BUFFL_FAIL;
  452. } else {
  453. switch (ahout->cmdstat) {
  454. case WIN_READ:
  455. case WIN_READ_EXT:
  456. n = ahout->scnt << 9;
  457. if (skb->len - sizeof *hin - sizeof *ahin < n) {
  458. printk(KERN_CRIT "aoe: aoecmd_ata_rsp: runt "
  459. "ata data size in read. skb->len=%d\n",
  460. skb->len);
  461. /* fail frame f? just returning will rexmit. */
  462. spin_unlock_irqrestore(&d->lock, flags);
  463. return;
  464. }
  465. memcpy(f->bufaddr, ahin+1, n);
  466. case WIN_WRITE:
  467. case WIN_WRITE_EXT:
  468. break;
  469. case WIN_IDENTIFY:
  470. if (skb->len - sizeof *hin - sizeof *ahin < 512) {
  471. printk(KERN_INFO "aoe: aoecmd_ata_rsp: runt data size "
  472. "in ataid. skb->len=%d\n", skb->len);
  473. spin_unlock_irqrestore(&d->lock, flags);
  474. return;
  475. }
  476. ataid_complete(d, (char *) (ahin+1));
  477. break;
  478. default:
  479. printk(KERN_INFO "aoe: aoecmd_ata_rsp: unrecognized "
  480. "outbound ata command %2.2Xh for %d.%d\n",
  481. ahout->cmdstat,
  482. be16_to_cpu(hin->major),
  483. hin->minor);
  484. }
  485. }
  486. if (buf) {
  487. buf->nframesout -= 1;
  488. if (buf->nframesout == 0 && buf->resid == 0) {
  489. unsigned long duration = jiffies - buf->start_time;
  490. unsigned long n_sect = buf->bio->bi_size >> 9;
  491. struct gendisk *disk = d->gd;
  492. const int rw = bio_data_dir(buf->bio);
  493. disk_stat_inc(disk, ios[rw]);
  494. disk_stat_add(disk, ticks[rw], duration);
  495. disk_stat_add(disk, sectors[rw], n_sect);
  496. disk_stat_add(disk, io_ticks, duration);
  497. n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
  498. bio_endio(buf->bio, buf->bio->bi_size, n);
  499. mempool_free(buf, d->bufpool);
  500. }
  501. }
  502. f->buf = NULL;
  503. f->tag = FREETAG;
  504. aoecmd_work(d);
  505. sl = d->sendq_hd;
  506. d->sendq_hd = d->sendq_tl = NULL;
  507. spin_unlock_irqrestore(&d->lock, flags);
  508. aoenet_xmit(sl);
  509. }
  510. void
  511. aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
  512. {
  513. struct sk_buff *sl;
  514. sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
  515. aoenet_xmit(sl);
  516. }
  517. /*
  518. * Since we only call this in one place (and it only prepares one frame)
  519. * we just return the skb. Usually we'd chain it up to the aoedev sendq.
  520. */
  521. static struct sk_buff *
  522. aoecmd_ata_id(struct aoedev *d)
  523. {
  524. struct aoe_hdr *h;
  525. struct aoe_atahdr *ah;
  526. struct frame *f;
  527. struct sk_buff *skb;
  528. f = getframe(d, FREETAG);
  529. if (f == NULL) {
  530. printk(KERN_CRIT "aoe: aoecmd_ata_id: can't get a frame. "
  531. "This shouldn't happen.\n");
  532. return NULL;
  533. }
  534. /* initialize the headers & frame */
  535. h = (struct aoe_hdr *) f->data;
  536. ah = (struct aoe_atahdr *) (h+1);
  537. f->ndata = sizeof *h + sizeof *ah;
  538. memset(h, 0, f->ndata);
  539. f->tag = aoehdr_atainit(d, h);
  540. f->waited = 0;
  541. f->writedatalen = 0;
  542. /* set up ata header */
  543. ah->scnt = 1;
  544. ah->cmdstat = WIN_IDENTIFY;
  545. ah->lba3 = 0xa0;
  546. skb = skb_prepare(d, f);
  547. d->rttavg = MAXTIMER;
  548. d->timer.function = rexmit_timer;
  549. return skb;
  550. }
  551. void
  552. aoecmd_cfg_rsp(struct sk_buff *skb)
  553. {
  554. struct aoedev *d;
  555. struct aoe_hdr *h;
  556. struct aoe_cfghdr *ch;
  557. ulong flags, sysminor, aoemajor;
  558. u16 bufcnt;
  559. struct sk_buff *sl;
  560. enum { MAXFRAMES = 16 };
  561. h = (struct aoe_hdr *) skb->mac.raw;
  562. ch = (struct aoe_cfghdr *) (h+1);
  563. /*
  564. * Enough people have their dip switches set backwards to
  565. * warrant a loud message for this special case.
  566. */
  567. aoemajor = be16_to_cpu(h->major);
  568. if (aoemajor == 0xfff) {
  569. printk(KERN_CRIT "aoe: aoecmd_cfg_rsp: Warning: shelf "
  570. "address is all ones. Check shelf dip switches\n");
  571. return;
  572. }
  573. sysminor = SYSMINOR(aoemajor, h->minor);
  574. if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
  575. printk(KERN_INFO
  576. "aoe: e%ld.%d: minor number too large\n",
  577. aoemajor, (int) h->minor);
  578. return;
  579. }
  580. bufcnt = be16_to_cpu(ch->bufcnt);
  581. if (bufcnt > MAXFRAMES) /* keep it reasonable */
  582. bufcnt = MAXFRAMES;
  583. d = aoedev_by_sysminor_m(sysminor, bufcnt);
  584. if (d == NULL) {
  585. printk(KERN_INFO "aoe: aoecmd_cfg_rsp: device sysminor_m failure\n");
  586. return;
  587. }
  588. spin_lock_irqsave(&d->lock, flags);
  589. /* permit device to migrate mac and network interface */
  590. d->ifp = skb->dev;
  591. memcpy(d->addr, h->src, sizeof d->addr);
  592. /* don't change users' perspective */
  593. if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
  594. spin_unlock_irqrestore(&d->lock, flags);
  595. return;
  596. }
  597. d->flags |= DEVFL_PAUSE; /* force pause */
  598. d->fw_ver = be16_to_cpu(ch->fwver);
  599. /* check for already outstanding ataid */
  600. sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;
  601. spin_unlock_irqrestore(&d->lock, flags);
  602. aoenet_xmit(sl);
  603. }