  1. /* Copyright (c) 2004 Coraid, Inc. See COPYING for GPL terms. */
  2. /*
  3. * aoecmd.c
  4. * Filesystem request handling methods
  5. */
  6. #include <linux/hdreg.h>
  7. #include <linux/blkdev.h>
  8. #include <linux/skbuff.h>
  9. #include <linux/netdevice.h>
  10. #include <linux/genhd.h>
  11. #include <asm/unaligned.h>
  12. #include "aoe.h"
  13. #define TIMERTICK (HZ / 10)
  14. #define MINTIMER (2 * TIMERTICK)
  15. #define MAXTIMER (HZ << 1)
  16. #define MAXWAIT (60 * 3) /* After MAXWAIT seconds, give up and fail dev */
/* Allocate and initialize a fresh sk_buff of @len bytes for AoE traffic
 * on interface @if_dev.  GFP_ATOMIC is required because callers may hold
 * spinlocks or run in interrupt context.  Returns NULL on allocation
 * failure.
 */
static struct sk_buff *
new_skb(struct net_device *if_dev, ulong len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		/* all headers start at the beginning of the buffer */
		skb->nh.raw = skb->mac.raw = skb->data;
		skb->dev = if_dev;
		skb->protocol = __constant_htons(ETH_P_AOE);
		skb->priority = 0;
		skb_put(skb, len);	/* claim the whole frame length */
		memset(skb->head, 0, len);
		skb->next = skb->prev = NULL;

		/* tell the network layer not to perform IP checksums
		 * or to get the NIC to do it
		 */
		skb->ip_summed = CHECKSUM_NONE;
	}
	return skb;
}
/* Build an sk_buff for frame @f: copy the prepared AoE/ATA headers from
 * f->data and, for write commands, append f->writedatalen payload bytes
 * taken from f->bufaddr.  Returns NULL if skb allocation fails.
 */
static struct sk_buff *
skb_prepare(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	char *p;

	skb = new_skb(d->ifp, f->ndata + f->writedatalen);
	if (!skb) {
		printk(KERN_INFO "aoe: skb_prepare: failure to allocate skb\n");
		return NULL;
	}

	p = skb->mac.raw;
	memcpy(p, f->data, f->ndata);

	if (f->writedatalen) {
		/* write payload goes immediately after the AoE + ATA headers */
		p += sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
		memcpy(p, f->bufaddr, f->writedatalen);
	}

	return skb;
}
  55. static struct frame *
  56. getframe(struct aoedev *d, int tag)
  57. {
  58. struct frame *f, *e;
  59. f = d->frames;
  60. e = f + d->nframes;
  61. for (; f<e; f++)
  62. if (f->tag == tag)
  63. return f;
  64. return NULL;
  65. }
  66. /*
  67. * Leave the top bit clear so we have tagspace for userland.
  68. * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
  69. * This driver reserves tag -1 to mean "unused frame."
  70. */
  71. static int
  72. newtag(struct aoedev *d)
  73. {
  74. register ulong n;
  75. n = jiffies & 0xffff;
  76. return n |= (++d->lasttag & 0x7fff) << 16;
  77. }
/* Fill in the common AoE header fields for an ATA command addressed to
 * device @d, allocating a fresh tag for the exchange.  Returns the new
 * tag in host byte order (h->tag carries the big-endian copy).
 */
static int
aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
{
	u32 host_tag = newtag(d);

	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
	memcpy(h->dst, d->addr, sizeof h->dst);
	h->type = __constant_cpu_to_be16(ETH_P_AOE);
	h->verfl = AOE_HVER;
	h->major = cpu_to_be16(d->aoemajor);
	h->minor = d->aoeminor;
	h->cmd = AOECMD_ATA;
	h->tag = cpu_to_be32(host_tag);

	return host_tag;
}
/* Build and queue one ATA read/write command frame for the buf currently
 * in d->inprocess, consuming up to MAXATADATA bytes of the current bio
 * vector.  Advances the buf's progress counters and appends the prepared
 * skb to the device send queue.
 * NOTE(review): assumes d->inprocess is non-NULL and d->lock is held —
 * both appear to be guaranteed by the caller (aoecmd_work); confirm.
 */
static void
aoecmd_ata_rw(struct aoedev *d, struct frame *f)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct buf *buf;
	struct sk_buff *skb;
	ulong bcnt;
	register sector_t sector;
	char writebit, extbit;

	writebit = 0x10;	/* ATA command modifier: write */
	extbit = 0x4;		/* ATA command modifier: LBA48 ("EXT") */

	buf = d->inprocess;

	sector = buf->sector;
	bcnt = buf->bv_resid;
	if (bcnt > MAXATADATA)	/* one frame carries at most MAXATADATA bytes */
		bcnt = MAXATADATA;

	/* initialize the headers & frame */
	h = (struct aoe_hdr *) f->data;
	ah = (struct aoe_atahdr *) (h+1);
	f->ndata = sizeof *h + sizeof *ah;
	memset(h, 0, f->ndata);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;
	f->buf = buf;
	f->bufaddr = buf->bufaddr;

	/* set up ata header */
	ah->scnt = bcnt >> 9;	/* sector count, 512-byte sectors */
	ah->lba0 = sector;
	ah->lba1 = sector >>= 8;
	ah->lba2 = sector >>= 8;
	ah->lba3 = sector >>= 8;
	if (d->flags & DEVFL_EXT) {
		/* LBA48: all six LBA bytes are significant */
		ah->aflags |= AOEAFL_EXT;
		ah->lba4 = sector >>= 8;
		ah->lba5 = sector >>= 8;
	} else {
		/* LBA28: top nibble of lba3 carries the device/mode bits */
		extbit = 0;
		ah->lba3 &= 0x0f;
		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
	}

	if (bio_data_dir(buf->bio) == WRITE) {
		ah->aflags |= AOEAFL_WRITE;
		f->writedatalen = bcnt;	/* payload follows the headers */
	} else {
		writebit = 0;
		f->writedatalen = 0;
	}

	/* WIN_READ plus the modifier bits selects READ/WRITE, 28/48-bit */
	ah->cmdstat = WIN_READ | writebit | extbit;

	/* mark all tracking fields and load out */
	buf->nframesout += 1;
	buf->bufaddr += bcnt;
	buf->bv_resid -= bcnt;
	/* printk(KERN_INFO "aoe: bv_resid=%ld\n", buf->bv_resid); */
	buf->resid -= bcnt;
	buf->sector += bcnt >> 9;
	if (buf->resid == 0) {
		d->inprocess = NULL;	/* whole buf dispatched */
	} else if (buf->bv_resid == 0) {
		/* current bio vector exhausted: advance to the next one */
		buf->bv++;
		buf->bv_resid = buf->bv->bv_len;
		buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
	}

	skb = skb_prepare(d, f);
	if (skb) {
		/* append to the device send queue */
		skb->next = NULL;
		if (d->sendq_hd)
			d->sendq_tl->next = skb;
		else
			d->sendq_hd = skb;
		d->sendq_tl = skb;
	}
}
/* Build a broadcast AoE config-query packet for every usable AoE network
 * interface, addressed to shelf @aoemajor, slot @aoeminor.  Returns the
 * head of the resulting skb list; if @tail is non-NULL it receives the
 * list tail (skbs are prepended, so the first one built is the tail).
 *
 * some callers cannot sleep, and they can call this function,
 * transmitting the packets later, when interrupts are on
 */
static struct sk_buff *
aoecmd_cfg_pkts(ushort aoemajor, unsigned char aoeminor, struct sk_buff **tail)
{
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	struct sk_buff *skb, *sl, *sl_tail;
	struct net_device *ifp;

	sl = sl_tail = NULL;

	read_lock(&dev_base_lock);
	/* dev_hold/dev_put bracket each iteration; the put sits in the
	 * for-loop increment so it also runs on the continue paths below.
	 */
	for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
		dev_hold(ifp);
		if (!is_aoe_netif(ifp))
			continue;

		skb = new_skb(ifp, sizeof *h + sizeof *ch);
		if (skb == NULL) {
			printk(KERN_INFO "aoe: aoecmd_cfg: skb alloc failure\n");
			continue;
		}
		if (sl_tail == NULL)
			sl_tail = skb;
		h = (struct aoe_hdr *) skb->mac.raw;
		memset(h, 0, sizeof *h + sizeof *ch);

		memset(h->dst, 0xff, sizeof h->dst);	/* broadcast */
		memcpy(h->src, ifp->dev_addr, sizeof h->src);
		h->type = __constant_cpu_to_be16(ETH_P_AOE);
		h->verfl = AOE_HVER;
		h->major = cpu_to_be16(aoemajor);
		h->minor = aoeminor;
		h->cmd = AOECMD_CFG;

		/* prepend to the result list */
		skb->next = sl;
		sl = skb;
	}
	read_unlock(&dev_base_lock);

	if (tail != NULL)
		*tail = sl_tail;
	return sl;
}
  205. /* enters with d->lock held */
  206. void
  207. aoecmd_work(struct aoedev *d)
  208. {
  209. struct frame *f;
  210. struct buf *buf;
  211. if (d->flags & DEVFL_PAUSE) {
  212. if (!aoedev_isbusy(d))
  213. d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor,
  214. d->aoeminor, &d->sendq_tl);
  215. return;
  216. }
  217. loop:
  218. f = getframe(d, FREETAG);
  219. if (f == NULL)
  220. return;
  221. if (d->inprocess == NULL) {
  222. if (list_empty(&d->bufq))
  223. return;
  224. buf = container_of(d->bufq.next, struct buf, bufs);
  225. list_del(d->bufq.next);
  226. /*printk(KERN_INFO "aoecmd_work: bi_size=%ld\n", buf->bio->bi_size); */
  227. d->inprocess = buf;
  228. }
  229. aoecmd_ata_rw(d, f);
  230. goto loop;
  231. }
/* Re-send frame @f under a freshly allocated tag, logging the
 * retransmission through the aoe character device.  The rebuilt skb is
 * appended to the device send queue for later transmission.
 * NOTE(review): assumes d->lock is held by the caller (rexmit_timer
 * holds it) — confirm for any new caller.
 */
static void
rexmit(struct aoedev *d, struct frame *f)
{
	struct sk_buff *skb;
	struct aoe_hdr *h;
	char buf[128];
	u32 n;

	n = newtag(d);

	snprintf(buf, sizeof buf,
		"%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
		"retransmit",
		d->aoemajor, d->aoeminor, f->tag, jiffies, n);
	aoechr_error(buf);

	/* retag the frame so the response matches the new transmission */
	h = (struct aoe_hdr *) f->data;
	f->tag = n;
	h->tag = cpu_to_be32(n);

	skb = skb_prepare(d, f);
	if (skb) {
		/* append to the device send queue */
		skb->next = NULL;
		if (d->sendq_hd)
			d->sendq_tl->next = skb;
		else
			d->sendq_hd = skb;
		d->sendq_tl = skb;
	}
}
  258. static int
  259. tsince(int tag)
  260. {
  261. int n;
  262. n = jiffies & 0xffff;
  263. n -= tag & 0xffff;
  264. if (n < 0)
  265. n += 1<<16;
  266. return n;
  267. }
/* Per-device timer callback: retransmit frames outstanding longer than
 * ~150% of the average round-trip time, fail the whole device once a
 * frame has waited more than MAXWAIT seconds, back off rttavg when any
 * retransmit occurred, then re-arm the timer and flush the send queue
 * outside the lock.  @vp is the struct aoedev pointer.
 */
static void
rexmit_timer(ulong vp)
{
	struct aoedev *d;
	struct frame *f, *e;
	struct sk_buff *sl;
	register long timeout;
	ulong flags, n;

	d = (struct aoedev *) vp;
	sl = NULL;

	/* timeout is always ~150% of the moving average */
	timeout = d->rttavg;
	timeout += timeout >> 1;

	spin_lock_irqsave(&d->lock, flags);

	if (d->flags & DEVFL_TKILL) {	/* device teardown: don't re-arm */
		tdie: spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	f = d->frames;
	e = f + d->nframes;
	for (; f<e; f++) {
		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
			n = f->waited += timeout;
			n /= HZ;
			if (n > MAXWAIT) { /* waited too long.  device failure. */
				aoedev_downdev(d);
				goto tdie;
			}
			rexmit(d, f);
		}
	}

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;
	if (sl) {
		/* retransmits queued: double rttavg, capped at MAXTIMER */
		n = d->rttavg <<= 1;
		if (n > MAXTIMER)
			d->rttavg = MAXTIMER;
	}

	d->timer.expires = jiffies + TIMERTICK;
	add_timer(&d->timer);

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);	/* transmit with interrupts enabled */
}
/* this function performs work that has been deferred until sleeping is OK
 * (scheduled via d->work): allocate the gendisk if requested, and/or
 * propagate a new device capacity to the block device inode.
 */
void
aoecmd_sleepwork(void *vp)
{
	struct aoedev *d = (struct aoedev *) vp;

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		struct block_device *bd;
		unsigned long flags;
		u64 ssize;

		ssize = d->gd->capacity;
		bd = bdget_disk(d->gd, 0);

		if (bd) {
			/* update the bdev inode size (bytes = sectors << 9) */
			mutex_lock(&bd->bd_inode->i_mutex);
			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
			mutex_unlock(&bd->bd_inode->i_mutex);
			bdput(bd);
		}
		spin_lock_irqsave(&d->lock, flags);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;
		spin_unlock_irqrestore(&d->lock, flags);
	}
}
  337. static void
  338. ataid_complete(struct aoedev *d, unsigned char *id)
  339. {
  340. u64 ssize;
  341. u16 n;
  342. /* word 83: command set supported */
  343. n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));
  344. /* word 86: command set/feature enabled */
  345. n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));
  346. if (n & (1<<10)) { /* bit 10: LBA 48 */
  347. d->flags |= DEVFL_EXT;
  348. /* word 100: number lba48 sectors */
  349. ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));
  350. /* set as in ide-disk.c:init_idedisk_capacity */
  351. d->geo.cylinders = ssize;
  352. d->geo.cylinders /= (255 * 63);
  353. d->geo.heads = 255;
  354. d->geo.sectors = 63;
  355. } else {
  356. d->flags &= ~DEVFL_EXT;
  357. /* number lba28 sectors */
  358. ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));
  359. /* NOTE: obsolete in ATA 6 */
  360. d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
  361. d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
  362. d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
  363. }
  364. if (d->ssize != ssize)
  365. printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu "
  366. "sectors\n", (unsigned long long)mac_addr(d->addr),
  367. d->aoemajor, d->aoeminor,
  368. d->fw_ver, (long long)ssize);
  369. d->ssize = ssize;
  370. d->geo.start = 0;
  371. if (d->gd != NULL) {
  372. d->gd->capacity = ssize;
  373. d->flags |= DEVFL_NEWSIZE;
  374. } else {
  375. if (d->flags & DEVFL_GDALLOC) {
  376. printk(KERN_INFO "aoe: %s: %s e%lu.%lu, %s\n",
  377. __FUNCTION__,
  378. "can't schedule work for",
  379. d->aoemajor, d->aoeminor,
  380. "it's already on! (This really shouldn't happen).\n");
  381. return;
  382. }
  383. d->flags |= DEVFL_GDALLOC;
  384. }
  385. schedule_work(&d->work);
  386. }
  387. static void
  388. calc_rttavg(struct aoedev *d, int rtt)
  389. {
  390. register long n;
  391. n = rtt;
  392. if (n < MINTIMER)
  393. n = MINTIMER;
  394. else if (n > MAXTIMER)
  395. n = MAXTIMER;
  396. /* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
  397. n -= d->rttavg;
  398. d->rttavg += n >> 2;
  399. }
/* Handle an incoming ATA response skb: match it to its device and
 * outstanding frame, update the RTT average, copy read data or complete
 * an IDENTIFY, account I/O statistics, end the bio when the whole buf is
 * done, free the frame, and kick off more work.  Unmatched responses are
 * logged and dropped.
 */
void
aoecmd_ata_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *hin;
	struct aoe_atahdr *ahin, *ahout;
	struct frame *f;
	struct buf *buf;
	struct sk_buff *sl;
	register long n;
	ulong flags;
	char ebuf[128];
	u16 aoemajor;

	hin = (struct aoe_hdr *) skb->mac.raw;
	aoemajor = be16_to_cpu(hin->major);
	d = aoedev_by_aoeaddr(aoemajor, hin->minor);
	if (d == NULL) {
		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
			"for unknown device %d.%d\n",
			aoemajor, hin->minor);
		aoechr_error(ebuf);
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	/* the response tag must match a frame we have outstanding */
	f = getframe(d, be32_to_cpu(hin->tag));
	if (f == NULL) {
		spin_unlock_irqrestore(&d->lock, flags);
		snprintf(ebuf, sizeof ebuf,
			"%15s e%d.%d tag=%08x@%08lx\n",
			"unexpected rsp",
			be16_to_cpu(hin->major),
			hin->minor,
			be32_to_cpu(hin->tag),
			jiffies);
		aoechr_error(ebuf);
		return;
	}

	calc_rttavg(d, tsince(f->tag));

	ahin = (struct aoe_atahdr *) (hin+1);
	ahout = (struct aoe_atahdr *) (f->data + sizeof(struct aoe_hdr));
	buf = f->buf;

	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
		printk(KERN_CRIT "aoe: aoecmd_ata_rsp: ata error cmd=%2.2Xh "
			"stat=%2.2Xh from e%ld.%ld\n",
			ahout->cmdstat, ahin->cmdstat,
			d->aoemajor, d->aoeminor);
		if (buf)
			buf->flags |= BUFFL_FAIL;
	} else {
		switch (ahout->cmdstat) {
		case WIN_READ:
		case WIN_READ_EXT:
			n = ahout->scnt << 9;
			if (skb->len - sizeof *hin - sizeof *ahin < n) {
				printk(KERN_CRIT "aoe: aoecmd_ata_rsp: runt "
					"ata data size in read.  skb->len=%d\n",
					skb->len);
				/* fail frame f?  just returning will rexmit. */
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			memcpy(f->bufaddr, ahin+1, n);
			/* fall through: reads and writes share completion */
		case WIN_WRITE:
		case WIN_WRITE_EXT:
			break;
		case WIN_IDENTIFY:
			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
				printk(KERN_INFO "aoe: aoecmd_ata_rsp: runt data size "
					"in ataid.  skb->len=%d\n", skb->len);
				spin_unlock_irqrestore(&d->lock, flags);
				return;
			}
			ataid_complete(d, (char *) (ahin+1));
			/* device answered the config-time IDENTIFY: unpause */
			d->flags &= ~DEVFL_PAUSE;
			break;
		default:
			printk(KERN_INFO "aoe: aoecmd_ata_rsp: unrecognized "
				"outbound ata command %2.2Xh for %d.%d\n",
				ahout->cmdstat,
				be16_to_cpu(hin->major),
				hin->minor);
		}
	}

	if (buf) {
		buf->nframesout -= 1;
		if (buf->nframesout == 0 && buf->resid == 0) {
			/* last frame of the buf: account stats, end the bio */
			unsigned long duration = jiffies - buf->start_time;
			unsigned long n_sect = buf->bio->bi_size >> 9;
			struct gendisk *disk = d->gd;
			const int rw = bio_data_dir(buf->bio);

			disk_stat_inc(disk, ios[rw]);
			disk_stat_add(disk, ticks[rw], duration);
			disk_stat_add(disk, sectors[rw], n_sect);
			disk_stat_add(disk, io_ticks, duration);
			n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
			bio_endio(buf->bio, buf->bio->bi_size, n);
			mempool_free(buf, d->bufpool);
		}
	}

	f->buf = NULL;
	f->tag = FREETAG;	/* release the frame */

	aoecmd_work(d);

	sl = d->sendq_hd;
	d->sendq_hd = d->sendq_tl = NULL;

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);	/* transmit outside the lock */
}
  507. void
  508. aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
  509. {
  510. struct sk_buff *sl;
  511. sl = aoecmd_cfg_pkts(aoemajor, aoeminor, NULL);
  512. aoenet_xmit(sl);
  513. }
/*
 * Since we only call this in one place (and it only prepares one frame)
 * we just return the skb.  Usually we'd chain it up to the aoedev sendq.
 *
 * Build an ATA IDENTIFY DEVICE command frame for @d, reset the RTT
 * average, and (re)install the retransmit timer handler.  Caller must
 * hold d->lock.  Returns NULL if no free frame or skb is available.
 */
static struct sk_buff *
aoecmd_ata_id(struct aoedev *d)
{
	struct aoe_hdr *h;
	struct aoe_atahdr *ah;
	struct frame *f;
	struct sk_buff *skb;

	f = getframe(d, FREETAG);
	if (f == NULL) {
		printk(KERN_CRIT "aoe: aoecmd_ata_id: can't get a frame.  "
			"This shouldn't happen.\n");
		return NULL;
	}

	/* initialize the headers & frame */
	h = (struct aoe_hdr *) f->data;
	ah = (struct aoe_atahdr *) (h+1);
	f->ndata = sizeof *h + sizeof *ah;
	memset(h, 0, f->ndata);
	f->tag = aoehdr_atainit(d, h);
	f->waited = 0;
	f->writedatalen = 0;

	/* set up ata header */
	ah->scnt = 1;		/* IDENTIFY returns exactly one sector */
	ah->cmdstat = WIN_IDENTIFY;
	ah->lba3 = 0xa0;	/* obsolete device/head byte */

	skb = skb_prepare(d, f);

	/* we now await an identify response: start with a long timeout */
	d->rttavg = MAXTIMER;
	d->timer.function = rexmit_timer;

	return skb;
}
/* Handle an incoming AoE config response: validate the shelf address,
 * find or create the matching aoedev, record the responding MAC and
 * interface, and — unless users already have the device open — pause
 * the device and send an ATA IDENTIFY to (re)probe its geometry.
 */
void
aoecmd_cfg_rsp(struct sk_buff *skb)
{
	struct aoedev *d;
	struct aoe_hdr *h;
	struct aoe_cfghdr *ch;
	ulong flags, sysminor, aoemajor;
	u16 bufcnt;
	struct sk_buff *sl;
	enum { MAXFRAMES = 8 };

	h = (struct aoe_hdr *) skb->mac.raw;
	ch = (struct aoe_cfghdr *) (h+1);

	/*
	 * Enough people have their dip switches set backwards to
	 * warrant a loud message for this special case.
	 */
	aoemajor = be16_to_cpu(h->major);
	if (aoemajor == 0xfff) {
		printk(KERN_CRIT "aoe: aoecmd_cfg_rsp: Warning: shelf "
			"address is all ones.  Check shelf dip switches\n");
		return;
	}

	sysminor = SYSMINOR(aoemajor, h->minor);
	if (sysminor * AOE_PARTITIONS + AOE_PARTITIONS > MINORMASK) {
		printk(KERN_INFO
			"aoe: e%ld.%d: minor number too large\n",
			aoemajor, (int) h->minor);
		return;
	}

	bufcnt = be16_to_cpu(ch->bufcnt);
	if (bufcnt > MAXFRAMES)	/* keep it reasonable */
		bufcnt = MAXFRAMES;

	d = aoedev_by_sysminor_m(sysminor, bufcnt);
	if (d == NULL) {
		printk(KERN_INFO "aoe: aoecmd_cfg_rsp: device sysminor_m failure\n");
		return;
	}

	spin_lock_irqsave(&d->lock, flags);

	/* permit device to migrate mac and network interface */
	d->ifp = skb->dev;
	memcpy(d->addr, h->src, sizeof d->addr);

	/* don't change users' perspective */
	if (d->nopen && !(d->flags & DEVFL_PAUSE)) {
		spin_unlock_irqrestore(&d->lock, flags);
		return;
	}
	d->flags |= DEVFL_PAUSE;	/* force pause */
	d->fw_ver = be16_to_cpu(ch->fwver);

	/* check for already outstanding ataid */
	sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL;

	spin_unlock_irqrestore(&d->lock, flags);

	aoenet_xmit(sl);
}