/* npe.c - Intel IXP4xx NPE ethernet driver for U-Boot */
/*
 * (C) Copyright 2005-2006
 * Stefan Roese, DENX Software Engineering, sr@denx.de.
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#if 0
#define DEBUG		/* define for debug output */
#endif

#include <config.h>
#include <common.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <asm/processor.h>
#include <asm/arch-ixp/ixp425.h>

/* Intel IXP400 Access Library (CSR) headers */
#include <IxOsal.h>
#include <IxEthAcc.h>
#include <IxEthDB.h>
#include <IxNpeDl.h>
#include <IxQMgr.h>
#include <IxNpeMh.h>
#include <ix_ossl.h>
#include <IxFeatureCtrl.h>

#include <npe.h>

#ifdef CONFIG_IXP4XX_NPE
/* Queue-manager dispatcher entry point; NULL until npe_csr_load() runs. */
static IxQMgrDispatcherFuncPtr qDispatcherFunc = NULL;

/* Per-port flags: does the Eth coprocessor exist / is the port in use. */
static int npe_exists[NPE_NUM_PORTS];
static int npe_used[NPE_NUM_PORTS];

/* A little extra so we can align to cacheline. */
static u8 npe_alloc_pool[NPE_MEM_POOL_SIZE + CFG_CACHELINE_SIZE - 1];
/* Bounds and cursor of the bump allocator over npe_alloc_pool (see npe_alloc()). */
static u8 *npe_alloc_end;
static u8 *npe_alloc_free;
  50. static void *npe_alloc(int size)
  51. {
  52. static int count = 0;
  53. void *p = NULL;
  54. size = (size + (CFG_CACHELINE_SIZE-1)) & ~(CFG_CACHELINE_SIZE-1);
  55. count++;
  56. if ((npe_alloc_free + size) < npe_alloc_end) {
  57. p = npe_alloc_free;
  58. npe_alloc_free += size;
  59. } else {
  60. printf("npe_alloc: failed (count=%d, size=%d)!\n", count, size);
  61. }
  62. return p;
  63. }
  64. /* Not interrupt safe! */
  65. static void mbuf_enqueue(IX_OSAL_MBUF **q, IX_OSAL_MBUF *new)
  66. {
  67. IX_OSAL_MBUF *m = *q;
  68. IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(new) = NULL;
  69. if (m) {
  70. while(IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m))
  71. m = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
  72. IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = new;
  73. } else
  74. *q = new;
  75. }
  76. /* Not interrupt safe! */
  77. static IX_OSAL_MBUF *mbuf_dequeue(IX_OSAL_MBUF **q)
  78. {
  79. IX_OSAL_MBUF *m = *q;
  80. if (m)
  81. *q = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
  82. return m;
  83. }
  84. static void reset_tx_mbufs(struct npe* p_npe)
  85. {
  86. IX_OSAL_MBUF *m;
  87. int i;
  88. p_npe->txQHead = NULL;
  89. for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS; i++) {
  90. m = &p_npe->tx_mbufs[i];
  91. memset(m, 0, sizeof(*m));
  92. IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->tx_pkts[i * NPE_PKT_SIZE];
  93. IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
  94. mbuf_enqueue(&p_npe->txQHead, m);
  95. }
  96. }
  97. static void reset_rx_mbufs(struct npe* p_npe)
  98. {
  99. IX_OSAL_MBUF *m;
  100. int i;
  101. p_npe->rxQHead = NULL;
  102. HAL_DCACHE_INVALIDATE(p_npe->rx_pkts, NPE_PKT_SIZE *
  103. CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
  104. for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS; i++) {
  105. m = &p_npe->rx_mbufs[i];
  106. memset(m, 0, sizeof(*m));
  107. IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->rx_pkts[i * NPE_PKT_SIZE];
  108. IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
  109. if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
  110. printf("ixEthAccPortRxFreeReplenish failed for port %d\n", p_npe->eth_id);
  111. break;
  112. }
  113. }
  114. }
  115. static void init_rx_mbufs(struct npe* p_npe)
  116. {
  117. p_npe->rxQHead = NULL;
  118. p_npe->rx_pkts = npe_alloc(NPE_PKT_SIZE *
  119. CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
  120. if (p_npe->rx_pkts == NULL) {
  121. printf("alloc of packets failed.\n");
  122. return;
  123. }
  124. p_npe->rx_mbufs = (IX_OSAL_MBUF *)
  125. npe_alloc(sizeof(IX_OSAL_MBUF) *
  126. CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
  127. if (p_npe->rx_mbufs == NULL) {
  128. printf("alloc of mbufs failed.\n");
  129. return;
  130. }
  131. reset_rx_mbufs(p_npe);
  132. }
  133. static void init_tx_mbufs(struct npe* p_npe)
  134. {
  135. p_npe->tx_pkts = npe_alloc(NPE_PKT_SIZE *
  136. CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
  137. if (p_npe->tx_pkts == NULL) {
  138. printf("alloc of packets failed.\n");
  139. return;
  140. }
  141. p_npe->tx_mbufs = (IX_OSAL_MBUF *)
  142. npe_alloc(sizeof(IX_OSAL_MBUF) *
  143. CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
  144. if (p_npe->tx_mbufs == NULL) {
  145. printf("alloc of mbufs failed.\n");
  146. return;
  147. }
  148. reset_tx_mbufs(p_npe);
  149. }
  150. /* Convert IX_ETH_PORT_n to IX_NPEMH_NPEID_NPEx */
  151. static int __eth_to_npe(int eth_id)
  152. {
  153. switch(eth_id) {
  154. case IX_ETH_PORT_1:
  155. return IX_NPEMH_NPEID_NPEB;
  156. case IX_ETH_PORT_2:
  157. return IX_NPEMH_NPEID_NPEC;
  158. case IX_ETH_PORT_3:
  159. return IX_NPEMH_NPEID_NPEA;
  160. }
  161. return 0;
  162. }
  163. /* Poll the CSR machinery. */
  164. static void npe_poll(int eth_id)
  165. {
  166. if (qDispatcherFunc != NULL) {
  167. ixNpeMhMessagesReceive(__eth_to_npe(eth_id));
  168. (*qDispatcherFunc)(IX_QMGR_QUELOW_GROUP);
  169. }
  170. }
/*
 * ethAcc RX callback: copy a received frame into the U-Boot RX packet
 * ring (NetRxPackets / rx_len, indexed by rx_write) for npe_rx() to
 * consume, then give a buffer back to the NPE RX-free queue.
 *
 * cbTag is the struct npe * registered in npe_init().
 */
static void npe_rx_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	struct npe* p_npe = (struct npe *)cbTag;

	if (IX_OSAL_MBUF_MLEN(m) > 0) {
		mbuf_enqueue(&p_npe->rxQHead, m);

		/* Ring full (write would catch up to read): drop the frame. */
		if (p_npe->rx_write == ((p_npe->rx_read-1) & (PKTBUFSRX-1))) {
			debug("Rx overflow: rx_write=%d rx_read=%d\n",
			      p_npe->rx_write, p_npe->rx_read);
		} else {
			debug("Received message #%d (len=%d)\n", p_npe->rx_write,
			      IX_OSAL_MBUF_MLEN(m));
			memcpy((void *)NetRxPackets[p_npe->rx_write], IX_OSAL_MBUF_MDATA(m),
			       IX_OSAL_MBUF_MLEN(m));
			p_npe->rx_len[p_npe->rx_write] = IX_OSAL_MBUF_MLEN(m);
			/* Advance write index, wrapping at PKTBUFSRX. */
			p_npe->rx_write++;
			if (p_npe->rx_write == PKTBUFSRX)
				p_npe->rx_write = 0;

#ifdef CONFIG_PRINT_RX_FRAMES
			/* Debug dump of the first 60 bytes of the frame. */
			{
				u8 *ptr = IX_OSAL_MBUF_MDATA(m);
				int i;

				for (i=0; i<60; i++) {
					debug("%02x ", *ptr++);
				}
				debug("\n");
			}
#endif
		}
		/*
		 * Pull a buffer off the queue head to recycle below; when the
		 * queue was empty on entry this is the mbuf enqueued above.
		 */
		m = mbuf_dequeue(&p_npe->rxQHead);
	} else {
		debug("Received frame with length 0!!!\n");
		m = mbuf_dequeue(&p_npe->rxQHead);
	}

	/* Now return mbuf to NPE */
	IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
	IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
	IX_OSAL_MBUF_FLAGS(m) = 0;
	if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
		debug("npe_rx_callback: Error returning mbuf.\n");
	}
}
  213. /* ethAcc TX callback */
  214. static void npe_tx_callback(u32 cbTag, IX_OSAL_MBUF *m)
  215. {
  216. struct npe* p_npe = (struct npe *)cbTag;
  217. debug("%s\n", __FUNCTION__);
  218. IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
  219. IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
  220. IX_OSAL_MBUF_FLAGS(m) = 0;
  221. mbuf_enqueue(&p_npe->txQHead, m);
  222. }
  223. static int npe_set_mac_address(struct eth_device *dev)
  224. {
  225. struct npe *p_npe = (struct npe *)dev->priv;
  226. IxEthAccMacAddr npeMac;
  227. debug("%s\n", __FUNCTION__);
  228. /* Set MAC address */
  229. memcpy(npeMac.macAddress, dev->enetaddr, 6);
  230. if (ixEthAccPortUnicastMacAddressSet(p_npe->eth_id, &npeMac) != IX_ETH_ACC_SUCCESS) {
  231. printf("Error setting unicast address! %02x:%02x:%02x:%02x:%02x:%02x\n",
  232. npeMac.macAddress[0], npeMac.macAddress[1],
  233. npeMac.macAddress[2], npeMac.macAddress[3],
  234. npeMac.macAddress[4], npeMac.macAddress[5]);
  235. return 0;
  236. }
  237. return 1;
  238. }
/*
 * Boot-time CSR (Intel Access Library) initialization.
 *
 * Order matters: queue manager first, then the NPE message handler,
 * then NPE firmware download, then the ethAcc driver and per-port
 * setup.  Must run once before any port is enabled.
 *
 * Returns 1 on success, 0 on the first fatal error (per-port setup
 * errors are reported but not fatal).
 */
static int npe_csr_load(void)
{
	int i;

	if (ixQMgrInit() != IX_SUCCESS) {
		debug("Error initialising queue manager!\n");
		return 0;
	}

	/* Remember the dispatcher entry point for npe_poll(). */
	ixQMgrDispatcherLoopGet(&qDispatcherFunc);

	if(ixNpeMhInitialize(IX_NPEMH_NPEINTERRUPTS_YES) != IX_SUCCESS) {
		printf("Error initialising NPE Message handler!\n");
		return 0;
	}

	/* Download firmware only to NPEs that exist and will be used. */
	if (npe_used[IX_ETH_PORT_1] && npe_exists[IX_ETH_PORT_1] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-B!\n");
		return 0;
	}

	if (npe_used[IX_ETH_PORT_2] && npe_exists[IX_ETH_PORT_2] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-C!\n");
		return 0;
	}

	/* don't need this for U-Boot */
	ixFeatureCtrlSwConfigurationWrite(IX_FEATURECTRL_ETH_LEARNING, FALSE);

	if (ixEthAccInit() != IX_ETH_ACC_SUCCESS) {
		printf("Error initialising Ethernet access driver!\n");
		return 0;
	}

	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (!npe_used[i] || !npe_exists[i])
			continue;
		if (ixEthAccPortInit(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error initialising Ethernet port%d!\n", i);
		}
		if (ixEthAccTxSchedulingDisciplineSet(i, FIFO_NO_PRIORITY) != IX_ETH_ACC_SUCCESS) {
			printf("Error setting scheduling discipline for port %d.\n", i);
		}
		/* NPE strips/keeps FCS: don't append on RX, do append on TX. */
		if (ixEthAccPortRxFrameAppendFCSDisable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error disabling RX FCS for port %d.\n", i);
		}
		if (ixEthAccPortTxFrameAppendFCSEnable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error enabling TX FCS for port %d.\n", i);
		}
	}
	return 1;
}
/*
 * eth_device->init hook: wait for PHY autonegotiation, report link
 * speed/duplex once, rebuild the mbuf pools from the static allocator,
 * register RX/TX-done callbacks, program the MAC address and enable the
 * port.
 *
 * Returns 0 on success, -1 on error.
 */
static int npe_init(struct eth_device *dev, bd_t * bis)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;
	u16 reg_short;
	int speed;
	int duplex;

	debug("%s: 1\n", __FUNCTION__);

	/* NOTE(review): return value of miiphy_read() is not checked;
	 * reg_short could be uninitialized if the read fails -- confirm. */
	miiphy_read (dev->name, p_npe->phy_no, PHY_BMSR, &reg_short);

	/*
	 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
	 */
	if ((reg_short & PHY_BMSR_AUTN_ABLE) && !(reg_short & PHY_BMSR_AUTN_COMP)) {
		puts ("Waiting for PHY auto negotiation to complete");
		i = 0;
		while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
			/*
			 * Timeout reached ?
			 */
			if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
				puts (" TIMEOUT !\n");
				break;
			}

			/* Re-read status (and print a dot) every 1000 ms. */
			if ((i++ % 1000) == 0) {
				putc ('.');
				miiphy_read (dev->name, p_npe->phy_no, PHY_BMSR, &reg_short);
			}
			udelay (1000);	/* 1 ms */
		}
		puts (" done\n");
		udelay (500000);	/* another 500 ms (results in faster booting) */
	}

	speed = miiphy_speed (dev->name, p_npe->phy_no);
	duplex = miiphy_duplex (dev->name, p_npe->phy_no);

	/* Report link parameters only on the first init of this device. */
	if (p_npe->print_speed) {
		p_npe->print_speed = 0;
		printf ("ENET Speed is %d Mbps - %s duplex connection\n",
			(int) speed, (duplex == HALF) ? "HALF" : "FULL");
	}

	/* Reset the bump allocator to the (cacheline-aligned) start of the
	 * static pool so the mbuf pools are rebuilt on every init. */
	npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
	npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
				 CFG_CACHELINE_SIZE - 1) & ~(CFG_CACHELINE_SIZE - 1));

	/* initialize mbuf pool */
	init_rx_mbufs(p_npe);
	init_tx_mbufs(p_npe);

	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register RX callback!\n");
		return -1;
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register TX callback!\n");
		return -1;
	}

	npe_set_mac_address(dev);

	if (ixEthAccPortEnable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		printf("can't enable port!\n");
		return -1;
	}

	p_npe->active = 1;

	return 0;
}
#if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
/* Uninitialize CSR library (reverse order of npe_csr_load()). */
static void npe_csr_unload(void)
{
	ixEthAccUnload();
	ixEthDBUnload();
	ixNpeMhUnload();
	ixQMgrUnload();
}

/* callback which is used by ethAcc to recover RX buffers when stopping */
static void npe_rx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	debug("%s\n", __FUNCTION__);
}

/* callback which is used by ethAcc to recover TX buffers when stopping */
static void npe_tx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m)
{
	debug("%s\n", __FUNCTION__);
}
#endif
/*
 * eth_device->halt hook: poll the CSR machinery for a while so in-flight
 * mbufs can be recovered, then mark the port inactive.  The full port
 * disable / NPE stop-and-reset sequence is compiled out below.
 */
static void npe_halt(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;

	debug("%s\n", __FUNCTION__);

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(100);
	}

#if 0 /* test-only: probably have to deal with it when booting linux (for a clean state) */
	/* Swap in no-op callbacks so ethAcc can hand back its buffers. */
	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_stop_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering rx callback!\n");
	}
	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_stop_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering tx callback!\n");
	}

	if (ixEthAccPortDisable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		debug("npe_stop: Error disabling NPEB!\n");
	}

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(10000);
	}

	/*
	 * For U-Boot only, we are probably launching Linux or other OS that
	 * needs a clean slate for its NPE library.
	 */
#if 0 /* test-only */
	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (npe_used[i] && npe_exists[i])
			if (ixNpeDlNpeStopAndReset(__eth_to_npe(i)) != IX_SUCCESS)
				printf("Failed to stop and reset NPE B.\n");
	}
#endif
#endif
	p_npe->active = 0;
}
  412. static int npe_send(struct eth_device *dev, volatile void *packet, int len)
  413. {
  414. struct npe *p_npe = (struct npe *)dev->priv;
  415. u8 *dest;
  416. int err;
  417. IX_OSAL_MBUF *m;
  418. debug("%s\n", __FUNCTION__);
  419. m = mbuf_dequeue(&p_npe->txQHead);
  420. dest = IX_OSAL_MBUF_MDATA(m);
  421. IX_OSAL_MBUF_PKT_LEN(m) = IX_OSAL_MBUF_MLEN(m) = len;
  422. IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = NULL;
  423. memcpy(dest, (char *)packet, len);
  424. if ((err = ixEthAccPortTxFrameSubmit(p_npe->eth_id, m, IX_ETH_ACC_TX_DEFAULT_PRIORITY))
  425. != IX_ETH_ACC_SUCCESS) {
  426. printf("npe_send: Can't submit frame. err[%d]\n", err);
  427. mbuf_enqueue(&p_npe->txQHead, m);
  428. return 0;
  429. }
  430. #ifdef DEBUG_PRINT_TX_FRAMES
  431. {
  432. u8 *ptr = IX_OSAL_MBUF_MDATA(m);
  433. int i;
  434. for (i=0; i<IX_OSAL_MBUF_MLEN(m); i++) {
  435. printf("%02x ", *ptr++);
  436. }
  437. printf(" (tx-len=%d)\n", IX_OSAL_MBUF_MLEN(m));
  438. }
  439. #endif
  440. npe_poll(p_npe->eth_id);
  441. return len;
  442. }
  443. static int npe_rx(struct eth_device *dev)
  444. {
  445. struct npe *p_npe = (struct npe *)dev->priv;
  446. debug("%s\n", __FUNCTION__);
  447. npe_poll(p_npe->eth_id);
  448. debug("%s: rx_write=%d rx_read=%d\n", __FUNCTION__, p_npe->rx_write, p_npe->rx_read);
  449. while (p_npe->rx_write != p_npe->rx_read) {
  450. debug("Reading message #%d\n", p_npe->rx_read);
  451. NetReceive(NetRxPackets[p_npe->rx_read], p_npe->rx_len[p_npe->rx_read]);
  452. p_npe->rx_read++;
  453. if (p_npe->rx_read == PKTBUFSRX)
  454. p_npe->rx_read = 0;
  455. }
  456. return 0;
  457. }
/*
 * Probe and register all NPE ethernet ports.
 *
 * For each port whose MAC address in the board info is non-zero,
 * allocate and register an eth_device.  On the first pass, detect which
 * Eth coprocessors exist (silicon-stepping dependent on IXP42X) and
 * load the CSR libraries / NPE firmware via npe_csr_load().
 *
 * Returns 1 on success, 0 if the CSR load failed, -1 on allocation
 * failure.
 */
int npe_initialize(bd_t * bis)
{
	static int virgin = 0;		/* one-time hardware-init guard */
	struct eth_device *dev;
	int eth_num = 0;
	struct npe *p_npe = NULL;

	for (eth_num = 0; eth_num < CFG_NPE_NUMS; eth_num++) {

		/* See if we can actually bring up the interface, otherwise, skip it */
		switch (eth_num) {
		default:		/* fall through */
		case 0:
			if (memcmp (bis->bi_enetaddr, "\0\0\0\0\0\0", 6) == 0) {
				continue;
			}
			break;

#ifdef CONFIG_HAS_ETH1
		case 1:
			if (memcmp (bis->bi_enet1addr, "\0\0\0\0\0\0", 6) == 0) {
				continue;
			}
			break;
#endif
		}

		/* Allocate device structure */
		dev = (struct eth_device *)malloc(sizeof(*dev));
		if (dev == NULL) {
			printf ("%s: Cannot allocate eth_device %d\n", __FUNCTION__, eth_num);
			return -1;
		}
		memset(dev, 0, sizeof(*dev));

		/* Allocate our private use data */
		p_npe = (struct npe *)malloc(sizeof(struct npe));
		if (p_npe == NULL) {
			printf("%s: Cannot allocate private hw data for eth_device %d",
			       __FUNCTION__, eth_num);
			free(dev);
			return -1;
		}
		memset(p_npe, 0, sizeof(struct npe));

		/* Bind the port to its MAC address and PHY address. */
		switch (eth_num) {
		default:		/* fall through */
		case 0:
			memcpy(dev->enetaddr, bis->bi_enetaddr, 6);
			p_npe->eth_id = 0;
			p_npe->phy_no = CONFIG_PHY_ADDR;
			break;

#ifdef CONFIG_HAS_ETH1
		case 1:
			memcpy(dev->enetaddr, bis->bi_enet1addr, 6);
			p_npe->eth_id = 1;
			p_npe->phy_no = CONFIG_PHY1_ADDR;
			break;
#endif
		}

		sprintf(dev->name, "NPE%d", eth_num);
		dev->priv = (void *)p_npe;
		dev->init = npe_init;
		dev->halt = npe_halt;
		dev->send = npe_send;
		dev->recv = npe_rx;
		p_npe->print_speed = 1;	/* report link speed on first npe_init() */

		if (0 == virgin) {
			virgin = 1;

			/*
			 * Detect which Eth coprocessors are present: IXP42X B0
			 * silicon must be probed per coprocessor, A0 always has
			 * both; IXP46X is probed like B0.
			 */
			if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP42X) {
				switch (ixFeatureCtrlProductIdRead() & IX_FEATURE_CTRL_SILICON_STEPPING_MASK) {
				case IX_FEATURE_CTRL_SILICON_TYPE_B0:
					/*
					 * If it is B0 Silicon, we only enable port when its corresponding
					 * Eth Coprocessor is available.
					 */
					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_1] = TRUE;

					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_2] = TRUE;
					break;
				case IX_FEATURE_CTRL_SILICON_TYPE_A0:
					/*
					 * If it is A0 Silicon, we enable both as both Eth Coprocessors
					 * are available.
					 */
					npe_exists[IX_ETH_PORT_1] = TRUE;
					npe_exists[IX_ETH_PORT_2] = TRUE;
					break;
				}
			} else if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP46X) {
				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_1] = TRUE;

				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_2] = TRUE;
			}

			npe_used[IX_ETH_PORT_1] = 1;
			npe_used[IX_ETH_PORT_2] = 1;

			/* Initialize the cacheline-aligned bump-allocator pool. */
			npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
			npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
						 CFG_CACHELINE_SIZE - 1)
						& ~(CFG_CACHELINE_SIZE - 1));

			/* NOTE(review): on failure this leaks the dev and p_npe
			 * allocated above for the current port -- confirm. */
			if (!npe_csr_load())
				return 0;
		}

		eth_register(dev);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		miiphy_register(dev->name, npe_miiphy_read, npe_miiphy_write);
#endif
	}			/* end for each supported device */
	return 1;
}
  568. #endif /* CONFIG_IXP4XX_NPE */